Compare commits
122 Commits
460d877339
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 81f99d0b50 | |||
| 030bca09b7 | |||
| 5b7d1a4360 | |||
| e7834a888e | |||
| abb224e01b | |||
| 57cf93b1e5 | |||
| c41165c294 | |||
| cdf73e2d65 | |||
| 21753c4f06 | |||
| eb859af371 | |||
| 5a5c619a34 | |||
| 00119ed68d | |||
| b81e0cdbb4 | |||
| 41dcd60225 | |||
| 12f04bd8d6 | |||
| d4d04491d2 | |||
| 93dc699825 | |||
| 61daa8bbc0 | |||
| 57a0bbe36e | |||
| f62785aaf2 | |||
| 1e33220f59 | |||
| 1da38361a9 | |||
| 9630aea877 | |||
| 037c18eb00 | |||
| 2e1a4b3b2b | |||
| 4be2469f92 | |||
| 6bb38dbd8c | |||
| d3b2022ffb | |||
| 4b6e118a88 | |||
| 936946010f | |||
| ee7412442a | |||
| 68d8056d2e | |||
| 528d0bd8ea | |||
| baf45c6c62 | |||
| 0966f347c4 | |||
| ab11ece001 | |||
| 5e1b8134d9 | |||
| 2f2e5a7419 | |||
| 2274e20123 | |||
| 3e4f688484 | |||
| c110352e9e | |||
| 201cca8b66 | |||
| 6e4797d71e | |||
| 6e35c5d269 | |||
| f7cc130432 | |||
| 2f60b0915e | |||
| e15ad8fb62 | |||
| 4c138424a5 | |||
| 8d9d63b866 | |||
| a344f1035b | |||
| c250439326 | |||
| a284d38f56 | |||
| 0c0acd7f51 | |||
| cf2336c0bc | |||
| 44f3fb8718 | |||
| 9b73f6719d | |||
| a0e8566ff8 | |||
| 8bdad3529f | |||
| f5c3635258 | |||
| 673eb4c7c2 | |||
| 63b48849a7 | |||
| d6da81131f | |||
| d0b8b78d12 | |||
| e3375fd187 | |||
| ea35695221 | |||
| 59464a1592 | |||
| 28f7b1cfcd | |||
| a61c9dc969 | |||
| 029c094e18 | |||
| 2f602e45f7 | |||
| 0a73c49d01 | |||
| 576ec43854 | |||
| fe8eefa173 | |||
| df0528b2c2 | |||
| 259ff17eba | |||
| 9242b4709a | |||
| 2ddfddfbbb | |||
| d931e8c6a3 | |||
| 44a5a3d70e | |||
| 0225f32901 | |||
| 6877637507 | |||
| 4773ae1c7a | |||
| 207be94c42 | |||
| 12a859061c | |||
| ddfc8a0b02 | |||
| 53d664de4f | |||
| 706d2e1df8 | |||
| cbad4ea706 | |||
| 06738dbfa5 | |||
| 5ebefaea44 | |||
| 08b8f3872a | |||
| 73c1300d9f | |||
| a172b5465f | |||
| 224a95a85f | |||
| 2eb032ecd4 | |||
| 1c89454197 | |||
| 39ee1e2945 | |||
| 73860bd9f2 | |||
| 401a5d4169 | |||
| 6e76711940 | |||
| d1d6366cd2 | |||
| 19bb94ee47 | |||
| f06fea699f | |||
| 36e1e12a8a | |||
| da95a399a1 | |||
| f409939e6b | |||
| e8214b5856 | |||
| c097e55222 | |||
| 1cdc97a729 | |||
| 1efa0e973b | |||
| b8f3a1c562 | |||
| 54313fd3e0 | |||
| 7f81f0614b | |||
| ebec5e0f58 | |||
| 9ac7f8d22d | |||
| 94661d7877 | |||
| dfc386216b | |||
| 33a38014e6 | |||
| 60683da3ca | |||
| a41a99dad4 | |||
| 750785680b | |||
| 7392c930d6 |
49
.containerignore
Normal file
49
.containerignore
Normal file
@@ -0,0 +1,49 @@
|
||||
# ──────────────────────────────────────────────
|
||||
# BanGUI — .dockerignore / .containerignore
|
||||
# Works with both Docker and Podman.
|
||||
# ──────────────────────────────────────────────
|
||||
|
||||
# Version control
|
||||
.git
|
||||
.gitignore
|
||||
|
||||
# Virtual environments
|
||||
.venv
|
||||
venv
|
||||
env
|
||||
|
||||
# IDE / editor
|
||||
.vscode
|
||||
.idea
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# Python caches
|
||||
__pycache__
|
||||
*.pyc
|
||||
*.pyo
|
||||
.mypy_cache
|
||||
.ruff_cache
|
||||
.pytest_cache
|
||||
.coverage
|
||||
htmlcov
|
||||
|
||||
# Node
|
||||
frontend/node_modules
|
||||
frontend/.vite
|
||||
|
||||
# Build artifacts
|
||||
dist
|
||||
build
|
||||
*.egg-info
|
||||
|
||||
# Documentation (keep README at root if needed)
|
||||
Docs
|
||||
|
||||
# Tests (not needed in production images)
|
||||
backend/tests
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
49
.dockerignore
Normal file
49
.dockerignore
Normal file
@@ -0,0 +1,49 @@
|
||||
# ──────────────────────────────────────────────
|
||||
# BanGUI — .dockerignore / .containerignore
|
||||
# Works with both Docker and Podman.
|
||||
# ──────────────────────────────────────────────
|
||||
|
||||
# Version control
|
||||
.git
|
||||
.gitignore
|
||||
|
||||
# Virtual environments
|
||||
.venv
|
||||
venv
|
||||
env
|
||||
|
||||
# IDE / editor
|
||||
.vscode
|
||||
.idea
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# Python caches
|
||||
__pycache__
|
||||
*.pyc
|
||||
*.pyo
|
||||
.mypy_cache
|
||||
.ruff_cache
|
||||
.pytest_cache
|
||||
.coverage
|
||||
htmlcov
|
||||
|
||||
# Node
|
||||
frontend/node_modules
|
||||
frontend/.vite
|
||||
|
||||
# Build artifacts
|
||||
dist
|
||||
build
|
||||
*.egg-info
|
||||
|
||||
# Documentation (keep README at root if needed)
|
||||
Docs
|
||||
|
||||
# Tests (not needed in production images)
|
||||
backend/tests
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
0
.github/agents/ProcessTasks.agent.md
vendored
Normal file
0
.github/agents/ProcessTasks.agent.md
vendored
Normal file
114
.gitignore
vendored
Normal file
114
.gitignore
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
# ─────────────────────────────────────────────
|
||||
# BanGUI — root .gitignore
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
# ── Python ────────────────────────────────────
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*.pyo
|
||||
*.pyd
|
||||
*.so
|
||||
.Python
|
||||
|
||||
# Virtualenvs
|
||||
.venv/
|
||||
venv/
|
||||
env/
|
||||
ENV/
|
||||
.python-version
|
||||
|
||||
# Distribution / packaging
|
||||
dist/
|
||||
build/
|
||||
*.egg-info/
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# Testing & coverage
|
||||
.coverage
|
||||
.coverage.*
|
||||
htmlcov/
|
||||
.pytest_cache/
|
||||
.tox/
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
|
||||
# Type checkers & linters
|
||||
.mypy_cache/
|
||||
.ruff_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
pyrightconfig.json
|
||||
.pytype/
|
||||
|
||||
# ── Node / Frontend ───────────────────────────
|
||||
node_modules/
|
||||
.pnp
|
||||
.pnp.js
|
||||
|
||||
# Build output
|
||||
frontend/dist/
|
||||
frontend/.vite/
|
||||
|
||||
# Logs
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
# ── Secrets / Environment ─────────────────────
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
*.pem
|
||||
secrets.json
|
||||
|
||||
# ── Databases ─────────────────────────────────
|
||||
*.sqlite3
|
||||
*.db
|
||||
*.db-shm
|
||||
*.db-wal
|
||||
|
||||
# ── OS artefacts ──────────────────────────────
|
||||
.DS_Store
|
||||
.DS_Store?
|
||||
._*
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
||||
|
||||
# ── Editor / IDE ──────────────────────────────
|
||||
.idea/
|
||||
*.iml
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
.vscode/settings.json
|
||||
.vscode/launch.json
|
||||
.vscode/*.log
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# ── Docker dev config ─────────────────────────
|
||||
# Ignore auto-generated linuxserver/fail2ban config files,
|
||||
# but track our custom filter, jail, and documentation.
|
||||
Docker/fail2ban-dev-config/**
|
||||
!Docker/fail2ban-dev-config/README.md
|
||||
!Docker/fail2ban-dev-config/fail2ban/
|
||||
!Docker/fail2ban-dev-config/fail2ban/filter.d/
|
||||
!Docker/fail2ban-dev-config/fail2ban/filter.d/bangui-sim.conf
|
||||
!Docker/fail2ban-dev-config/fail2ban/filter.d/bangui-access.conf
|
||||
!Docker/fail2ban-dev-config/fail2ban/jail.d/
|
||||
!Docker/fail2ban-dev-config/fail2ban/jail.d/bangui-sim.conf
|
||||
!Docker/fail2ban-dev-config/fail2ban/jail.d/bangui-access.conf
|
||||
!Docker/fail2ban-dev-config/fail2ban/jail.d/blocklist-import.conf
|
||||
!Docker/fail2ban-dev-config/fail2ban/jail.local
|
||||
|
||||
# ── Misc ──────────────────────────────────────
|
||||
*.log
|
||||
*.tmp
|
||||
*.bak
|
||||
*.orig
|
||||
69
Docker/Dockerfile.backend
Normal file
69
Docker/Dockerfile.backend
Normal file
@@ -0,0 +1,69 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Backend image (Python / FastAPI)
|
||||
#
|
||||
# Compatible with Docker and Podman.
|
||||
# Build context must be the project root.
|
||||
#
|
||||
# Usage:
|
||||
# docker build -t bangui-backend -f Docker/Dockerfile.backend .
|
||||
# podman build -t bangui-backend -f Docker/Dockerfile.backend .
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
# ── Stage 1: build dependencies ──────────────────────────────
|
||||
FROM docker.io/library/python:3.12-slim AS builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Install build-time system dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends gcc libffi-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY backend/pyproject.toml /build/
|
||||
|
||||
# Install Python dependencies into a virtual-env so we can copy it cleanly
|
||||
RUN python -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
RUN pip install --no-cache-dir --upgrade pip \
|
||||
&& pip install --no-cache-dir .
|
||||
|
||||
# ── Stage 2: runtime image ───────────────────────────────────
|
||||
FROM docker.io/library/python:3.12-slim AS runtime
|
||||
|
||||
LABEL maintainer="BanGUI" \
|
||||
description="BanGUI backend — fail2ban web management API"
|
||||
|
||||
# Non-root user for security
|
||||
RUN groupadd --gid 1000 bangui \
|
||||
&& useradd --uid 1000 --gid bangui --shell /bin/bash --create-home bangui
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy the pre-built virtual-env
|
||||
COPY --from=builder /opt/venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH" \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
# Copy application source
|
||||
COPY backend/app /app/app
|
||||
COPY fail2ban-master /app/fail2ban-master
|
||||
|
||||
# Data directory for the SQLite database
|
||||
RUN mkdir -p /data && chown bangui:bangui /data
|
||||
VOLUME ["/data"]
|
||||
|
||||
# Default environment values (override at runtime)
|
||||
ENV BANGUI_DATABASE_PATH="/data/bangui.db" \
|
||||
BANGUI_FAIL2BAN_SOCKET="/var/run/fail2ban/fail2ban.sock" \
|
||||
BANGUI_LOG_LEVEL="info"
|
||||
|
||||
EXPOSE 8000
|
||||
|
||||
USER bangui
|
||||
|
||||
# Health-check using the built-in health endpoint
|
||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
||||
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/api/health')" || exit 1
|
||||
|
||||
CMD ["uvicorn", "app.main:create_app", "--factory", "--host", "0.0.0.0", "--port", "8000"]
|
||||
45
Docker/Dockerfile.frontend
Normal file
45
Docker/Dockerfile.frontend
Normal file
@@ -0,0 +1,45 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Frontend image (React / Vite → nginx)
|
||||
#
|
||||
# Compatible with Docker and Podman.
|
||||
# Build context must be the project root.
|
||||
#
|
||||
# Usage:
|
||||
# docker build -t bangui-frontend -f Docker/Dockerfile.frontend .
|
||||
# podman build -t bangui-frontend -f Docker/Dockerfile.frontend .
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
# ── Stage 1: install & build ─────────────────────────────────
|
||||
FROM docker.io/library/node:22-alpine AS builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Install dependencies first (layer caching)
|
||||
COPY frontend/package.json frontend/package-lock.json* /build/
|
||||
RUN npm ci --ignore-scripts
|
||||
|
||||
# Copy source and build
|
||||
COPY frontend/ /build/
|
||||
RUN npm run build
|
||||
|
||||
# ── Stage 2: serve with nginx ────────────────────────────────
|
||||
FROM docker.io/library/nginx:1.27-alpine AS runtime
|
||||
|
||||
LABEL maintainer="BanGUI" \
|
||||
description="BanGUI frontend — fail2ban web management UI"
|
||||
|
||||
# Remove default nginx content
|
||||
RUN rm -rf /usr/share/nginx/html/*
|
||||
|
||||
# Copy built assets
|
||||
COPY --from=builder /build/dist /usr/share/nginx/html
|
||||
|
||||
# Custom nginx config for SPA routing + API reverse proxy
|
||||
COPY Docker/nginx.conf /etc/nginx/conf.d/default.conf
|
||||
|
||||
EXPOSE 80
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \
|
||||
CMD wget -qO /dev/null http://localhost:80/ || exit 1
|
||||
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
1
Docker/VERSION
Normal file
1
Docker/VERSION
Normal file
@@ -0,0 +1 @@
|
||||
v0.9.4
|
||||
67
Docker/check_ban_status.sh
Normal file
67
Docker/check_ban_status.sh
Normal file
@@ -0,0 +1,67 @@
|
||||
#!/usr/bin/env bash
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# check_ban_status.sh
|
||||
#
|
||||
# Queries the manual-Jail jail inside the running fail2ban
|
||||
# container and optionally unbans a specific IP.
|
||||
#
|
||||
# Usage:
|
||||
# bash Docker/check_ban_status.sh
|
||||
# bash Docker/check_ban_status.sh --unban 192.168.100.99
|
||||
#
|
||||
# Requirements:
|
||||
# The bangui-fail2ban-dev container must be running.
|
||||
# (docker compose -f Docker/compose.debug.yml up -d fail2ban)
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
readonly CONTAINER="bangui-fail2ban-dev"
|
||||
readonly JAIL="manual-Jail"
|
||||
|
||||
# ── Helper: run a fail2ban-client command inside the container ─
|
||||
f2b() {
|
||||
docker exec "${CONTAINER}" fail2ban-client "$@"
|
||||
}
|
||||
|
||||
# ── Parse arguments ───────────────────────────────────────────
|
||||
UNBAN_IP=""
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--unban)
|
||||
if [[ -z "${2:-}" ]]; then
|
||||
echo "ERROR: --unban requires an IP address argument." >&2
|
||||
exit 1
|
||||
fi
|
||||
UNBAN_IP="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Unknown argument: '$1'" >&2
|
||||
echo "Usage: $0 [--unban <IP>]" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# ── Unban mode ────────────────────────────────────────────────
|
||||
if [[ -n "${UNBAN_IP}" ]]; then
|
||||
echo "Unbanning ${UNBAN_IP} from jail '${JAIL}' ..."
|
||||
f2b set "${JAIL}" unbanip "${UNBAN_IP}"
|
||||
echo "Done. '${UNBAN_IP}' has been removed from the ban list."
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# ── Jail status ───────────────────────────────────────────────
|
||||
echo "═══════════════════════════════════════════"
|
||||
echo " Jail status: ${JAIL}"
|
||||
echo "═══════════════════════════════════════════"
|
||||
f2b status "${JAIL}"
|
||||
|
||||
# ── Banned IPs with timestamps ────────────────────────────────
|
||||
echo ""
|
||||
echo "═══════════════════════════════════════════"
|
||||
echo " Banned IPs with timestamps: ${JAIL}"
|
||||
echo "═══════════════════════════════════════════"
|
||||
f2b get "${JAIL}" banip --with-time || echo "(no IPs currently banned)"
|
||||
123
Docker/compose.debug.yml
Normal file
123
Docker/compose.debug.yml
Normal file
@@ -0,0 +1,123 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Debug / Development Compose
|
||||
#
|
||||
# Compatible with:
|
||||
# docker compose -f Docker/compose.debug.yml up
|
||||
# podman compose -f Docker/compose.debug.yml up
|
||||
# podman-compose -f Docker/compose.debug.yml up
|
||||
#
|
||||
# Features:
|
||||
# - Source code mounted as volumes (hot-reload)
|
||||
# - Uvicorn --reload for backend auto-restart
|
||||
# - Vite dev server for frontend with HMR
|
||||
# - Ports exposed on host for direct access
|
||||
# - Debug log level enabled
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
name: bangui-dev
|
||||
|
||||
services:
|
||||
# ── fail2ban ─────────────────────────────────────────────────
|
||||
fail2ban:
|
||||
image: lscr.io/linuxserver/fail2ban:latest
|
||||
container_name: bangui-fail2ban-dev
|
||||
restart: unless-stopped
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
network_mode: host
|
||||
environment:
|
||||
TZ: "${BANGUI_TIMEZONE:-UTC}"
|
||||
PUID: 0
|
||||
PGID: 0
|
||||
volumes:
|
||||
- ./fail2ban-dev-config:/config
|
||||
- fail2ban-dev-run:/var/run/fail2ban
|
||||
- /var/log:/var/log:ro
|
||||
- ./logs:/remotelogs/bangui
|
||||
healthcheck:
|
||||
test: ["CMD", "fail2ban-client", "ping"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
start_period: 15s
|
||||
retries: 3
|
||||
|
||||
# ── Backend (FastAPI + uvicorn with --reload) ───────────────
|
||||
backend:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: Docker/Dockerfile.backend
|
||||
target: runtime
|
||||
container_name: bangui-backend-dev
|
||||
restart: unless-stopped
|
||||
user: "0"
|
||||
depends_on:
|
||||
fail2ban:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
BANGUI_DATABASE_PATH: "/data/bangui.db"
|
||||
BANGUI_FAIL2BAN_SOCKET: "/var/run/fail2ban/fail2ban.sock"
|
||||
BANGUI_FAIL2BAN_CONFIG_DIR: "/config/fail2ban"
|
||||
BANGUI_LOG_LEVEL: "debug"
|
||||
BANGUI_SESSION_SECRET: "${BANGUI_SESSION_SECRET:-dev-secret-do-not-use-in-production}"
|
||||
BANGUI_TIMEZONE: "${BANGUI_TIMEZONE:-UTC}"
|
||||
volumes:
|
||||
- ../backend/app:/app/app:z
|
||||
- ../fail2ban-master:/app/fail2ban-master:ro,z
|
||||
- bangui-dev-data:/data
|
||||
- fail2ban-dev-run:/var/run/fail2ban:ro
|
||||
- ./fail2ban-dev-config:/config:rw
|
||||
ports:
|
||||
- "${BANGUI_BACKEND_PORT:-8000}:8000"
|
||||
command:
|
||||
[
|
||||
"uvicorn", "app.main:create_app", "--factory",
|
||||
"--host", "0.0.0.0", "--port", "8000",
|
||||
"--reload", "--reload-dir", "/app/app"
|
||||
]
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "python -c 'import urllib.request; urllib.request.urlopen(\"http://127.0.0.1:8000/api/health\", timeout=4)'"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
start_period: 45s
|
||||
retries: 5
|
||||
networks:
|
||||
- bangui-dev-net
|
||||
|
||||
# ── Frontend (Vite dev server with HMR) ─────────────────────
|
||||
frontend:
|
||||
image: node:22-alpine
|
||||
container_name: bangui-frontend-dev
|
||||
restart: unless-stopped
|
||||
working_dir: /app
|
||||
environment:
|
||||
NODE_ENV: development
|
||||
volumes:
|
||||
- ../frontend:/app:z
|
||||
- frontend-node-modules:/app/node_modules
|
||||
ports:
|
||||
- "${BANGUI_FRONTEND_PORT:-5173}:5173"
|
||||
command: ["sh", "-c", "npm install && npm run dev -- --host 0.0.0.0"]
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO", "/dev/null", "http://localhost:5173/"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
start_period: 30s
|
||||
retries: 5
|
||||
networks:
|
||||
- bangui-dev-net
|
||||
|
||||
volumes:
|
||||
bangui-dev-data:
|
||||
driver: local
|
||||
frontend-node-modules:
|
||||
driver: local
|
||||
fail2ban-dev-run:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
bangui-dev-net:
|
||||
driver: bridge
|
||||
109
Docker/compose.prod.yml
Normal file
109
Docker/compose.prod.yml
Normal file
@@ -0,0 +1,109 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Production Compose
|
||||
#
|
||||
# Compatible with:
|
||||
# docker compose -f Docker/compose.prod.yml up -d
|
||||
# podman compose -f Docker/compose.prod.yml up -d
|
||||
# podman-compose -f Docker/compose.prod.yml up -d
|
||||
#
|
||||
# Prerequisites:
|
||||
# Create a .env file at the project root (or pass --env-file):
|
||||
# BANGUI_SESSION_SECRET=<random-secret>
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
name: bangui
|
||||
|
||||
services:
|
||||
# ── fail2ban ─────────────────────────────────────────────────
|
||||
fail2ban:
|
||||
image: lscr.io/linuxserver/fail2ban:latest
|
||||
container_name: bangui-fail2ban
|
||||
restart: unless-stopped
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
network_mode: host
|
||||
environment:
|
||||
TZ: "${BANGUI_TIMEZONE:-UTC}"
|
||||
PUID: 0
|
||||
PGID: 0
|
||||
volumes:
|
||||
- fail2ban-config:/config
|
||||
- fail2ban-run:/var/run/fail2ban
|
||||
- /var/log:/var/log:ro
|
||||
healthcheck:
|
||||
test: ["CMD", "fail2ban-client", "ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
start_period: 15s
|
||||
retries: 3
|
||||
# NOTE: The fail2ban-config volume must be pre-populated with the following files:
|
||||
# • fail2ban/jail.conf (or jail.d/*.conf) with the DEFAULT section containing:
|
||||
# banaction = iptables-allports[lockingopt="-w 5"]
|
||||
# This prevents xtables lock contention errors when multiple jails start in parallel.
|
||||
# See https://fail2ban.readthedocs.io/en/latest/development/environment.html
|
||||
|
||||
# ── Backend (FastAPI + uvicorn) ─────────────────────────────
|
||||
backend:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: Docker/Dockerfile.backend
|
||||
container_name: bangui-backend
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
fail2ban:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
BANGUI_DATABASE_PATH: "/data/bangui.db"
|
||||
BANGUI_FAIL2BAN_SOCKET: "/var/run/fail2ban/fail2ban.sock"
|
||||
BANGUI_FAIL2BAN_CONFIG_DIR: "/config/fail2ban"
|
||||
BANGUI_LOG_LEVEL: "info"
|
||||
BANGUI_SESSION_SECRET: "${BANGUI_SESSION_SECRET:?Set BANGUI_SESSION_SECRET}"
|
||||
BANGUI_TIMEZONE: "${BANGUI_TIMEZONE:-UTC}"
|
||||
volumes:
|
||||
- bangui-data:/data
|
||||
- fail2ban-run:/var/run/fail2ban:ro
|
||||
- fail2ban-config:/config:rw
|
||||
expose:
|
||||
- "8000"
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/api/health')"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
start_period: 10s
|
||||
retries: 3
|
||||
networks:
|
||||
- bangui-net
|
||||
|
||||
# ── Frontend (nginx serving built SPA + API proxy) ──────────
|
||||
frontend:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: Docker/Dockerfile.frontend
|
||||
container_name: bangui-frontend
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${BANGUI_PORT:-8080}:80"
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO", "/dev/null", "http://localhost:80/"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
start_period: 5s
|
||||
retries: 3
|
||||
networks:
|
||||
- bangui-net
|
||||
|
||||
volumes:
|
||||
bangui-data:
|
||||
driver: local
|
||||
fail2ban-config:
|
||||
driver: local
|
||||
fail2ban-run:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
bangui-net:
|
||||
driver: bridge
|
||||
73
Docker/docker-compose.yml
Normal file
73
Docker/docker-compose.yml
Normal file
@@ -0,0 +1,73 @@
|
||||
version: '3.8'
|
||||
services:
|
||||
fail2ban:
|
||||
image: lscr.io/linuxserver/fail2ban:latest
|
||||
container_name: fail2ban
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
network_mode: host
|
||||
environment:
|
||||
- PUID=1011
|
||||
- PGID=1001
|
||||
- TZ=Etc/UTC
|
||||
- VERBOSITY=-vv #optional
|
||||
|
||||
volumes:
|
||||
- /server/server_fail2ban/config:/config
|
||||
- /server/server_fail2ban/fail2ban-run:/var/run/fail2ban
|
||||
- /var/log:/var/log
|
||||
- /server/server_nextcloud/config/nextcloud.log:/remotelogs/nextcloud/nextcloud.log:ro #optional
|
||||
- /server/server_nginx/data/logs:/remotelogs/nginx:ro #optional
|
||||
- /server/server_gitea/log/gitea.log:/remotelogs/gitea/gitea.log:ro #optional
|
||||
|
||||
|
||||
#- /path/to/homeassistant/log:/remotelogs/homeassistant:ro #optional
|
||||
#- /path/to/unificontroller/log:/remotelogs/unificontroller:ro #optional
|
||||
#- /path/to/vaultwarden/log:/remotelogs/vaultwarden:ro #optional
|
||||
restart: unless-stopped
|
||||
|
||||
backend:
|
||||
image: git.lpl-mind.de/lukas.pupkalipinski/bangui/backend:latest
|
||||
container_name: bangui-backend
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
fail2ban:
|
||||
condition: service_started
|
||||
environment:
|
||||
- PUID=1011
|
||||
- PGID=1001
|
||||
- BANGUI_DATABASE_PATH=/data/bangui.db
|
||||
- BANGUI_FAIL2BAN_SOCKET=/var/run/fail2ban/fail2ban.sock
|
||||
- BANGUI_FAIL2BAN_CONFIG_DIR=/config/fail2ban
|
||||
- BANGUI_LOG_LEVEL=info
|
||||
- BANGUI_SESSION_SECRET=${BANGUI_SESSION_SECRET:?Set BANGUI_SESSION_SECRET}
|
||||
- BANGUI_TIMEZONE=${BANGUI_TIMEZONE:-UTC}
|
||||
volumes:
|
||||
- /server/server_fail2ban/bangui-data:/data
|
||||
- /server/server_fail2ban/fail2ban-run:/var/run/fail2ban:ro
|
||||
- /server/server_fail2ban/config:/config:rw
|
||||
expose:
|
||||
- "8000"
|
||||
networks:
|
||||
- bangui-net
|
||||
|
||||
# ── Frontend (nginx serving built SPA + API proxy) ──────────
|
||||
frontend:
|
||||
image: git.lpl-mind.de/lukas.pupkalipinski/bangui/frontend:latest
|
||||
container_name: bangui-frontend
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- PUID=1011
|
||||
- PGID=1001
|
||||
ports:
|
||||
- "${BANGUI_PORT:-8080}:80"
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_started
|
||||
networks:
|
||||
- bangui-net
|
||||
|
||||
networks:
|
||||
bangui-net:
|
||||
name: bangui-net
|
||||
142
Docker/fail2ban-dev-config/README.md
Normal file
142
Docker/fail2ban-dev-config/README.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# BanGUI — Fail2ban Dev Test Environment
|
||||
|
||||
This directory contains the fail2ban configuration and supporting scripts for a
|
||||
self-contained development test environment. A simulation script writes fake
|
||||
authentication-failure log lines, fail2ban detects them via the `manual-Jail`
|
||||
jail, and bans the offending IP — giving a fully reproducible ban/unban cycle
|
||||
without a real service.
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Docker or Podman installed and running.
|
||||
- `docker compose` (v2) or `podman-compose` available on the `PATH`.
|
||||
- The repo checked out; all commands run from the **repo root**.
|
||||
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1 — Start the fail2ban container
|
||||
|
||||
```bash
|
||||
docker compose -f Docker/compose.debug.yml up -d fail2ban
|
||||
# or: make up (starts the full dev stack)
|
||||
```
|
||||
|
||||
Wait ~15 s for the health-check to pass (`docker ps` shows `healthy`).
|
||||
|
||||
### 2 — Run the login-failure simulation
|
||||
|
||||
```bash
|
||||
bash Docker/simulate_failed_logins.sh
|
||||
```
|
||||
|
||||
Default: writes **5** failure lines for IP `192.168.100.99` to
|
||||
`Docker/logs/auth.log`.
|
||||
Optional overrides:
|
||||
|
||||
```bash
|
||||
bash Docker/simulate_failed_logins.sh <COUNT> <SOURCE_IP> <LOG_FILE>
|
||||
# e.g. bash Docker/simulate_failed_logins.sh 10 203.0.113.42
|
||||
```
|
||||
|
||||
### 3 — Verify the IP was banned
|
||||
|
||||
```bash
|
||||
bash Docker/check_ban_status.sh
|
||||
```
|
||||
|
||||
The output shows the current jail counters and the list of banned IPs with their
|
||||
ban expiry timestamps.
|
||||
|
||||
### 4 — Unban and re-test
|
||||
|
||||
```bash
|
||||
bash Docker/check_ban_status.sh --unban 192.168.100.99
|
||||
```
|
||||
|
||||
### One-command smoke test (Makefile shortcut)
|
||||
|
||||
```bash
|
||||
make dev-ban-test
|
||||
```
|
||||
|
||||
Chains steps 1–3 automatically with appropriate sleep intervals.
|
||||
|
||||
---
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `fail2ban/filter.d/manual-Jail.conf` | Defines the `failregex` that matches simulation log lines |
|
||||
| `fail2ban/jail.d/manual-Jail.conf` | Jail settings: `maxretry=3`, `bantime=60s`, `findtime=120s` |
|
||||
| `Docker/logs/auth.log` | Log file written by the simulation script (host path) |
|
||||
|
||||
Inside the container the log file is mounted at `/remotelogs/bangui/auth.log`
|
||||
(see `fail2ban/paths-lsio.conf` — `remote_logs_path = /remotelogs`).
|
||||
|
||||
To change sensitivity, edit `fail2ban/jail.d/manual-Jail.conf`:
|
||||
|
||||
```ini
|
||||
maxretry = 3 # failures before a ban
|
||||
findtime = 120 # look-back window in seconds
|
||||
bantime = 60 # ban duration in seconds
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Log file not detected
|
||||
|
||||
The jail uses `backend = polling` for reliability inside Docker containers.
|
||||
If fail2ban still does not pick up new lines, verify the volume mount in
|
||||
`Docker/compose.debug.yml`:
|
||||
|
||||
```yaml
|
||||
- ./logs:/remotelogs/bangui
|
||||
```
|
||||
|
||||
and confirm `Docker/logs/auth.log` exists after running the simulation script.
|
||||
|
||||
### Filter regex mismatch
|
||||
|
||||
Test the regex manually:
|
||||
|
||||
```bash
|
||||
docker exec bangui-fail2ban-dev \
|
||||
fail2ban-regex /remotelogs/bangui/auth.log manual-Jail
|
||||
```
|
||||
|
||||
The output should show matched lines. If nothing matches, check that the log
|
||||
lines match the corresponding `failregex` pattern:
|
||||
|
||||
```
|
||||
# manual-Jail (auth log):
|
||||
YYYY-MM-DD HH:MM:SS bangui-auth: authentication failure from <IP>
|
||||
```
|
||||
|
||||
### iptables / permission errors
|
||||
|
||||
The fail2ban container requires `NET_ADMIN` and `NET_RAW` capabilities and
|
||||
`network_mode: host`. Both are already set in `Docker/compose.debug.yml`. If
|
||||
you see iptables errors, check that the host kernel has iptables loaded:
|
||||
|
||||
```bash
|
||||
sudo modprobe ip_tables
|
||||
```
|
||||
|
||||
### IP not banned despite enough failures
|
||||
|
||||
Check whether the source IP falls inside the `ignoreip` range defined in
|
||||
`fail2ban/jail.d/manual-Jail.conf`:
|
||||
|
||||
```ini
|
||||
ignoreip = 127.0.0.0/8 ::1 172.16.0.0/12
|
||||
```
|
||||
|
||||
The default simulation IP `192.168.100.99` is outside these ranges and will be
|
||||
banned normally.
|
||||
@@ -0,0 +1,13 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Simulated authentication failure filter
|
||||
#
|
||||
# Matches lines written by Docker/simulate_failed_logins.sh
|
||||
# Format: <timestamp> bangui-auth: authentication failure from <HOST>
|
||||
# Jail: manual-Jail
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
[Definition]
|
||||
|
||||
failregex = ^.* bangui-auth: authentication failure from <HOST>\s*$
|
||||
|
||||
ignoreregex =
|
||||
@@ -0,0 +1,25 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Blocklist-import jail
|
||||
#
|
||||
# Dedicated jail for IPs banned via the BanGUI blocklist import
|
||||
# feature. This is a manual-ban jail: it does not watch any log
|
||||
# file. All bans are injected programmatically via
|
||||
# fail2ban-client set blocklist-import banip <ip>
|
||||
# which the BanGUI backend uses through its fail2ban socket
|
||||
# client.
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
[blocklist-import]
|
||||
|
||||
enabled = true
|
||||
# No log-based detection — only manual banip commands are used.
|
||||
filter =
|
||||
logpath = /dev/null
|
||||
backend = auto
|
||||
maxretry = 1
|
||||
findtime = 1d
|
||||
# Block imported IPs for one week.
|
||||
bantime = 1w
|
||||
|
||||
# Never ban the Docker bridge network or localhost.
|
||||
ignoreip = 127.0.0.0/8 ::1 172.16.0.0/12
|
||||
19
Docker/fail2ban-dev-config/fail2ban/jail.d/manual-Jail.conf
Normal file
19
Docker/fail2ban-dev-config/fail2ban/jail.d/manual-Jail.conf
Normal file
@@ -0,0 +1,19 @@
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# BanGUI — Simulated authentication failure jail
|
||||
#
|
||||
# Watches Docker/logs/auth.log (mounted at /remotelogs/bangui)
|
||||
# for lines produced by Docker/simulate_failed_logins.sh.
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
|
||||
[manual-Jail]
|
||||
|
||||
enabled = true
|
||||
filter = manual-Jail
|
||||
logpath = /remotelogs/bangui/auth.log
|
||||
backend = polling
|
||||
maxretry = 3
|
||||
findtime = 120
|
||||
bantime = 60
|
||||
|
||||
# Never ban localhost, the Docker bridge network, or the host machine.
|
||||
ignoreip = 127.0.0.0/8 ::1 172.16.0.0/12
|
||||
6
Docker/fail2ban-dev-config/fail2ban/jail.local
Normal file
6
Docker/fail2ban-dev-config/fail2ban/jail.local
Normal file
@@ -0,0 +1,6 @@
|
||||
# Local overrides — not overwritten by the container init script.
# Provides banaction so all jails can resolve %(action_)s interpolation.

[DEFAULT]
# Default ban action for jails that list specific ports.
banaction = iptables-multiport
# Ban action used when a jail requests blocking on all ports.
banaction_allports = iptables-allports
|
||||
34
Docker/nginx.conf
Normal file
34
Docker/nginx.conf
Normal file
@@ -0,0 +1,34 @@
|
||||
# BanGUI frontend server block: serves the built SPA and proxies API calls
# to the backend container. Plain HTTP on port 80 — TLS termination, if any,
# presumably happens upstream (no listen 443 here) — TODO confirm.
server {
    listen 80;
    # Catch-all: respond regardless of the Host header.
    server_name _;

    root /usr/share/nginx/html;
    index index.html;

    # ── Gzip compression ─────────────────────────────────────
    gzip on;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml;
    # Skip tiny responses where gzip overhead outweighs the saving.
    gzip_min_length 256;

    # ── API reverse proxy → backend container ─────────────────
    location /api/ {
        # No URI part on proxy_pass, so the full original /api/... path is
        # forwarded to the backend unchanged.
        proxy_pass http://backend:8000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 60s;
    }

    # ── Static assets with long-term caching ──────────────────
    # Safe to cache for a year: Vite emits content-hashed filenames under
    # /assets/ — presumably; confirm against the frontend build output.
    location /assets/ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        try_files $uri =404;
    }

    # ── SPA fallback — serve index.html for client routes ─────
    location / {
        try_files $uri $uri/ /index.html;
    }
}
|
||||
106
Docker/push.sh
Normal file
106
Docker/push.sh
Normal file
@@ -0,0 +1,106 @@
|
||||
#!/usr/bin/env bash
#
# Build and push BanGUI container images to the Gitea registry.
#
# Usage:
#   ./push.sh                     # builds & pushes with tag "latest"
#   ./push.sh v1.2.3              # builds & pushes with tag "v1.2.3"
#   ./push.sh v1.2.3 --no-build   # pushes existing images only
#   ./push.sh --no-build          # pushes existing "latest" images only
#
# Prerequisites:
#   podman login git.lpl-mind.de   (or: docker login git.lpl-mind.de)

set -euo pipefail

# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
REGISTRY="git.lpl-mind.de"
NAMESPACE="lukas.pupkalipinski"
PROJECT="bangui"

BACKEND_IMAGE="${REGISTRY}/${NAMESPACE}/${PROJECT}/backend"
FRONTEND_IMAGE="${REGISTRY}/${NAMESPACE}/${PROJECT}/frontend"

# ---------------------------------------------------------------------------
# Argument parsing
#
# Accept --no-build in ANY position. Previously only $2 was checked, so
# "./push.sh --no-build" silently used "--no-build" as the image tag and
# rebuilt everything. The first non-flag argument is the tag; default "latest".
# ---------------------------------------------------------------------------
TAG="latest"
SKIP_BUILD=false
for arg in "$@"; do
    case "${arg}" in
        --no-build) SKIP_BUILD=true ;;
        *)          TAG="${arg}" ;;
    esac
done

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
log() { echo -e "\n>>> $*"; }
err() { echo -e "\nERROR: $*" >&2; exit 1; }

# Detect container engine (podman preferred, docker fallback)
if command -v podman &>/dev/null; then
    ENGINE="podman"
elif command -v docker &>/dev/null; then
    ENGINE="docker"
else
    err "Neither podman nor docker is installed."
fi

# ---------------------------------------------------------------------------
# Pre-flight checks
# ---------------------------------------------------------------------------
echo "============================================"
echo "  BanGUI — Build & Push"
echo "  Engine   : ${ENGINE}"
echo "  Registry : ${REGISTRY}"
echo "  Tag      : ${TAG}"
echo "============================================"

# Only podman exposes "login --get-login"; with docker an auth problem
# surfaces at push time instead.
if [[ "${ENGINE}" == "podman" ]]; then
    if ! podman login --get-login "${REGISTRY}" &>/dev/null; then
        err "Not logged in. Run:\n  podman login ${REGISTRY}"
    fi
fi

# ---------------------------------------------------------------------------
# Build
# ---------------------------------------------------------------------------
if [[ "${SKIP_BUILD}" == false ]]; then
    log "Building backend image → ${BACKEND_IMAGE}:${TAG}"
    "${ENGINE}" build \
        -t "${BACKEND_IMAGE}:${TAG}" \
        -f "${SCRIPT_DIR}/Dockerfile.backend" \
        "${PROJECT_ROOT}"

    log "Building frontend image → ${FRONTEND_IMAGE}:${TAG}"
    "${ENGINE}" build \
        -t "${FRONTEND_IMAGE}:${TAG}" \
        -f "${SCRIPT_DIR}/Dockerfile.frontend" \
        "${PROJECT_ROOT}"
fi

# ---------------------------------------------------------------------------
# Push
# ---------------------------------------------------------------------------
log "Pushing ${BACKEND_IMAGE}:${TAG}"
"${ENGINE}" push "${BACKEND_IMAGE}:${TAG}"

log "Pushing ${FRONTEND_IMAGE}:${TAG}"
"${ENGINE}" push "${FRONTEND_IMAGE}:${TAG}"

# ---------------------------------------------------------------------------
# Summary
# ---------------------------------------------------------------------------
echo ""
echo "============================================"
echo "  Push complete!"
echo ""
echo "  Images:"
echo "    ${BACKEND_IMAGE}:${TAG}"
echo "    ${FRONTEND_IMAGE}:${TAG}"
echo ""
echo "  Deploy on server:"
echo "    ${ENGINE} login ${REGISTRY}"
echo "    ${ENGINE} compose -f Docker/compose.prod.yml pull"
echo "    ${ENGINE} compose -f Docker/compose.prod.yml up -d"
echo "============================================"
|
||||
86
Docker/release.sh
Normal file
86
Docker/release.sh
Normal file
@@ -0,0 +1,86 @@
|
||||
#!/usr/bin/env bash
#
# Bump the project version and push images to the registry.
#
# Usage:
#   ./release.sh
#
# The current version is stored in VERSION (next to this script).
# You will be asked whether to bump major, minor, or patch.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
VERSION_FILE="${SCRIPT_DIR}/VERSION"

# ---------------------------------------------------------------------------
# Read current version
# ---------------------------------------------------------------------------
if [[ ! -f "${VERSION_FILE}" ]]; then
    echo "0.0.0" > "${VERSION_FILE}"
fi

CURRENT="$(cat "${VERSION_FILE}")"
# Strip leading 'v' for arithmetic
VERSION="${CURRENT#v}"

IFS='.' read -r MAJOR MINOR PATCH <<< "${VERSION}"

# Guard against a malformed VERSION file. Without this, $((PATCH + 1)) aborts
# with a cryptic arithmetic error — and bash arithmetic *evaluates* variable
# contents, so arbitrary file content must never reach it.
for part in "${MAJOR:-}" "${MINOR:-}" "${PATCH:-}"; do
    if ! [[ "${part}" =~ ^[0-9]+$ ]]; then
        echo "ERROR: '${CURRENT}' in ${VERSION_FILE} is not vMAJOR.MINOR.PATCH." >&2
        exit 1
    fi
done

echo "============================================"
echo "  BanGUI — Release"
echo "  Current version: v${MAJOR}.${MINOR}.${PATCH}"
echo "============================================"
echo ""
echo "How would you like to bump the version?"
echo "  1) patch (v${MAJOR}.${MINOR}.${PATCH} → v${MAJOR}.${MINOR}.$((PATCH + 1)))"
echo "  2) minor (v${MAJOR}.${MINOR}.${PATCH} → v${MAJOR}.$((MINOR + 1)).0)"
echo "  3) major (v${MAJOR}.${MINOR}.${PATCH} → v$((MAJOR + 1)).0.0)"
echo ""
read -rp "Enter choice [1/2/3]: " CHOICE

case "${CHOICE}" in
    1) NEW_TAG="v${MAJOR}.${MINOR}.$((PATCH + 1))" ;;
    2) NEW_TAG="v${MAJOR}.$((MINOR + 1)).0" ;;
    3) NEW_TAG="v$((MAJOR + 1)).0.0" ;;
    *)
        echo "Invalid choice. Aborting." >&2
        exit 1
        ;;
esac

echo ""
echo "New version: ${NEW_TAG}"
read -rp "Confirm? [y/N]: " CONFIRM
if [[ ! "${CONFIRM}" =~ ^[yY]$ ]]; then
    echo "Aborted."
    exit 0
fi

# ---------------------------------------------------------------------------
# Write new version
# ---------------------------------------------------------------------------
echo "${NEW_TAG}" > "${VERSION_FILE}"
echo "Version file updated → ${VERSION_FILE}"

# Keep frontend/package.json in sync so __APP_VERSION__ matches Docker/VERSION.
FRONT_VERSION="${NEW_TAG#v}"
FRONT_PKG="${SCRIPT_DIR}/../frontend/package.json"
# Fail loudly if the file is missing: under `set -e` a failing sed would abort
# AFTER the VERSION file was already rewritten, leaving the release half-done.
if [[ ! -f "${FRONT_PKG}" ]]; then
    echo "ERROR: ${FRONT_PKG} not found — aborting before git commit/tag." >&2
    exit 1
fi
sed -i "s/\"version\": \"[^\"]*\"/\"version\": \"${FRONT_VERSION}\"/" "${FRONT_PKG}"
echo "frontend/package.json version updated → ${FRONT_VERSION}"

# ---------------------------------------------------------------------------
# Git tag
# ---------------------------------------------------------------------------
cd "${SCRIPT_DIR}/.."
git add Docker/VERSION frontend/package.json
git commit -m "chore: release ${NEW_TAG}"
git tag -a "${NEW_TAG}" -m "Release ${NEW_TAG}"
git push origin HEAD
git push origin "${NEW_TAG}"
echo "Git tag ${NEW_TAG} created and pushed."

# ---------------------------------------------------------------------------
# Push
# ---------------------------------------------------------------------------
# First push the version tag, then run again with the default tag so the
# registry's "latest" also points at this release (the second build reuses
# the layer cache from the first).
bash "${SCRIPT_DIR}/push.sh" "${NEW_TAG}"
bash "${SCRIPT_DIR}/push.sh"
|
||||
59
Docker/simulate_failed_logins.sh
Normal file
59
Docker/simulate_failed_logins.sh
Normal file
@@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env bash
# ──────────────────────────────────────────────────────────────
# simulate_failed_logins.sh
#
# Appends synthetic authentication-failure lines to a log file
# in exactly the format the manual-Jail fail2ban filter matches.
#
# Usage:
#   bash Docker/simulate_failed_logins.sh [COUNT] [SOURCE_IP] [LOG_FILE]
#
# Defaults:
#   COUNT    : 5
#   SOURCE_IP: 192.168.100.99
#   LOG_FILE : Docker/logs/auth.log (relative to repo root)
#
# Emitted line format (must match manual-Jail failregex exactly):
#   YYYY-MM-DD HH:MM:SS bangui-auth: authentication failure from <IP>
# ──────────────────────────────────────────────────────────────

set -euo pipefail

# ── Defaults ──────────────────────────────────────────────────
readonly DEFAULT_COUNT=5
readonly DEFAULT_IP="192.168.100.99"

# Anchor the default log path at this script's directory so the
# script behaves the same from any working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly DEFAULT_LOG_FILE="${SCRIPT_DIR}/logs/auth.log"

# ── Arguments ─────────────────────────────────────────────────
COUNT="${1:-${DEFAULT_COUNT}}"
SOURCE_IP="${2:-${DEFAULT_IP}}"
LOG_FILE="${3:-${DEFAULT_LOG_FILE}}"

# ── Validate COUNT is a positive integer ──────────────────────
if ! [[ "${COUNT}" =~ ^[1-9][0-9]*$ ]]; then
    echo "ERROR: COUNT must be a positive integer, got: '${COUNT}'" >&2
    exit 1
fi

# ── Ensure log directory exists ───────────────────────────────
mkdir -p "$(dirname "${LOG_FILE}")"

# ── Write failure lines ───────────────────────────────────────
echo "Writing ${COUNT} authentication-failure line(s) for ${SOURCE_IP} to ${LOG_FILE} ..."

written=0
while (( written < COUNT )); do
    # Timestamp is taken per line so successive entries differ.
    printf '%s bangui-auth: authentication failure from %s\n' \
        "$(date '+%Y-%m-%d %H:%M:%S')" "${SOURCE_IP}" >> "${LOG_FILE}"
    (( ++written ))
    # Brief pause so fail2ban's polling backend sees distinct events.
    sleep 0.5
done

# ── Summary ───────────────────────────────────────────────────
echo "Done."
echo "  Lines written : ${COUNT}"
echo "  Source IP     : ${SOURCE_IP}"
echo "  Log file      : ${LOG_FILE}"
|
||||
@@ -123,6 +123,7 @@ backend/
|
||||
│ │ └── import_log_repo.py # Import run history records
|
||||
│ ├── tasks/ # APScheduler background jobs
|
||||
│ │ ├── blocklist_import.py # Scheduled blocklist download and application
|
||||
│ │ ├── geo_cache_flush.py # Periodic geo cache persistence (dirty-set flush to SQLite)
|
||||
│ │ └── health_check.py # Periodic fail2ban connectivity probe
|
||||
│ └── utils/ # Helpers, constants, shared types
|
||||
│ ├── fail2ban_client.py # Async wrapper around the fail2ban socket protocol
|
||||
@@ -151,7 +152,8 @@ The HTTP interface layer. Each router maps URL paths to handler functions. Route
|
||||
| `dashboard.py` | `/api/dashboard` | Server status bar data, recent bans for the dashboard |
|
||||
| `jails.py` | `/api/jails` | List jails, jail detail, start/stop/reload/idle controls |
|
||||
| `bans.py` | `/api/bans` | Ban an IP, unban an IP, unban all, list currently banned IPs |
|
||||
| `config.py` | `/api/config` | Read and write fail2ban jail/filter/server configuration |
|
||||
| `config.py` | `/api/config` | Read and write fail2ban jail/filter/server configuration via the socket; also serves the fail2ban log tail and service status for the Log tab |
|
||||
| `file_config.py` | `/api/config` | Read and write fail2ban config files on disk (jail.d/, filter.d/, action.d/) — list, get, and overwrite raw file contents, toggle jail enabled/disabled |
|
||||
| `history.py` | `/api/history` | Query historical bans, per-IP timeline |
|
||||
| `blocklist.py` | `/api/blocklists` | CRUD blocklist sources, trigger import, view import logs |
|
||||
| `geo.py` | `/api/geo` | IP geolocation lookup, ASN and RIR data |
|
||||
@@ -167,7 +169,10 @@ The business logic layer. Services orchestrate operations, enforce rules, and co
|
||||
| `setup_service.py` | Validates setup input, persists initial configuration, ensures setup runs only once |
|
||||
| `jail_service.py` | Retrieves jail list and details from fail2ban, aggregates metrics (banned count, failure count), sends start/stop/reload/idle commands |
|
||||
| `ban_service.py` | Executes ban and unban commands via the fail2ban socket, queries the currently banned IP list, validates IPs before banning |
|
||||
| `config_service.py` | Reads active jail and filter configuration from fail2ban, writes configuration changes, validates regex patterns, triggers reload |
|
||||
| `config_service.py` | Reads active jail and filter configuration from fail2ban, writes configuration changes, validates regex patterns, triggers reload; reads the fail2ban log file tail and queries service status for the Log tab |
|
||||
| `file_config_service.py` | Reads and writes raw fail2ban config files on disk (jail.d/, filter.d/, action.d/); lists files, reads content, overwrites files, toggles enabled/disabled |
|
||||
| `config_file_service.py` | Parses jail.conf / jail.local / jail.d/* to discover inactive jails; writes .local overrides to activate or deactivate jails; triggers fail2ban reload |
|
||||
| `conffile_parser.py` | Parses fail2ban `.conf` files into structured Python types (jail config, filter config, action config); also serialises back to text |
|
||||
| `history_service.py` | Queries the fail2ban database for historical ban records, builds per-IP timelines, computes ban counts and repeat-offender flags |
|
||||
| `blocklist_service.py` | Downloads blocklists via aiohttp, validates IPs/CIDRs, applies bans through fail2ban or iptables, logs import results |
|
||||
| `geo_service.py` | Resolves IP addresses to country, ASN, and RIR using external APIs or a local database, caches results |
|
||||
@@ -200,6 +205,7 @@ APScheduler background jobs that run on a schedule without user interaction.
|
||||
| Task | Purpose |
|
||||
|---|---|
|
||||
| `blocklist_import.py` | Downloads all enabled blocklist sources, validates entries, applies bans, records results in the import log |
|
||||
| `geo_cache_flush.py` | Periodically flushes newly resolved IPs from the in-memory dirty set to the `geo_cache` SQLite table (default: every 60 seconds). GET requests populate only the in-memory cache; this task persists them without blocking any request. |
|
||||
| `health_check.py` | Periodically pings the fail2ban socket and updates the cached server status so the frontend always has fresh data |
|
||||
|
||||
#### Utils (`app/utils/`)
|
||||
@@ -285,6 +291,8 @@ frontend/
|
||||
│ │ ├── WorldMap.tsx # Country-outline map with ban counts
|
||||
│ │ ├── ImportLogTable.tsx # Blocklist import run history
|
||||
│ │ ├── ConfirmDialog.tsx # Reusable confirmation modal
|
||||
│ │ ├── RequireAuth.tsx # Route guard: redirects unauthenticated users to /login
|
||||
│ │ ├── SetupGuard.tsx # Route guard: redirects to /setup if setup incomplete
|
||||
│ │ └── ... # (additional shared components)
|
||||
│ ├── hooks/ # Custom React hooks (stateful logic + API calls)
|
||||
│ │ ├── useAuth.ts # Login state, login/logout actions
|
||||
@@ -300,8 +308,8 @@ frontend/
|
||||
│ ├── pages/ # Route-level page components (one per route)
|
||||
│ │ ├── SetupPage.tsx # First-run wizard
|
||||
│ │ ├── LoginPage.tsx # Password prompt
|
||||
│ │ ├── DashboardPage.tsx # Ban overview, status bar, access list
|
||||
│ │ ├── WorldMapPage.tsx # Geographical ban map + access table
|
||||
│ │ ├── DashboardPage.tsx # Ban overview, status bar
|
||||
│ │ ├── WorldMapPage.tsx # Geographical ban map
|
||||
│ │ ├── JailsPage.tsx # Jail list, detail, controls, ban/unban
|
||||
│ │ ├── ConfigPage.tsx # Configuration viewer/editor
|
||||
│ │ ├── HistoryPage.tsx # Ban history browser
|
||||
@@ -325,6 +333,7 @@ frontend/
|
||||
│ ├── utils/ # Pure helper functions
|
||||
│ │ ├── formatDate.ts # Date/time formatting with timezone support
|
||||
│ │ ├── formatIp.ts # IP display formatting
|
||||
│ │ ├── crypto.ts # Browser-native SHA-256 helper (SubtleCrypto)
|
||||
│ │ └── constants.ts # Frontend constants (time presets, etc.)
|
||||
│ ├── App.tsx # Root: FluentProvider + BrowserRouter + routes
|
||||
│ ├── main.tsx # Vite entry point
|
||||
@@ -344,8 +353,8 @@ Top-level route components. Each page composes layout, components, and hooks to
|
||||
|---|---|---|
|
||||
| `SetupPage` | `/setup` | First-run wizard: set master password, database path, fail2ban connection, preferences |
|
||||
| `LoginPage` | `/login` | Single-field password prompt; redirects to requested page after success |
|
||||
| `DashboardPage` | `/` | Server status bar, ban list table, access list tab, time-range selector |
|
||||
| `WorldMapPage` | `/map` | World map with per-country ban counts, companion access table, country filter |
|
||||
| `DashboardPage` | `/` | Server status bar, ban list table, time-range selector |
|
||||
| `WorldMapPage` | `/map` | World map with per-country ban counts, country filter |
|
||||
| `JailsPage` | `/jails` | Jail overview list, jail detail panel, controls (start/stop/reload), ban/unban forms, IP lookup, whitelist management |
|
||||
| `ConfigPage` | `/config` | View and edit jail parameters, filter regex, server settings, regex tester, add log observation |
|
||||
| `HistoryPage` | `/history` | Browse all past bans, filter by jail/IP/time, per-IP timeline drill-down |
|
||||
@@ -366,6 +375,11 @@ Reusable UI building blocks. Components receive data via props, emit changes via
|
||||
| `RegexTester` | Side-by-side sample log + regex input with live match highlighting |
|
||||
| `ImportLogTable` | Table displaying blocklist import history |
|
||||
| `ConfirmDialog` | Reusable Fluent UI Dialog for destructive action confirmations |
|
||||
| `RequireAuth` | Route guard: renders children only when authenticated; otherwise redirects to `/login?next=<path>` |
|
||||
| `SetupGuard` | Route guard: checks `GET /api/setup` on mount and redirects to `/setup` if not complete; shows a spinner while loading |
|
||||
| `config/ConfigListDetail` | Reusable two-pane master/detail layout used by the Jails, Filters, and Actions config tabs. Left pane lists items with active/inactive badges (active sorted first, keyboard navigable); right pane renders the selected item's detail content. Collapses to a dropdown on narrow screens. |
|
||||
| `config/RawConfigSection` | Collapsible section that lazily loads the raw text of a config file into a monospace textarea. Provides a Save button backed by a configurable save callback; shows idle/saving/saved/error feedback. Used by all three config tabs. |
|
||||
| `config/AutoSaveIndicator` | Small inline indicator showing the current save state (idle, saving, saved, error) for form fields that auto-save on change. |
|
||||
|
||||
#### Hooks (`src/hooks/`)
|
||||
|
||||
@@ -376,7 +390,12 @@ Encapsulate all stateful logic, side effects, and API calls. Components and page
|
||||
| `useAuth` | Manages login state, provides `login()`, `logout()`, and `isAuthenticated` |
|
||||
| `useBans` | Fetches ban list for a given time range, returns `{ bans, loading, error }` |
|
||||
| `useJails` | Fetches jail list and individual jail detail |
|
||||
| `useConfig` | Reads and writes fail2ban configuration |
|
||||
| `useConfig` | Reads and writes fail2ban jail configuration via the socket-based API |
|
||||
| `useFilterConfig` | Fetches and manages a single filter file's parsed configuration |
|
||||
| `useActionConfig` | Fetches and manages a single action file's parsed configuration |
|
||||
| `useJailFileConfig` | Fetches and manages a single jail.d config file |
|
||||
| `useConfigActiveStatus` | Derives active status sets for jails, filters, and actions by correlating the live jail list with the config file lists; returns `{ activeJails, activeFilters, activeActions, loading, error, refresh }` |
|
||||
| `useAutoSave` | Debounced auto-save hook: invokes a save callback after the user stops typing, tracks saving/saved/error state |
|
||||
| `useHistory` | Queries historical ban data with filters |
|
||||
| `useBlocklists` | Manages blocklist sources and import triggers |
|
||||
| `useServerStatus` | Polls the server status endpoint at an interval |
|
||||
@@ -394,7 +413,7 @@ A thin typed wrapper around `fetch`. All HTTP communication is centralised here
|
||||
| `dashboard.ts` | `fetchStatus()`, `fetchRecentBans()` |
|
||||
| `jails.ts` | `fetchJails()`, `fetchJailDetail()`, `startJail()`, `stopJail()`, `reloadJail()` |
|
||||
| `bans.ts` | `banIp()`, `unbanIp()`, `unbanAll()`, `fetchBannedIps()` |
|
||||
| `config.ts` | `fetchConfig()`, `updateConfig()`, `testRegex()` |
|
||||
| `config.ts` | Socket-based config: `fetchJailConfigs()`, `updateJailConfig()`, `testRegex()`. File-based config: `fetchJailFiles()`, `fetchJailFile()`, `writeJailFile()`, `setJailFileEnabled()`, `fetchFilterFiles()`, `fetchFilterFile()`, `writeFilterFile()`, `fetchActionFiles()`, `fetchActionFile()`, `writeActionFile()`, `reloadConfig()` |
|
||||
| `history.ts` | `fetchHistory()`, `fetchIpTimeline()` |
|
||||
| `blocklist.ts` | `fetchSources()`, `addSource()`, `removeSource()`, `triggerImport()`, `fetchImportLog()` |
|
||||
| `geo.ts` | `lookupIp()` |
|
||||
@@ -410,7 +429,8 @@ React context providers for application-wide concerns.
|
||||
|
||||
| Provider | Purpose |
|
||||
|---|---|
|
||||
| `AuthProvider` | Holds authentication state, wraps protected routes, redirects unauthenticated users to `/login` |
|
||||
| `AuthProvider` | Holds authentication state; exposes `isAuthenticated`, `login()`, and `logout()` via `useAuth()` |
|
||||
| `TimezoneProvider` | Reads the configured IANA timezone from the backend and supplies it to all children via `useTimezone()` |
|
||||
| `ThemeProvider` | Manages light/dark theme selection, supplies the active Fluent UI theme to `FluentProvider` |
|
||||
|
||||
#### Theme (`src/theme/`)
|
||||
@@ -419,7 +439,14 @@ Fluent UI custom theme definitions and design token constants. No component logi
|
||||
|
||||
#### Utils (`src/utils/`)
|
||||
|
||||
Pure helper functions with no React or framework dependency. Date formatting, IP display formatting, shared constants.
|
||||
Pure helper functions with no React or framework dependency. Date formatting, IP display formatting, shared constants, and cryptographic utilities.
|
||||
|
||||
| Utility | Purpose |
|
||||
|---|---|
|
||||
| `formatDate.ts` | Date/time formatting with IANA timezone support |
|
||||
| `formatIp.ts` | IP address display formatting |
|
||||
| `crypto.ts` | `sha256Hex(input)` — SHA-256 digest via browser-native `SubtleCrypto` API; used to hash passwords before transmission |
|
||||
| `constants.ts` | Frontend constants (time presets, etc.) |
|
||||
|
||||
---
|
||||
|
||||
@@ -573,6 +600,7 @@ BanGUI maintains its **own SQLite database** (separate from the fail2ban databas
|
||||
|---|---|
|
||||
| `settings` | Key-value store for application configuration (master password hash, fail2ban socket path, database path, timezone, session duration) |
|
||||
| `sessions` | Active session tokens with expiry timestamps |
|
||||
| `geo_cache` | Resolved IP geolocation results (ip, country_code, country_name, asn, org, cached_at). Loaded into memory at startup via `load_cache_from_db()`; new entries are flushed back by the `geo_cache_flush` background task. |
|
||||
| `blocklist_sources` | Registered blocklist URLs (id, name, url, enabled, created_at, updated_at) |
|
||||
| `import_logs` | Record of every blocklist import run (id, source_id, timestamp, ips_imported, ips_skipped, errors, status) |
|
||||
|
||||
@@ -593,6 +621,8 @@ BanGUI maintains its **own SQLite database** (separate from the fail2ban databas
|
||||
- Session expiry is configurable (set during setup, stored in `settings`).
|
||||
- The frontend `AuthProvider` checks session validity on mount and redirects to `/login` if invalid.
|
||||
- The backend `dependencies.py` provides an `authenticated` dependency that validates the session cookie on every protected endpoint.
|
||||
- **Session validation cache** — validated session tokens are cached in memory for 10 seconds (`_session_cache` dict in `dependencies.py`) to avoid a SQLite round-trip on every request from the same browser. The cache is invalidated immediately on logout.
|
||||
- **Setup-completion flag** — once `is_setup_complete()` returns `True`, the result is stored in `app.state._setup_complete_cached`. The `SetupRedirectMiddleware` skips the DB query on all subsequent requests, removing 1 SQL query per request for the common post-setup case.
|
||||
|
||||
---
|
||||
|
||||
@@ -606,6 +636,7 @@ APScheduler 4.x (async mode) manages recurring background tasks.
|
||||
│ (async, in-process) │
|
||||
├──────────────────────┤
|
||||
│ blocklist_import │ ── runs on configured schedule (default: daily 03:00)
|
||||
│ geo_cache_flush │ ── runs every 60 seconds
|
||||
│ health_check │ ── runs every 30 seconds
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
@@ -111,6 +111,15 @@ backend/
|
||||
- Group endpoints into routers by feature domain (`routers/jails.py`, `routers/bans.py`, …).
|
||||
- Use appropriate HTTP status codes: `201` for creation, `204` for deletion with no body, `404` for not found, etc.
|
||||
- Use **HTTPException** or custom exception handlers — never return error dicts manually.
|
||||
- **GET endpoints are read-only — never call `db.commit()` or execute INSERT/UPDATE/DELETE inside a GET handler.** If a GET path produces side-effects (e.g., caching resolved data), that write belongs in a background task, a scheduled flush, or a separate POST endpoint. Users and HTTP caches assume GET is idempotent and non-mutating.
|
||||
|
||||
```python
|
||||
# Good — pass db=None on GET so geo_service never commits
|
||||
result = await geo_service.lookup_batch(ips, http_session, db=None)
|
||||
|
||||
# Bad — triggers INSERT + COMMIT per IP inside a GET handler
|
||||
result = await geo_service.lookup_batch(ips, http_session, db=app_db)
|
||||
```
|
||||
|
||||
```python
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
@@ -156,6 +165,26 @@ class BanResponse(BaseModel):
|
||||
- Use `aiohttp.ClientSession` for HTTP calls, `aiosqlite` for database access.
|
||||
- Use `asyncio.TaskGroup` (Python 3.11+) when you need to run independent coroutines concurrently.
|
||||
- Long-running startup/shutdown logic goes into the **FastAPI lifespan** context manager.
|
||||
- **Never call `db.commit()` inside a loop.** With aiosqlite, every commit serialises through a background thread and forces an `fsync`. N rows × 1 commit = N fsyncs. Accumulate all writes in the loop, then issue a single `db.commit()` once after the loop ends. The difference between 5,000 commits and 1 commit can be seconds vs milliseconds.
|
||||
|
||||
```python
|
||||
# Good — one commit for the whole batch
|
||||
for ip, info in results.items():
|
||||
await db.execute(INSERT_SQL, (ip, info.country_code, ...))
|
||||
await db.commit() # ← single fsync
|
||||
|
||||
# Bad — one fsync per row
|
||||
for ip, info in results.items():
|
||||
await db.execute(INSERT_SQL, (ip, info.country_code, ...))
|
||||
await db.commit() # ← fsync on every iteration
|
||||
```
|
||||
- **Prefer `executemany()` over calling `execute()` in a loop** when inserting or updating multiple rows with the same SQL template. aiosqlite passes the entire batch to SQLite in one call, reducing Python↔thread overhead on top of the single-commit saving.
|
||||
|
||||
```python
|
||||
# Good
|
||||
await db.executemany(INSERT_SQL, [(ip, cc, cn, asn, org) for ip, info in results.items()])
|
||||
await db.commit()
|
||||
```
|
||||
- Shared resources (DB connections, HTTP sessions) are created once during startup and closed during shutdown — never inside request handlers.
|
||||
|
||||
```python
|
||||
@@ -427,4 +456,7 @@ class SqliteBanRepository:
|
||||
| Handle errors with custom exceptions | Use bare `except:` |
|
||||
| Keep routers thin, logic in services | Put business logic in routers |
|
||||
| Use `datetime.now(datetime.UTC)` | Use naive datetimes |
|
||||
| Run ruff + mypy before committing | Push code that doesn't pass linting |
|
||||
| Run ruff + mypy before committing | Push code that doesn't pass linting |
|
||||
| Keep GET endpoints read-only (no `db.commit()`) | Call `db.commit()` / INSERT inside GET handlers |
|
||||
| Batch DB writes; issue one `db.commit()` after the loop | Commit inside a loop (1 fsync per row) |
|
||||
| Use `executemany()` for bulk inserts | Call `execute()` + `commit()` per row in a loop |
|
||||
@@ -8,7 +8,9 @@ A web application to monitor, manage, and configure fail2ban from a clean, acces
|
||||
|
||||
- Displayed automatically on first launch when no configuration exists.
|
||||
- As long as no configuration is saved, every route redirects to the setup page.
|
||||
- Once setup is complete and a configuration is saved, the setup page is never shown again and cannot be accessed.
|
||||
- Once setup is complete and a configuration is saved, the setup page redirects to the login page and cannot be used again.
|
||||
- The `SetupGuard` component checks the setup status on every protected route; if setup is not complete it redirects the user to `/setup`.
|
||||
- **Security:** The master password is SHA-256 hashed in the browser using the native `SubtleCrypto` API before it is transmitted. The backend then bcrypt-hashes the received hash with an auto-generated salt. The plaintext password never leaves the browser and is never stored.
|
||||
|
||||
### Options
|
||||
|
||||
@@ -51,12 +53,6 @@ The main landing page after login. Shows recent ban activity at a glance.
|
||||
- Last 30 days (month)
|
||||
- Last 365 days (year)
|
||||
|
||||
### Access List
|
||||
|
||||
- A secondary view (tab or toggle) on the same page showing **all recorded accesses**, not just bans.
|
||||
- Uses the same table format: time, IP address, requested URL, country, domain, subdomain.
|
||||
- Shares the same time-range presets so the user can compare total traffic against banned traffic for the same period.
|
||||
|
||||
---
|
||||
|
||||
## 4. World Map View
|
||||
@@ -65,21 +61,24 @@ A geographical overview of ban activity.
|
||||
|
||||
### Map
|
||||
|
||||
- A full world map rendered with country outlines only (no fill colours, no satellite imagery).
|
||||
- For every country that has at least one banned IP in the selected time range, the total count is displayed centred inside that country's borders.
|
||||
- Countries with zero banned IPs show no number and no label — they remain blank.
|
||||
- A full world map rendered with country outlines, showing ban activity through color-coded fills (no satellite imagery).
|
||||
- **Color coding:** Countries are colored based on their ban count for the selected time range:
|
||||
- **Red:** High ban count (100+ bans by default)
|
||||
- **Yellow:** Medium ban count (50 bans by default)
|
||||
- **Green:** Low ban count (20 bans by default)
|
||||
- **Transparent (no fill):** Zero bans
|
||||
- Colors are smoothly interpolated between the thresholds (e.g., 35 bans shows a yellow-green blend)
|
||||
- The color threshold values are configurable through the application settings
|
||||
- **Interactive zoom and pan:** Users can zoom in/out using mouse wheel or touch gestures, and pan by clicking and dragging. This allows detailed inspection of densely-affected regions. Zoom controls (zoom in, zoom out, reset view) are provided as overlay buttons in the top-right corner.
|
||||
- For every country that has bans in the selected time range, the total count is displayed centred inside that country's borders.
|
||||
- Countries with zero banned IPs show no number and no label — they remain blank and transparent.
|
||||
- Clicking a country filters the companion table below to show only bans from that country.
|
||||
- Time-range selector with the same quick presets:
|
||||
- Last 24 hours
|
||||
- Last 7 days
|
||||
- Last 30 days
|
||||
- Last 365 days
|
||||
|
||||
### Access List (Map context)
|
||||
|
||||
- A companion table below or beside the map listing all accesses for the selected time range.
|
||||
- Same columns as the Ban Overview tables: time, IP, URL, country, domain, subdomain.
|
||||
- Selecting a country on the map filters the table to show only entries from that country.
|
||||
|
||||
---
|
||||
|
||||
## 5. Jail Management
|
||||
@@ -91,6 +90,8 @@ A dedicated view for managing fail2ban jails and taking manual ban actions.
|
||||
- A list of all jails showing their name, current status (running / stopped / idle), backend type, and key metrics.
|
||||
- For each jail: number of currently banned IPs, total bans since start, current failures detected, and total failures.
|
||||
- Quick indicators for the jail's find time, ban time, and max retries.
|
||||
- A toggle to also show **Inactive Jails** — jails that are defined in fail2ban config files but are not currently running.
|
||||
- Each inactive jail has an **Activate** button that enables and reloads it immediately, with optional overrides for ban time, find time, max retries, port, and log path.
|
||||
|
||||
### Jail Detail
|
||||
|
||||
@@ -151,9 +152,14 @@ A page to inspect and modify the fail2ban configuration without leaving the web
|
||||
|
||||
### View Configuration
|
||||
|
||||
- Display all active fail2ban jails and their current settings.
|
||||
- For each jail, show the associated filter and its regex patterns in a readable format.
|
||||
- Show global fail2ban settings (ban time, find time, max retries, etc.).
|
||||
- The **Jails**, **Filters**, and **Actions** tabs each use a **master/detail list layout**:
|
||||
- A scrollable left pane lists all items (jail names, filter filenames, action filenames).
|
||||
- Each item displays an **Active** or **Inactive** badge. Active items are sorted to the top; items within each group are sorted alphabetically.
|
||||
- A jail is "active" if fail2ban reports it as enabled at runtime. A filter or action is "active" if it is referenced by at least one enabled jail.
|
||||
- Inactive jails (present in config files but not running) are discoverable from the Jails tab. Selecting one shows its config file settings and allows activating it.
|
||||
- Clicking an item loads its structured configuration form in the right detail pane.
|
||||
- On narrow screens (< 900 px) the list pane collapses into a dropdown above the detail pane.
|
||||
- Show global fail2ban settings (ban time, find time, max retries, etc.) on the Global Settings tab.
|
||||
|
||||
### Edit Configuration
|
||||
|
||||
@@ -166,6 +172,16 @@ A page to inspect and modify the fail2ban configuration without leaving the web
|
||||
- Configure ban-time escalation: enable incremental banning and set factor, formula, multipliers, maximum ban time, and random jitter.
|
||||
- Save changes and optionally reload fail2ban to apply them immediately.
|
||||
- Validation feedback if a regex pattern or setting value is invalid before saving.
|
||||
- **Activate** an inactive jail directly from the Jails tab detail pane, with optional parameter overrides.
|
||||
- **Deactivate** a running jail from the Jails tab; writes ``enabled = false`` to a local override file and reloads fail2ban.
|
||||
|
||||
### Raw Configuration Editing
|
||||
|
||||
- Every jail, filter, and action detail pane includes a collapsible **Raw Configuration** section at the bottom.
|
||||
- The section shows the complete raw text of the config file (`.conf`) in an editable monospace textarea.
|
||||
- The user can edit the raw text directly and click **Save Raw** to overwrite the file on disk.
|
||||
- The textarea loads lazily — the raw file content is only fetched when the section is first expanded.
|
||||
- A save-state indicator shows idle / saving / saved / error feedback after each save attempt.
|
||||
|
||||
### Add Log Observation
|
||||
|
||||
@@ -194,6 +210,37 @@ A page to inspect and modify the fail2ban configuration without leaving the web
|
||||
- Set the database purge age — how long historical ban records are kept before automatic cleanup.
|
||||
- Set the maximum number of log-line matches stored per ban record in the database.
|
||||
|
||||
### Map Settings
|
||||
|
||||
- Configure the three color thresholds that determine how countries are colored on the World Map view based on their ban count:
|
||||
- **Low Threshold (Green):** Ban count at which the color transitions from light green to full green (default: 20).
|
||||
- **Medium Threshold (Yellow):** Ban count at which the color transitions from green to yellow (default: 50).
|
||||
- **High Threshold (Red):** Ban count at which the color transitions from yellow to red (default: 100).
|
||||
- Countries with ban counts between thresholds display smoothly interpolated colors.
|
||||
- Countries with zero bans remain transparent (no fill).
|
||||
- Changes take effect immediately on the World Map view without requiring a page reload.
|
||||
|
||||
### Log
|
||||
|
||||
- A dedicated **Log** tab on the Configuration page shows fail2ban service health and a live log viewer in one place.
|
||||
- **Service Health panel** (always visible):
|
||||
- Online/offline **badge** (Running / Offline).
|
||||
- When online: version, active jail count, currently banned IPs, and currently failed attempts as stat cards.
|
||||
- Log level and log target displayed as meta labels.
|
||||
- Warning banner when fail2ban is offline, prompting the user to check the server and socket configuration.
|
||||
- **Log Viewer** (shown when fail2ban logs to a file):
|
||||
- Displays the tail of the fail2ban log file in a scrollable monospace container.
|
||||
- Log lines are **color-coded by severity**: errors and critical messages in red, warnings in yellow, debug lines in grey, and informational lines in the default color.
|
||||
- Toolbar controls:
|
||||
- **Filter** — substring input with 300 ms debounce; only lines containing the filter text are shown.
|
||||
- **Lines** — selector for how many tail lines to fetch (100 / 200 / 500 / 1000).
|
||||
- **Refresh** button for an on-demand reload.
|
||||
- **Auto-refresh** toggle with interval selector (5 s / 10 s / 30 s) for live monitoring.
|
||||
- Truncation notice when the total log file line count exceeds the requested tail limit.
|
||||
- Container automatically scrolls to the bottom after each data update.
|
||||
- When fail2ban is configured to log to a non-file target (STDOUT, STDERR, SYSLOG, SYSTEMD-JOURNAL), an informational banner explains that file-based log viewing is unavailable.
|
||||
- The log file path is validated against a safe prefix allowlist on the backend to prevent path-traversal reads.
|
||||
|
||||
---
|
||||
|
||||
## 7. Ban History
|
||||
|
||||
@@ -226,3 +226,34 @@ Verify against [Architekture.md](Architekture.md) and the project structure rule
|
||||
- **Never** push directly to `main` — always use feature branches.
|
||||
- **Never** skip the review step — sloppy code compounds over time.
|
||||
- **Never** leave a task half-done — finish it or revert it.
|
||||
|
||||
---
|
||||
|
||||
## 7. Dev Quick-Reference
|
||||
|
||||
### Start / stop the stack
|
||||
|
||||
```bash
|
||||
make up # start all containers (from repo root)
|
||||
make down # stop all containers
|
||||
make logs # tail logs
|
||||
```
|
||||
|
||||
Backend: `http://127.0.0.1:8000` · Frontend (Vite proxy): `http://127.0.0.1:5173`
|
||||
|
||||
### API login (dev)
|
||||
|
||||
The frontend SHA-256-hashes the password before sending it to the API.
|
||||
The session cookie is named `bangui_session`.
|
||||
|
||||
```bash
|
||||
# Dev master password: Hallo123!
|
||||
HASHED=$(echo -n "Hallo123!" | sha256sum | awk '{print $1}')
|
||||
TOKEN=$(curl -s -X POST http://127.0.0.1:8000/api/auth/login \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d "{\"password\":\"$HASHED\"}" \
|
||||
| python3 -c 'import sys,json; print(json.load(sys.stdin)["token"])')
|
||||
|
||||
# Use token in subsequent requests:
|
||||
curl -H "Cookie: bangui_session=$TOKEN" http://127.0.0.1:8000/api/dashboard/status
|
||||
```
|
||||
|
||||
408
Docs/Tasks.md
408
Docs/Tasks.md
@@ -4,406 +4,56 @@ This document breaks the entire BanGUI project into development stages, ordered
|
||||
|
||||
---
|
||||
|
||||
## Stage 1 — Project Scaffolding
|
||||
## Open Issues
|
||||
|
||||
Everything in this stage is about creating the project skeleton — folder structures, configuration files, and tooling — so that development can begin on solid ground. No application logic is written here.
|
||||
### ~~1. Dashboard — Version Tag Mismatch~~ ✅ Done
|
||||
|
||||
### 1.1 Initialise the backend project
|
||||
**Implemented:**
|
||||
- `frontend/vite.config.ts`: reads `package.json#version` at build time and injects it as the global `__APP_VERSION__` via Vite `define`.
|
||||
- `frontend/src/vite-env.d.ts`: adds `declare const __APP_VERSION__: string` so TypeScript knows about the global.
|
||||
- `frontend/src/layouts/MainLayout.tsx`: renders `BanGUI v{__APP_VERSION__}` in the sidebar footer when expanded (hidden when collapsed).
|
||||
- `frontend/src/components/ServerStatusBar.tsx`: tooltip changed from `"fail2ban version"` to `"fail2ban daemon version"`.
|
||||
- `Docker/release.sh`: after bumping `VERSION`, also updates `frontend/package.json#version` via `sed` to keep them in sync.
|
||||
- `frontend/package.json`: version bumped from `0.9.0` to `0.9.3` to match `Docker/VERSION`.
|
||||
- Tests added: `src/components/__tests__/ServerStatusBar.test.tsx`, `src/layouts/__tests__/MainLayout.test.tsx`.
|
||||
|
||||
Create the `backend/` directory with the full folder structure defined in [Backend-Development.md § 3](Backend-Development.md). Set up `pyproject.toml` with all required dependencies (FastAPI, Pydantic v2, aiosqlite, aiohttp, APScheduler 4.x, structlog, pydantic-settings) and dev dependencies (pytest, pytest-asyncio, httpx, ruff, mypy). Configure ruff for 120-character line length and double-quote strings. Configure mypy in strict mode. Add a `.env.example` with placeholder keys for `BANGUI_DATABASE_PATH`, `BANGUI_FAIL2BAN_SOCKET`, and `BANGUI_SESSION_SECRET`. Make sure the bundled fail2ban client at `./fail2ban-master` is importable by configuring the path in `pyproject.toml` or a startup shim as described in [Backend-Development.md § 2](Backend-Development.md).
|
||||
**Problem:** The `ServerStatusBar` component on the Dashboard displays `v{status.version}`, which is the **fail2ban daemon version** (e.g. `v1.1.0`). The BanGUI application version lives in `Docker/VERSION` (e.g. `v0.9.3`) and is unrelated to the fail2ban version. Users see a version number they don't recognise and assume it reflects the BanGUI release.
|
||||
|
||||
### 1.2 Initialise the frontend project
|
||||
**Goal:** Make the distinction clear and expose the BanGUI application version.
|
||||
|
||||
Scaffold a Vite + React + TypeScript project inside `frontend/`. Install `@fluentui/react-components`, `@fluentui/react-icons`, and `react-router-dom`. Set up `tsconfig.json` with `"strict": true`. Configure ESLint with `@typescript-eslint`, `eslint-plugin-react-hooks`, and `eslint-config-prettier`. Add Prettier with the project defaults. Create the directory structure from [Web-Development.md § 4](Web-Development.md): `src/api/`, `src/components/`, `src/hooks/`, `src/layouts/`, `src/pages/`, `src/providers/`, `src/theme/`, `src/types/`, `src/utils/`. Create a minimal `App.tsx` that wraps the application in `<FluentProvider>` and `<BrowserRouter>` as shown in [Web-Development.md § 5](Web-Development.md).
|
||||
**Suggested approach:**
|
||||
1. Inject the BanGUI app version at build time — add a `define` entry in `frontend/vite.config.ts` that reads the `version` field from `frontend/package.json` (e.g. `__APP_VERSION__`). Keep `frontend/package.json` and `Docker/VERSION` in sync (update the release script `Docker/release.sh` or `Makefile` to write `package.json#version` from `VERSION`).
|
||||
2. Show the BanGUI version in the sidebar footer inside `MainLayout.tsx` (visible when the sidebar is expanded; when collapsed, hide it or expose it via a tooltip). This is the natural place for an "about" version tag.
|
||||
3. Update the fail2ban version tooltip in `ServerStatusBar.tsx` from the generic `"fail2ban version"` to something like `"fail2ban daemon version"` so the two are no longer visually indistinguishable.
|
||||
|
||||
### 1.3 Set up the Fluent UI custom theme
|
||||
|
||||
Create the light and dark brand-colour themes inside `frontend/src/theme/`. Follow the colour rules in [Web-Design.md § 2](Web-Design.md): use the Fluent UI Theme Designer to generate a brand ramp, ensure the primary colour meets the 4.5 : 1 contrast ratio, and export both `lightTheme` and `darkTheme`. Wire the theme into `App.tsx` via the `FluentProvider` `theme` prop.
|
||||
|
||||
### 1.4 Create the central API client
|
||||
|
||||
Build the typed API client in `frontend/src/api/client.ts`. It should be a thin wrapper around `fetch` that returns typed responses, includes credentials, and throws a custom `ApiError` on non-OK responses. Define the `BASE_URL` from `import.meta.env.VITE_API_URL` with a fallback to `"/api"`. Create `frontend/src/api/endpoints.ts` for path constants. See [Web-Development.md § 3](Web-Development.md) for the pattern.
|
||||
|
||||
### 1.5 Create the FastAPI application factory
|
||||
|
||||
Implement `backend/app/main.py` with the `create_app()` factory function. Register the async lifespan context manager that opens the aiosqlite database connection, creates a shared `aiohttp.ClientSession`, and initialises the APScheduler instance on startup, then closes all three on shutdown. Store these on `app.state`. Register a placeholder router so the app can start and respond to a health-check request. See [Backend-Development.md § 6](Backend-Development.md) and [Architekture.md § 2](Architekture.md) for details.
|
||||
|
||||
### 1.6 Create the Pydantic settings model
|
||||
|
||||
Implement `backend/app/config.py` using pydantic-settings. Define the `Settings` class with fields for `database_path`, `fail2ban_socket`, `session_secret`, `session_duration_minutes`, and `timezone`. Load from environment variables prefixed `BANGUI_` and from `.env`. Validate at startup — the app must fail fast with a clear error if required values are missing. See [Backend-Development.md § 11](Backend-Development.md).
|
||||
|
||||
### 1.7 Set up the application database schema
|
||||
|
||||
Design and create the SQLite schema for BanGUI's own data. The database needs tables for application settings (key-value pairs for master password hash, database path, fail2ban socket path, preferences), sessions (token, created-at, expires-at), blocklist sources (name, URL, enabled flag), and import log entries (timestamp, source URL, IPs imported, IPs skipped, errors). Write an initialisation function that creates these tables on first run via aiosqlite. This schema is for BanGUI's internal state — it does not replace the fail2ban database. See [Architekture.md § 2.2](Architekture.md) for the repository breakdown.
|
||||
|
||||
### 1.8 Write the fail2ban socket client wrapper
|
||||
|
||||
Implement `backend/app/utils/fail2ban_client.py` — an async wrapper around the fail2ban Unix domain socket protocol. Study `./fail2ban-master/fail2ban/client/csocket.py` and `./fail2ban-master/fail2ban/client/fail2banclient.py` to understand the wire protocol (pickle-based command/response). The wrapper should provide async methods for sending commands and receiving responses, handle connection errors gracefully, and log every interaction with structlog. This module is the single point of contact between BanGUI and the fail2ban daemon. See [Backend-Development.md § 2 (fail2ban Client Usage)](Backend-Development.md) and [Architekture.md § 2.2 (Utils)](Architekture.md).
|
||||
**Files:** `frontend/vite.config.ts`, `frontend/package.json`, `Docker/VERSION`, `Docker/release.sh`, `frontend/src/layouts/MainLayout.tsx`, `frontend/src/components/ServerStatusBar.tsx`.
|
||||
|
||||
---
|
||||
|
||||
## Stage 2 — Authentication & Setup Flow
|
||||
### ~~2. Dashboard — Improve "Failures" Tooltip~~ ✅ Done
|
||||
|
||||
This stage implements the very first user experience: the setup wizard that runs on first launch and the login system that protects every subsequent visit. All other features depend on these being complete.
|
||||
**Implemented:** In `frontend/src/components/ServerStatusBar.tsx`, changed the `Failures:` label to `Failed Attempts:` and updated the tooltip from `"Currently failing IPs"` to `"Total failed authentication attempts currently tracked by fail2ban across all active jails"`. Updated `ServerStatusBar.test.tsx` to assert the new label text.
|
||||
|
||||
### 2.1 Implement the setup service and repository
|
||||
**Problem:** The `ServerStatusBar` shows a "Failures: 42" counter with the tooltip `"Currently failing IPs"`. In fail2ban terminology *failures* are individual **failed authentication attempts** tracked in the fail2ban DB, not the number of unique IPs that failed. The current wording is ambiguous and misleading — users may think it means broken connections or error states.
|
||||
|
||||
Build `backend/app/services/setup_service.py` and `backend/app/repositories/settings_repo.py`. The setup service accepts the initial configuration (master password, database path, fail2ban socket path, general preferences), hashes the password with a secure algorithm (e.g. bcrypt or argon2), and persists everything through the settings repository. It must enforce the one-time-only rule: once a configuration is saved, setup cannot run again. Add a method to check whether setup has been completed (i.e. whether any configuration exists in the database). See [Features.md § 1](Features.md).
|
||||
**Goal:** Replace the tooltip with accurate, self-explanatory wording.
|
||||
|
||||
### 2.2 Implement the setup router
|
||||
**Suggested fix:** Change the `Tooltip` content for the Failures stat in `ServerStatusBar.tsx` from `"Currently failing IPs"` to something like `"Total failed authentication attempts currently tracked by fail2ban across all active jails"`. Additionally, consider renaming the label from `"Failures:"` to `"Failed Attempts:"` to match the tooltip language.
|
||||
|
||||
Create `backend/app/routers/setup.py` with a `POST /api/setup` endpoint that accepts a Pydantic request model containing all setup fields and delegates to the setup service. If setup has already been completed, return a `409 Conflict`. Define request and response models in `backend/app/models/setup.py`.
|
||||
|
||||
### 2.3 Implement the setup-redirect middleware
|
||||
|
||||
Add middleware to the FastAPI app that checks on every incoming request whether setup has been completed. If not, redirect all requests (except those to `/api/setup` itself) to `/api/setup` with a `307 Temporary Redirect` or return a `403` with a clear message. Once setup is done, the middleware becomes a no-op. See [Features.md § 1](Features.md).
|
||||
|
||||
### 2.4 Implement the authentication service
|
||||
|
||||
Build `backend/app/services/auth_service.py`. It must verify the master password against the stored hash, create session tokens on successful login, store sessions through `backend/app/repositories/session_repo.py`, validate tokens on every subsequent request, and enforce session expiry. Sessions should be stored in the SQLite database so they survive server restarts. See [Features.md § 2](Features.md) and [Architekture.md § 2.2](Architekture.md).
|
||||
|
||||
### 2.5 Implement the auth router
|
||||
|
||||
Create `backend/app/routers/auth.py` with two endpoints: `POST /api/auth/login` (accepts a password, returns a session token or sets a cookie) and `POST /api/auth/logout` (invalidates the session). Define request and response models in `backend/app/models/auth.py`.
|
||||
|
||||
### 2.6 Implement the auth dependency
|
||||
|
||||
Create a FastAPI dependency in `backend/app/dependencies.py` that extracts the session token from the request (cookie or header), validates it through the auth service, and either returns the authenticated session or raises a `401 Unauthorized`. Every protected router must declare this dependency. See [Backend-Development.md § 4](Backend-Development.md) for the Depends pattern.
|
||||
|
||||
### 2.7 Build the setup page (frontend)
|
||||
|
||||
Create `frontend/src/pages/SetupPage.tsx`. The page should present a form with fields for the master password (with confirmation), database path, fail2ban socket path, and general preferences (timezone, date format, session duration). Use Fluent UI form components (`Input`, `Button`, `Field`, `Dropdown` for timezone). On submission, call `POST /api/setup` through the API client. Show validation errors inline. After successful setup, redirect to the login page. Create the corresponding API function in `frontend/src/api/setup.ts` and types in `frontend/src/types/setup.ts`. See [Features.md § 1](Features.md) and [Web-Design.md § 8](Web-Design.md) for component choices.
|
||||
|
||||
### 2.8 Build the login page (frontend)
|
||||
|
||||
Create `frontend/src/pages/LoginPage.tsx`. A single password input and a submit button — no username field. On submission, call `POST /api/auth/login`. On success, store the session (cookie or context) and redirect to the originally requested page or the dashboard. Show an error message on wrong password. Create `frontend/src/api/auth.ts` and `frontend/src/types/auth.ts`. See [Features.md § 2](Features.md).
|
||||
|
||||
### 2.9 Implement the auth context and route guard
|
||||
|
||||
Create `frontend/src/providers/AuthProvider.tsx` that manages authentication state (logged in / not logged in) and exposes login, logout, and session-check methods via React context. Create a route guard component that wraps all protected routes: if the user is not authenticated, redirect to the login page and remember the intended destination. After login, redirect back. See [Features.md § 2](Features.md) and [Web-Development.md § 7](Web-Development.md).
|
||||
|
||||
### 2.10 Write tests for setup and auth
|
||||
|
||||
Write backend tests covering: setup endpoint accepts valid data, setup endpoint rejects a second call, login succeeds with correct password, login fails with wrong password, protected endpoints reject unauthenticated requests, logout invalidates the session for both router and service. Use pytest-asyncio and httpx `AsyncClient` as described in [Backend-Development.md § 9](Backend-Development.md).
|
||||
**Files:** `frontend/src/components/ServerStatusBar.tsx`.
|
||||
|
||||
---
|
||||
|
||||
## Stage 3 — Application Shell & Navigation
|
||||
### ~~3. Config → Server Tab — Move "Service Health" to Top~~ ✅ Done
|
||||
|
||||
With authentication working, this stage builds the persistent layout that every page shares: the navigation sidebar, the header, and the routing skeleton.
|
||||
**Implemented:** In `frontend/src/components/config/ServerTab.tsx`, moved `<ServerHealthSection />` from the end of the JSX return to be the first element rendered inside the tab container, before all settings fields.
|
||||
|
||||
### 3.1 Build the main layout component
|
||||
**Problem:** In the Config page → Server tab, the `Service Health` panel (`ServerHealthSection`) is rendered at the bottom of the tab, after all settings sections (log level, log target, DB purge settings, map thresholds, reload/restart buttons). This means users must scroll past all editable fields to check service connectivity status, even though the health status is the most critical piece of context — it indicates whether the server is reachable at all.
|
||||
|
||||
Create `frontend/src/layouts/MainLayout.tsx`. This is the outer shell visible on every authenticated page. It contains a fixed-width sidebar navigation (240 px, collapsing to 48 px on small screens) and a main content area. Use the Fluent UI `Nav` component for the sidebar with groups for Dashboard, World Map, Jails, Configuration, History, Blocklists, and a Logout action at the bottom. The layout must be responsive following the breakpoints in [Web-Design.md § 4](Web-Design.md). The main content area is capped at 1440 px and centred on wide screens.
|
||||
**Goal:** Move the `<ServerHealthSection />` block to the **top** of the `ServerTab` render output, before any settings fields.
|
||||
|
||||
### 3.2 Set up client-side routing
|
||||
**Suggested fix:** In `frontend/src/components/config/ServerTab.tsx`, move the `{/* Service Health & Log Viewer section */}` block (currently at the end of the JSX return around line 415) to be the first section rendered inside the tab container.
|
||||
|
||||
Configure React Router in `frontend/src/App.tsx` (or a dedicated `AppRoutes.tsx`). Define routes for every page: `/` (dashboard), `/map`, `/jails`, `/jails/:name`, `/config`, `/history`, `/blocklists`, `/setup`, `/login`. Wrap all routes except setup and login inside the auth guard from Stage 2. Use the `MainLayout` for authenticated routes. Create placeholder page components for each route so navigation works end to end.
|
||||
|
||||
### 3.3 Implement the logout flow
|
||||
|
||||
Wire the Logout button in the sidebar to call `POST /api/auth/logout`, clear the client-side session state, and redirect to the login page. The logout option must be accessible from every page as specified in [Features.md § 2](Features.md).
|
||||
**Files:** `frontend/src/components/config/ServerTab.tsx`.
|
||||
|
||||
---
|
||||
|
||||
## Stage 4 — fail2ban Connection & Server Status
|
||||
|
||||
This stage establishes the live connection to the fail2ban daemon and surfaces its health to the user. It is a prerequisite for every data-driven feature.
|
||||
|
||||
### 4.1 Implement the health service
|
||||
|
||||
Build `backend/app/services/health_service.py`. It connects to the fail2ban socket using the wrapper from Stage 1.8, sends a `status` command, and parses the response to extract: whether the server is reachable, the fail2ban version, the number of active jails, and aggregated ban/failure counts. Expose a method that returns a structured health status object. Log connectivity changes (online → offline and vice versa) via structlog. See [Features.md § 3 (Server Status Bar)](Features.md).
|
||||
|
||||
### 4.2 Implement the health-check background task
|
||||
|
||||
Create `backend/app/tasks/health_check.py` — an APScheduler job that runs the health service probe every 30 seconds and caches the result in memory (e.g. on `app.state`). This ensures the dashboard endpoint can return fresh status without blocking on a socket call. See [Architekture.md § 2.2 (Tasks)](Architekture.md).
|
||||
|
||||
### 4.3 Implement the dashboard status endpoint
|
||||
|
||||
Create `backend/app/routers/dashboard.py` with a `GET /api/dashboard/status` endpoint that returns the cached server status (online/offline, version, jail count, total bans, total failures). Define response models in `backend/app/models/server.py`. This endpoint is lightweight — it reads from the in-memory cache populated by the health-check task.
|
||||
|
||||
### 4.4 Build the server status bar component (frontend)
|
||||
|
||||
Create `frontend/src/components/ServerStatusBar.tsx`. This persistent bar appears at the top of the dashboard (and optionally on other pages). It displays the fail2ban connection status (green badge for online, red for offline), the server version, active jail count, and total bans/failures. Use Fluent UI `Badge` and `Text` components. Poll `GET /api/dashboard/status` at a reasonable interval or on page focus. Create `frontend/src/api/dashboard.ts`, `frontend/src/types/server.ts`, and a `useServerStatus` hook.
|
||||
|
||||
### 4.5 Write tests for health service and dashboard
|
||||
|
||||
Test that the health service correctly parses a mock fail2ban status response, handles socket errors gracefully, and that the dashboard endpoint returns the expected shape. Mock the fail2ban socket — tests must never touch a real daemon.
|
||||
|
||||
---
|
||||
|
||||
## Stage 5 — Ban Overview (Dashboard)
|
||||
|
||||
The main landing page. This stage delivers the ban list and access list tables that give users a quick picture of recent activity.
|
||||
|
||||
### 5.1 Implement the ban service (list recent bans)
|
||||
|
||||
Build `backend/app/services/ban_service.py` with a method that queries the fail2ban database for bans within a given time range. The fail2ban SQLite database stores ban records — read them using aiosqlite (open the fail2ban DB path from settings, read-only). Return structured ban objects including IP, jail, timestamp, and any additional metadata available. See [Features.md § 3 (Ban List)](Features.md).
|
||||
|
||||
### 5.2 Implement the geo service
|
||||
|
||||
Build `backend/app/services/geo_service.py`. Given an IP address, resolve its country of origin (and optionally ASN and RIR). Use an external API via aiohttp or a local GeoIP database. Cache results to avoid repeated lookups for the same IP. The geo service is used throughout the application wherever country information is displayed. See [Features.md § 5 (IP Lookup)](Features.md) and [Architekture.md § 2.2](Architekture.md).
|
||||
|
||||
### 5.3 Implement the dashboard bans endpoint
|
||||
|
||||
Add `GET /api/dashboard/bans` to `backend/app/routers/dashboard.py`. It accepts a time-range query parameter (hours or a preset like `24h`, `7d`, `30d`, `365d`). It calls the ban service to retrieve bans in that window, enriches each ban with country data from the geo service, and returns a paginated list. Define request/response models in `backend/app/models/ban.py`.
|
||||
|
||||
### 5.4 Build the ban list table (frontend)
|
||||
|
||||
Create `frontend/src/components/BanTable.tsx` using Fluent UI `DataGrid`. Columns: time of ban, IP address (monospace), requested URL/service, country, domain, subdomain. Rows are sorted newest-first. Above the table, place a time-range selector implemented as a `Toolbar` with `ToggleButton` for the four presets (24 h, 7 d, 30 d, 365 d). Create a `useBans` hook that calls `GET /api/dashboard/bans` with the selected range. See [Features.md § 3 (Ban List)](Features.md) and [Web-Design.md § 8 (Data Display)](Web-Design.md).
|
||||
|
||||
### 5.5 Build the dashboard page
|
||||
|
||||
Create `frontend/src/pages/DashboardPage.tsx`. Compose the server status bar at the top, then a `Pivot` (tab control) switching between "Ban List" and "Access List". The Ban List tab renders the `BanTable`. The Access List tab uses the same table component but fetches all recorded accesses, not just bans. If the access list requires a separate endpoint, add `GET /api/dashboard/accesses` to the backend with the same time-range support. See [Features.md § 3](Features.md).
|
||||
|
||||
### 5.6 Write tests for ban service and dashboard endpoints
|
||||
|
||||
Test ban queries for each time-range preset, test that geo enrichment works with mocked API responses, and test that the endpoint returns the correct response shape. Verify edge cases: no bans in the selected range, an IP that fails geo lookup.
|
||||
|
||||
---
|
||||
|
||||
## Stage 6 — Jail Management
|
||||
|
||||
This stage exposes fail2ban's jail system through the UI — listing jails, viewing details, and executing control commands.
|
||||
|
||||
### 6.1 Implement the jail service
|
||||
|
||||
Build `backend/app/services/jail_service.py`. Using the fail2ban socket client, implement methods to: list all jails with their status and key metrics, retrieve the full detail of a single jail (log paths, regex patterns, date pattern, encoding, actions, ban-time escalation settings), start a jail, stop a jail, toggle idle mode, reload a single jail, and reload all jails. Each method sends the appropriate command through the socket wrapper and parses the response. See [Features.md § 5 (Jail Overview, Jail Detail, Jail Controls)](Features.md).
|
||||
|
||||
### 6.2 Implement the jails router
|
||||
|
||||
Create `backend/app/routers/jails.py`:
|
||||
- `GET /api/jails` — list all jails with status and metrics.
|
||||
- `GET /api/jails/{name}` — full detail for a single jail.
|
||||
- `POST /api/jails/{name}/start` — start a jail.
|
||||
- `POST /api/jails/{name}/stop` — stop a jail.
|
||||
- `POST /api/jails/{name}/idle` — toggle idle mode.
|
||||
- `POST /api/jails/{name}/reload` — reload a single jail.
|
||||
- `POST /api/jails/reload-all` — reload all jails.
|
||||
|
||||
Define request/response models in `backend/app/models/jail.py`. Use appropriate HTTP status codes (404 if a jail name does not exist, 409 if a jail is already in the requested state). See [Architekture.md § 2.2 (Routers)](Architekture.md).
|
||||
|
||||
### 6.3 Implement ban and unban endpoints
|
||||
|
||||
Add to `backend/app/routers/bans.py`:
|
||||
- `POST /api/bans` — ban an IP in a specified jail. Validate the IP with `ipaddress` before sending.
|
||||
- `DELETE /api/bans` — unban an IP from a specific jail or all jails. Support an `unban_all` flag.
|
||||
- `GET /api/bans/active` — list all currently banned IPs across all jails, with jail name, ban start time, expiry, and ban count.
|
||||
|
||||
Delegate to the ban service. See [Features.md § 5 (Ban an IP, Unban an IP, Currently Banned IPs)](Features.md).
|
||||
|
||||
### 6.4 Build the jail overview page (frontend)
|
||||
|
||||
Create `frontend/src/pages/JailsPage.tsx`. Display a card or table for each jail showing name, status badge (running/stopped/idle), backend type, banned count, total bans, failure counts, find time, ban time, and max retries. Each jail links to a detail view. Use Fluent UI `Card` or `DataGrid`. Create `frontend/src/api/jails.ts`, `frontend/src/types/jail.ts`, and a `useJails` hook. See [Features.md § 5 (Jail Overview)](Features.md).
|
||||
|
||||
### 6.5 Build the jail detail page (frontend)
|
||||
|
||||
Create `frontend/src/pages/JailDetailPage.tsx` — reached via `/jails/:name`. Fetch the full jail detail and display: monitored log paths, fail regex and ignore regex lists (rendered in monospace), date pattern, log encoding, attached actions and their config, and ban-time escalation settings. Include control buttons (Start, Stop, Idle, Reload) that call the corresponding API endpoints with confirmation dialogs (Fluent UI `Dialog`). See [Features.md § 5 (Jail Detail, Jail Controls)](Features.md).
|
||||
|
||||
### 6.6 Build the ban/unban UI (frontend)
|
||||
|
||||
On the Jails page (or a dedicated sub-section), add a "Ban an IP" form with an IP input field and a jail selector dropdown. Add an "Unban an IP" form with an IP input (or selection from the currently-banned list), a jail selector (or "all jails"), and an "unban all" option. Show success/error feedback using Fluent UI `MessageBar` or `Toast`. Build a "Currently Banned IPs" table showing IP, jail, ban start, expiry, ban count, and a direct unban button per row. See [Features.md § 5 (Ban an IP, Unban an IP, Currently Banned IPs)](Features.md).
|
||||
|
||||
### 6.7 Implement IP lookup endpoint and UI
|
||||
|
||||
Add `GET /api/geo/lookup/{ip}` to `backend/app/routers/geo.py`. The endpoint checks whether the IP is currently banned (and in which jails), retrieves its ban history (count, timestamps, jails), and fetches enriched info (country, ASN, RIR) from the geo service. On the frontend, create an IP Lookup section in the Jails area where the user can enter any IP and see all this information. See [Features.md § 5 (IP Lookup)](Features.md).
|
||||
|
||||
### 6.8 Implement the ignore list (whitelist) endpoints and UI
|
||||
|
||||
Add endpoints to `backend/app/routers/jails.py` for managing ignore lists:
|
||||
- `GET /api/jails/{name}/ignoreip` — get the ignore list for a jail.
|
||||
- `POST /api/jails/{name}/ignoreip` — add an IP or network to a jail's ignore list.
|
||||
- `DELETE /api/jails/{name}/ignoreip` — remove an IP from the ignore list.
|
||||
- `POST /api/jails/{name}/ignoreself` — toggle the "ignore self" option.
|
||||
|
||||
On the frontend, add an "IP Whitelist" section to the jail detail page showing the ignore list with add/remove controls. See [Features.md § 5 (IP Whitelist)](Features.md).
|
||||
|
||||
### 6.9 Write tests for jail and ban features
|
||||
|
||||
Test jail listing with mocked socket responses, jail detail parsing, start/stop/reload commands, ban and unban execution, currently-banned list retrieval, IP lookup with and without ban history, and ignore list operations. Ensure all socket interactions are mocked.
|
||||
|
||||
---
|
||||
|
||||
## Stage 7 — Configuration View
|
||||
|
||||
This stage lets users inspect and edit fail2ban configuration directly from the web interface.
|
||||
|
||||
### 7.1 Implement the config service
|
||||
|
||||
Build `backend/app/services/config_service.py`. It reads the active fail2ban configuration by querying the daemon for jail settings, filter regex patterns, and global parameters. It also writes configuration changes by sending the appropriate set commands through the socket (or by editing config files and triggering a reload, depending on what fail2ban supports for each setting). The service must validate regex patterns before applying them — attempting to compile each pattern and returning a clear error if it is invalid. See [Features.md § 6 (View Configuration, Edit Configuration)](Features.md).
|
||||
|
||||
### 7.2 Implement the config router
|
||||
|
||||
Create `backend/app/routers/config.py`:
|
||||
- `GET /api/config/jails` — list all jails with their current configuration.
|
||||
- `GET /api/config/jails/{name}` — full configuration for a single jail (filter, regex, dates, actions, escalation).
|
||||
- `PUT /api/config/jails/{name}` — update a jail's configuration (ban time, max retries, enabled, regex patterns, date pattern, DNS mode, escalation settings).
|
||||
- `GET /api/config/global` — global fail2ban settings.
|
||||
- `PUT /api/config/global` — update global settings.
|
||||
- `POST /api/config/reload` — reload fail2ban to apply changes.
|
||||
|
||||
Define models in `backend/app/models/config.py`. Return validation errors before saving. See [Architekture.md § 2.2 (Routers)](Architekture.md).
|
||||
|
||||
### 7.3 Implement log observation endpoints
|
||||
|
||||
Add endpoints for registering new log files that fail2ban should monitor. The user needs to specify a log file path, one or more failure-detection regex patterns, a jail name, and basic jail settings. Include a preview endpoint that reads the specified log file and tests the provided regex against its contents, returning matching lines so the user can verify the pattern before saving. See [Features.md § 6 (Add Log Observation)](Features.md).
|
||||
|
||||
### 7.4 Implement the regex tester endpoint
|
||||
|
||||
Add `POST /api/config/regex-test` to the config router. It accepts a sample log line and a fail regex pattern, attempts to match them, and returns whether the pattern matched along with any captured groups highlighted by position. This is a stateless utility endpoint. See [Features.md § 6 (Regex Tester)](Features.md).
|
||||
|
||||
### 7.5 Implement server settings endpoints
|
||||
|
||||
Create `backend/app/routers/server.py`:
|
||||
- `GET /api/server/settings` — current log level, log target, syslog socket, DB path, purge age, max matches.
|
||||
- `PUT /api/server/settings` — update server-level settings.
|
||||
- `POST /api/server/flush-logs` — flush and re-open log files.
|
||||
|
||||
Delegate to `backend/app/services/server_service.py`. See [Features.md § 6 (Server Settings)](Features.md).
|
||||
|
||||
### 7.6 Build the configuration page (frontend)
|
||||
|
||||
Create `frontend/src/pages/ConfigPage.tsx`. The page should show all jails with their current settings in a readable format. Each jail section expands to show filter regex, ignore regex, date pattern, actions, and escalation settings. Provide inline editing: clicking a value turns it into an editable field. Add/remove buttons for regex patterns. A "Save" button persists changes and optionally triggers a reload. Show validation errors inline. Use Fluent UI `Accordion`, `Input`, `Textarea`, `Switch`, and `Button`. See [Features.md § 6](Features.md) and [Web-Design.md](Web-Design.md).
|
||||
|
||||
### 7.7 Build the regex tester UI (frontend)
|
||||
|
||||
Add a "Regex Tester" section to the configuration page (or as a dialog/panel). Two input fields: one for a sample log line, one for the regex pattern. On every change (debounced), call the regex-test endpoint and display the result — whether it matched, and highlight the matched groups. Use monospace font for both inputs. See [Features.md § 6 (Regex Tester)](Features.md).
|
||||
|
||||
### 7.8 Build the server settings UI (frontend)
|
||||
|
||||
Add a "Server Settings" section to the configuration page. Display current values for log level, log target, syslog socket, DB path, purge age, and max matches. Provide dropdowns for log level and log target, text inputs for paths and numeric values. Include a "Flush Logs" button. See [Features.md § 6 (Server Settings)](Features.md).
|
||||
|
||||
### 7.9 Write tests for configuration features
|
||||
|
||||
Test config read and write operations with mocked fail2ban responses, regex validation (valid and invalid patterns), the regex tester with matching and non-matching inputs, and server settings read/write. Verify that changes are only applied after validation passes.
|
||||
|
||||
---
|
||||
|
||||
## Stage 8 — World Map View
|
||||
|
||||
A geographical visualisation of ban activity. This stage depends on the geo service and the ban data pipeline, both introduced in Stage 5.
|
||||
|
||||
### 8.1 Implement the map data endpoint
|
||||
|
||||
Add `GET /api/dashboard/bans/by-country` to the dashboard router. It accepts the same time-range parameter as the ban list endpoint. It queries bans in the selected window, enriches them with geo data, and returns an aggregated count per country (ISO country code → ban count). Also return the full ban list so the frontend can display the companion table. See [Features.md § 4](Features.md).
|
||||
|
||||
### 8.2 Build the world map component (frontend)
|
||||
|
||||
Create `frontend/src/components/WorldMap.tsx`. Render a full world map with country outlines only — no fill colours, no satellite imagery. For each country with bans, display the ban count centred inside the country's borders. Countries with zero bans remain blank. Consider a lightweight SVG-based map library or a TopoJSON/GeoJSON world outline rendered with D3 or a comparable tool. The map must be interactive: clicking a country filters the companion access list. Include the same time-range selector as the dashboard. See [Features.md § 4](Features.md).
|
||||
|
||||
### 8.3 Build the map page (frontend)
|
||||
|
||||
Create `frontend/src/pages/MapPage.tsx`. Compose the time-range selector, the `WorldMap` component, and an access list table below. When a country is selected on the map, the table filters to show only entries from that country. Clicking the map background (or a "Clear filter" button) removes the country filter. Create `frontend/src/hooks/useMapData.ts` to fetch and manage the aggregated data. See [Features.md § 4](Features.md).
|
||||
|
||||
### 8.4 Write tests for the map data endpoint
|
||||
|
||||
Test aggregation correctness: multiple bans from the same country should be summed, unknown countries should be handled gracefully, and empty time ranges should return an empty map object.
|
||||
|
||||
---
|
||||
|
||||
## Stage 9 — Ban History
|
||||
|
||||
This stage exposes historical ban data from the fail2ban database for forensic exploration.
|
||||
|
||||
### 9.1 Implement the history service
|
||||
|
||||
Build `backend/app/services/history_service.py`. Query the fail2ban database for all past ban records (not just currently active ones). Support filtering by jail, IP address, and time range. Compute ban count per IP to identify repeat offenders. Provide a per-IP timeline method that returns every ban event for a given IP: which jail triggered it, when it started, how long it lasted, and any matched log lines stored in the database. See [Features.md § 7](Features.md).
|
||||
|
||||
### 9.2 Implement the history router
|
||||
|
||||
Create `backend/app/routers/history.py`:
|
||||
- `GET /api/history` — paginated list of all historical bans with filters (jail, IP, time range). Returns time, IP, jail, duration, ban count, country.
|
||||
- `GET /api/history/{ip}` — per-IP detail: full ban timeline, total failures, matched log lines.
|
||||
|
||||
Define models in `backend/app/models/history.py`. Enrich results with geo data. See [Architekture.md § 2.2](Architekture.md).
|
||||
|
||||
### 9.3 Build the history page (frontend)
|
||||
|
||||
Create `frontend/src/pages/HistoryPage.tsx`. Display a `DataGrid` table of all past bans with columns for time, IP (monospace), jail, ban duration, ban count, and country. Add filter controls above the table: a jail dropdown, an IP search input, and the standard time-range selector. Highlight rows with high ban counts to flag repeat offenders. Clicking an IP row navigates to a per-IP detail view showing the full ban timeline and aggregated failures. See [Features.md § 7](Features.md).
|
||||
|
||||
### 9.4 Write tests for history features
|
||||
|
||||
Test history queries with various filters, per-IP timeline construction, ban count computation, and edge cases (IP with no history, jail that no longer exists).
|
||||
|
||||
---
|
||||
|
||||
## Stage 10 — External Blocklist Importer
|
||||
|
||||
This stage adds the ability to automatically download and apply external IP blocklists on a schedule.
|
||||
|
||||
### 10.1 Implement the blocklist repository
|
||||
|
||||
Build `backend/app/repositories/blocklist_repo.py` and `backend/app/repositories/import_log_repo.py`. The blocklist repo persists blocklist source definitions (name, URL, enabled flag) in the application database. The import log repo records every import run with timestamp, source URL, IPs imported, IPs skipped, and any errors encountered. See [Architekture.md § 2.2 (Repositories)](Architekture.md).
|
||||
|
||||
### 10.2 Implement the blocklist service
|
||||
|
||||
Build `backend/app/services/blocklist_service.py`. It manages blocklist source CRUD (add, edit, remove, toggle enabled). For the actual import: download each enabled source URL via aiohttp, validate every entry as a well-formed IP or CIDR range using the `ipaddress` module, skip malformed lines gracefully, and apply valid IPs as bans through fail2ban (in a dedicated blocklist jail) or via iptables. If using iptables, flush the chain before re-populating. Log every step with structlog. Record import results through the import log repository. Handle unreachable URLs by logging the error and continuing with remaining sources. See [Features.md § 8](Features.md).
|
||||
|
||||
### 10.3 Implement the blocklist import scheduled task
|
||||
|
||||
Create `backend/app/tasks/blocklist_import.py` — an APScheduler job that runs the blocklist service import at the configured schedule. The default is daily at 03:00. The schedule should be configurable through the blocklist service (saved in the app database). See [Features.md § 8 (Schedule)](Features.md).
|
||||
|
||||
### 10.4 Implement the blocklist router
|
||||
|
||||
Create `backend/app/routers/blocklist.py`:
|
||||
- `GET /api/blocklists` — list all blocklist sources with their status.
|
||||
- `POST /api/blocklists` — add a new source.
|
||||
- `PUT /api/blocklists/{id}` — edit a source (name, URL, enabled).
|
||||
- `DELETE /api/blocklists/{id}` — remove a source.
|
||||
- `GET /api/blocklists/{id}/preview` — download and display a sample of the blocklist contents.
|
||||
- `POST /api/blocklists/import` — trigger a manual import immediately ("Run Now").
|
||||
- `GET /api/blocklists/schedule` — get the current schedule and next run time.
|
||||
- `PUT /api/blocklists/schedule` — update the schedule.
|
||||
- `GET /api/blocklists/log` — paginated import log, filterable by source and date range.
|
||||
|
||||
Define models in `backend/app/models/blocklist.py`. See [Architekture.md § 2.2](Architekture.md).
|
||||
|
||||
### 10.5 Build the blocklist management page (frontend)
|
||||
|
||||
Create `frontend/src/pages/BlocklistPage.tsx`. Display a list of blocklist sources as cards or rows showing name, URL, enabled toggle, and action buttons (edit, delete, preview). Add a form to create or edit a source. Show the schedule configuration with a simple time-and-frequency picker (no raw cron) — dropdowns for frequency preset and a time input. Include a "Run Now" button and a display of last import time and next scheduled run. Below, show the import log as a table (timestamp, source, IPs imported, IPs skipped, errors) with filters. If the most recent import had errors, show a warning badge in the navigation. See [Features.md § 8](Features.md).
|
||||
|
||||
### 10.6 Write tests for blocklist features
|
||||
|
||||
Test source CRUD, import with valid/invalid entries, schedule update, manual import trigger, import log persistence, and error handling when a URL is unreachable. Mock all HTTP calls.
|
||||
|
||||
---
|
||||
|
||||
## Stage 11 — Polish, Cross-Cutting Concerns & Hardening
|
||||
|
||||
This final stage covers everything that spans multiple features or improves the overall quality of the application.
|
||||
|
||||
### 11.1 Implement connection health indicator
|
||||
|
||||
Add a persistent connection-health indicator visible on every page (part of the `MainLayout`). When the fail2ban server becomes unreachable, show a clear warning bar at the top of the interface. When it recovers, dismiss the warning. The indicator reads from the cached health status maintained by the background task from Stage 4. See [Features.md § 9](Features.md).
|
||||
|
||||
### 11.2 Add timezone awareness
|
||||
|
||||
Ensure all timestamps displayed in the frontend respect the timezone configured during setup. Store all dates in UTC on the backend. Convert to the user's configured timezone on the frontend before display. Create a `formatDate` utility in `frontend/src/utils/` that applies the configured timezone and format. See [Features.md § 9](Features.md).
|
||||
|
||||
### 11.3 Add responsive layout polish
|
||||
|
||||
Review every page against the breakpoint table in [Web-Design.md § 4](Web-Design.md). Ensure the sidebar collapses correctly on small screens, tables scroll horizontally instead of breaking, cards stack vertically, and no content overflows. Test at 320 px, 640 px, 1024 px, and 1920 px widths.
|
||||
|
||||
### 11.4 Add loading and error states
|
||||
|
||||
Every page and data-fetching component must handle three states: loading (show Fluent UI `Spinner` or skeleton shimmer), error (show a `MessageBar` with details and a retry action), and empty (show an informational message). Remove bare spinners that persist longer than one second — replace them with skeleton screens as required by [Web-Design.md § 6](Web-Design.md).
|
||||
|
||||
### 11.5 Implement reduced-motion support
|
||||
|
||||
Honour the `prefers-reduced-motion` media query. When detected, disable all non-essential animations (tab transitions, row slide-outs, panel fly-ins) and replace them with instant state changes. See [Web-Design.md § 6 (Motion Rules)](Web-Design.md).
|
||||
|
||||
### 11.6 Add accessibility audit
|
||||
|
||||
Verify WCAG 2.1 AA compliance across the entire application. All interactive elements must be keyboard-accessible. All Fluent UI components include accessibility by default, but custom components (world map, regex tester highlight) need manual `aria-label` and role attributes. Ensure colour is never the sole indicator of status — combine with icons or text labels. See [Web-Design.md § 1](Web-Design.md).
|
||||
|
||||
### 11.7 Add structured logging throughout
|
||||
|
||||
Review every service and task to confirm that all significant operations are logged with structlog and contextual key-value pairs. Log ban/unban actions, config changes, blocklist imports, authentication events, and health transitions. Never log passwords, session tokens, or other secrets. See [Backend-Development.md § 7](Backend-Development.md).
|
||||
|
||||
### 11.8 Add global error handling
|
||||
|
||||
Register FastAPI exception handlers in `main.py` that map all custom domain exceptions to HTTP status codes with structured error bodies. Ensure no unhandled exception ever returns a raw 500 with a stack trace to the client. Log all errors with full context before returning the response. See [Backend-Development.md § 8](Backend-Development.md).
|
||||
|
||||
### 11.9 Final test pass and coverage check
|
||||
|
||||
Run the full test suite. Ensure all tests pass. Check coverage: aim for over 80 % line coverage overall, with 100 % on critical paths (auth, banning, scheduled imports). Add missing tests where coverage is below threshold. Ensure `ruff`, `mypy --strict`, and `tsc --noEmit` all pass with zero errors. See [Backend-Development.md § 9](Backend-Development.md) and [Web-Development.md § 1](Web-Development.md).
|
||||
|
||||
@@ -204,7 +204,7 @@ Use Fluent UI React components as the building blocks. The following mapping sho
|
||||
|---|---|---|
|
||||
| Side navigation | `Nav` | Persistent on large screens, collapsible on small. Groups: Dashboard, Map, Jails, Config, History, Blocklists. |
|
||||
| Breadcrumbs | `Breadcrumb` | Show on detail pages (Jail > sshd, History > IP detail). |
|
||||
| Page tabs | `Pivot` | Dashboard (Ban List / Access List), Map (Map / Access List). |
|
||||
| Page tabs | `Pivot` | None currently (previous tabs removed). |
|
||||
|
||||
### Data Display
|
||||
|
||||
@@ -271,12 +271,14 @@ The dashboard uses cards to display key figures (server status, total bans, acti
|
||||
|
||||
## 11. World Map View
|
||||
|
||||
- The map renders country outlines only — **no fill colours, no satellite imagery, no terrain shading**.
|
||||
- The map renders country outlines only — **no fill colours, no satellite imagery, no terrain shading**. Countries are transparent with neutral strokes.
|
||||
- **The map is fully interactive:** users can zoom in/out using mouse wheel or pinch gestures, and pan by dragging. Zoom range: 1× (full world) to 8× (regional detail).
|
||||
- **Zoom controls:** Three small buttons overlaid in the top-right corner provide zoom in (+), zoom out (−), and reset view (⟲) functionality. Buttons use `appearance="secondary"` and `size="small"`.
|
||||
- Countries with banned IPs display a **count badge** centred inside the country polygon. Use `FontSizes.size14` semibold, `themePrimary` colour.
|
||||
- Countries with zero bans remain completely blank — no label, no tint.
|
||||
- On hover: country region gets a subtle `neutralLighterAlt` fill. On click: fill shifts to `themeLighterAlt` and the companion table below filters to that country.
|
||||
- The map must have a **light neutral border** (`neutralLight`) around its container, at **Depth 4**.
|
||||
- Time-range selector above the map uses `Pivot` with quick presets (24 h, 7 d, 30 d, 365 d).
|
||||
- On hover: country region gets a subtle `neutralBackground3` fill (only if the country has data). On click: fill shifts to `brandBackgroundHover` and the companion table below filters to that country. Default state remains transparent.
|
||||
- The map must have a **light neutral border** (`neutralStroke1`) around its container, with `borderRadius.medium`.
|
||||
- Time-range selector above the map uses `Select` dropdown with quick presets (24 h, 7 d, 30 d, 365 d).
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -130,10 +130,15 @@ frontend/
|
||||
├── .eslintrc.cjs
|
||||
├── .prettierrc
|
||||
├── tsconfig.json
|
||||
├── vite.config.ts
|
||||
├── vite.config.ts # Dev proxy: /api → http://backend:8000 (service DNS)
|
||||
└── package.json
|
||||
```
|
||||
|
||||
> **Dev proxy target:** `vite.config.ts` proxies all `/api` requests to
|
||||
> `http://backend:8000`. Use the compose **service name** (`backend`), not
|
||||
> `localhost` — inside the container network `localhost` resolves to the
|
||||
> frontend container itself and causes `ECONNREFUSED`.
|
||||
|
||||
### Separation of Concerns
|
||||
|
||||
- **Pages** handle routing and compose layout + components — they contain no business logic.
|
||||
|
||||
1
Docs/test.md
Normal file
1
Docs/test.md
Normal file
@@ -0,0 +1 @@
|
||||
https://lists.blocklist.de/lists/all.txt
|
||||
81
Makefile
Normal file
81
Makefile
Normal file
@@ -0,0 +1,81 @@
|
||||
# ──────────────────────────────────────────────────────────────
# BanGUI — Project Makefile
#
# Compatible with both Docker Compose and Podman Compose.
# Auto-detects which compose binary is available.
#
# Usage:
#   make up           — start the debug stack
#   make down         — stop the debug stack
#   make build        — (re)build the backend image without starting
#   make clean        — stop, remove all containers, volumes, and local images
#   make logs         — tail logs for all debug services
#   make restart      — restart the debug stack
#   make dev-ban-test — one-command smoke test of the ban pipeline
# ──────────────────────────────────────────────────────────────

COMPOSE_FILE := Docker/compose.debug.yml

# Compose project name (matches `name:` in compose.debug.yml).
PROJECT := bangui-dev

# All named volumes declared in compose.debug.yml.
# Compose prefixes them with the project name.
DEV_VOLUMES := \
	$(PROJECT)_bangui-dev-data \
	$(PROJECT)_frontend-node-modules \
	$(PROJECT)_fail2ban-dev-config \
	$(PROJECT)_fail2ban-dev-run

# Locally-built images (compose project name + service name).
# Public images (fail2ban, node) are intentionally excluded.
# NOTE(review): Docker Compose v2 tags local images "<project>-<service>"
# while podman-compose uses "<project>_<service>" — verify the separator if
# `make clean` leaves images behind on Docker hosts.
DEV_IMAGES := \
	$(PROJECT)_backend

# Detect available compose tooling, preferring Podman when present:
#   1. podman-compose binary,
#   2. `podman compose` subcommand (podman installed without podman-compose),
#   3. `docker compose` as the final fallback.
# Previously the fallback was unconditionally "podman compose", which broke
# on Docker-only hosts (RUNTIME below already fell back to docker).
COMPOSE := $(shell command -v podman-compose 2>/dev/null \
	|| { command -v podman >/dev/null 2>&1 && echo "podman compose"; } \
	|| echo "docker compose")

# Detect available container runtime (podman or docker).
RUNTIME := $(shell command -v podman 2>/dev/null || echo "docker")

.PHONY: up down build restart logs clean dev-ban-test

## Start the debug stack (detached).
## Ensures log stub files exist so fail2ban can open them on first start.
up:
	@mkdir -p Docker/logs
	@touch Docker/logs/auth.log
	$(COMPOSE) -f $(COMPOSE_FILE) up -d

## Stop the debug stack.
down:
	$(COMPOSE) -f $(COMPOSE_FILE) down

## (Re)build the backend image without starting containers.
build:
	$(COMPOSE) -f $(COMPOSE_FILE) build

## Restart the debug stack.
restart: down up

## Tail logs for all debug services.
logs:
	$(COMPOSE) -f $(COMPOSE_FILE) logs -f

## Stop containers, remove ALL debug volumes and locally-built images.
## The next 'make up' will rebuild images from scratch and start fresh.
clean:
	$(COMPOSE) -f $(COMPOSE_FILE) down --remove-orphans
	$(RUNTIME) volume rm $(DEV_VOLUMES) 2>/dev/null || true
	$(RUNTIME) rmi $(DEV_IMAGES) 2>/dev/null || true
	@echo "All debug volumes and local images removed. Run 'make up' to rebuild and start fresh."

## One-command smoke test for the ban pipeline:
## 1. Start fail2ban, 2. write failure lines, 3. check ban status.
dev-ban-test:
	$(COMPOSE) -f $(COMPOSE_FILE) up -d fail2ban
	sleep 5
	bash Docker/simulate_failed_logins.sh
	sleep 3
	bash Docker/check_ban_status.sh
|
||||
22
backend/.env.example
Normal file
22
backend/.env.example
Normal file
@@ -0,0 +1,22 @@
|
||||
# BanGUI Backend — Environment Variables
|
||||
# Copy this file to .env and fill in the values.
|
||||
# Never commit .env to version control.
|
||||
|
||||
# Path to the BanGUI application SQLite database.
|
||||
BANGUI_DATABASE_PATH=bangui.db
|
||||
|
||||
# Path to the fail2ban Unix domain socket.
|
||||
BANGUI_FAIL2BAN_SOCKET=/var/run/fail2ban/fail2ban.sock
|
||||
|
||||
# Secret key used to sign session tokens. Use a long, random string.
|
||||
# Generate with: python -c "import secrets; print(secrets.token_hex(64))"
|
||||
BANGUI_SESSION_SECRET=replace-this-with-a-long-random-secret
|
||||
|
||||
# Session duration in minutes. Default: 60 minutes.
|
||||
BANGUI_SESSION_DURATION_MINUTES=60
|
||||
|
||||
# Timezone for displaying timestamps in the UI (IANA tz name).
|
||||
BANGUI_TIMEZONE=UTC
|
||||
|
||||
# Application log level: debug | info | warning | error | critical
|
||||
BANGUI_LOG_LEVEL=info
|
||||
49
backend/.gitignore
vendored
Normal file
49
backend/.gitignore
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
# ─────────────────────────────────────────────
|
||||
# backend/.gitignore (Python / FastAPI)
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
# Byte-compiled / optimised source files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*.pyo
|
||||
*.pyd
|
||||
|
||||
# Virtual environment (local override)
|
||||
.venv/
|
||||
venv/
|
||||
env/
|
||||
|
||||
# Distribution / packaging
|
||||
dist/
|
||||
build/
|
||||
*.egg-info/
|
||||
|
||||
# Testing
|
||||
.coverage
|
||||
.coverage.*
|
||||
htmlcov/
|
||||
.pytest_cache/
|
||||
.tox/
|
||||
|
||||
# Type checkers & linters
|
||||
.mypy_cache/
|
||||
.ruff_cache/
|
||||
.pytype/
|
||||
|
||||
# Local database files
|
||||
*.sqlite3
|
||||
*.db
|
||||
*.db-shm
|
||||
*.db-wal
|
||||
|
||||
# Alembic generated junk
|
||||
alembic/versions/__pycache__/
|
||||
|
||||
# Secrets
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
secrets.json
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
1
backend/app/__init__.py
Normal file
1
backend/app/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""BanGUI backend application package."""
|
||||
88
backend/app/config.py
Normal file
88
backend/app/config.py
Normal file
@@ -0,0 +1,88 @@
|
||||
"""Application configuration loaded from environment variables and .env file.
|
||||
|
||||
Follows pydantic-settings patterns: all values are prefixed with BANGUI_
|
||||
and validated at startup via the Settings singleton.
|
||||
"""
|
||||
|
||||
from typing import Literal

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """BanGUI runtime configuration.

    All fields are loaded from environment variables prefixed with ``BANGUI_``
    or from a ``.env`` file located next to the process working directory.
    The application will raise a :class:`pydantic.ValidationError` on startup
    if any required field is missing or has an invalid value.
    """

    database_path: str = Field(
        default="bangui.db",
        description="Filesystem path to the BanGUI SQLite application database.",
    )
    fail2ban_socket: str = Field(
        default="/var/run/fail2ban/fail2ban.sock",
        description="Path to the fail2ban Unix domain socket.",
    )
    session_secret: str = Field(
        ...,
        description=(
            "Secret key used when generating session tokens. "
            "Must be unique and never committed to source control."
        ),
    )
    session_duration_minutes: int = Field(
        default=60,
        ge=1,
        description="Number of minutes a session token remains valid after creation.",
    )
    timezone: str = Field(
        default="UTC",
        description="IANA timezone name used when displaying timestamps in the UI.",
    )
    # Literal enforces the documented choices at startup, so a typo such as
    # "warn" fails validation immediately instead of being passed through to
    # the logging subsystem as a plain string.
    log_level: Literal["debug", "info", "warning", "error", "critical"] = Field(
        default="info",
        description="Application log level: debug | info | warning | error | critical.",
    )
    geoip_db_path: str | None = Field(
        default=None,
        description=(
            "Optional path to a MaxMind GeoLite2-Country .mmdb file. "
            "When set, failed ip-api.com lookups fall back to local resolution."
        ),
    )
    fail2ban_config_dir: str = Field(
        default="/config/fail2ban",
        description=(
            "Path to the fail2ban configuration directory. "
            "Must contain subdirectories jail.d/, filter.d/, and action.d/. "
            "Used for listing, viewing, and editing configuration files through the web UI."
        ),
    )
    fail2ban_start_command: str = Field(
        default="fail2ban-client start",
        description=(
            "Shell command used to start (not reload) the fail2ban daemon during "
            "recovery rollback. Split by whitespace to build the argument list — "
            "no shell interpretation is performed. "
            "Example: 'systemctl start fail2ban' or 'fail2ban-client start'."
        ),
    )

    model_config = SettingsConfigDict(
        env_prefix="BANGUI_",
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False,
    )
|
||||
|
||||
|
||||
def get_settings() -> Settings:
    """Build and return a validated :class:`Settings` object.

    Returns:
        A :class:`Settings` instance whose fields were resolved from the
        environment (and a ``.env`` file, when present). Raises
        :class:`pydantic.ValidationError` if a required key is absent or a
        value fails validation.
    """
    # pydantic-settings performs the env-var resolution and validation here.
    return Settings()
|
||||
112
backend/app/db.py
Normal file
112
backend/app/db.py
Normal file
@@ -0,0 +1,112 @@
|
||||
"""Application database schema definition and initialisation.
|
||||
|
||||
BanGUI maintains its own SQLite database that stores configuration, session
|
||||
state, blocklist source definitions, and import run logs. This module is
|
||||
the single source of truth for the schema — all ``CREATE TABLE`` statements
|
||||
live here and are applied on first run via :func:`init_db`.
|
||||
|
||||
The fail2ban database is separate and is accessed read-only by the history
|
||||
and ban services.
|
||||
"""
|
||||
|
||||
import aiosqlite
|
||||
import structlog
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# DDL statements
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Key/value store for application-level settings written through the web UI.
# ``key`` is UNIQUE so each setting exists at most once; timestamps default
# to UTC ISO-8601 strings (SQLite's 'now' is UTC).
_CREATE_SETTINGS: str = """
CREATE TABLE IF NOT EXISTS settings (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    key TEXT NOT NULL UNIQUE,
    value TEXT NOT NULL,
    created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')),
    updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))
);
"""

# Login sessions: one row per issued token. Expiry is stored explicitly in
# ``expires_at`` so rows can be validated and purged without extra state.
_CREATE_SESSIONS: str = """
CREATE TABLE IF NOT EXISTS sessions (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    token TEXT NOT NULL UNIQUE,
    created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')),
    expires_at TEXT NOT NULL
);
"""

# Redundant with the UNIQUE constraint on ``token`` above, but kept explicit
# so the token-lookup index has a stable, named identity.
_CREATE_SESSIONS_TOKEN_INDEX: str = """
CREATE UNIQUE INDEX IF NOT EXISTS idx_sessions_token ON sessions (token);
"""

# Blocklist source definitions (name, URL, enabled flag). ``url`` is UNIQUE
# to prevent registering the same source twice.
_CREATE_BLOCKLIST_SOURCES: str = """
CREATE TABLE IF NOT EXISTS blocklist_sources (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    url TEXT NOT NULL UNIQUE,
    enabled INTEGER NOT NULL DEFAULT 1,
    created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')),
    updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))
);
"""

# One row per blocklist import run. ``source_id`` is set NULL when a source
# is deleted so the audit trail outlives the source definition; the raw
# ``source_url`` is denormalised into the row for the same reason.
_CREATE_IMPORT_LOG: str = """
CREATE TABLE IF NOT EXISTS import_log (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    source_id INTEGER REFERENCES blocklist_sources(id) ON DELETE SET NULL,
    source_url TEXT NOT NULL,
    timestamp TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')),
    ips_imported INTEGER NOT NULL DEFAULT 0,
    ips_skipped INTEGER NOT NULL DEFAULT 0,
    errors TEXT
);
"""

# Cache of per-IP geolocation lookups (country, ASN, org). ``cached_at``
# records when the entry was stored — presumably used by the geo service for
# TTL-based refresh; confirm against that service's expiry logic.
_CREATE_GEO_CACHE: str = """
CREATE TABLE IF NOT EXISTS geo_cache (
    ip TEXT PRIMARY KEY,
    country_code TEXT,
    country_name TEXT,
    asn TEXT,
    org TEXT,
    cached_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))
);
"""

# Ordered list of DDL statements to execute on initialisation. Order matters:
# tables referenced by foreign keys (blocklist_sources) precede their
# referrers (import_log).
_SCHEMA_STATEMENTS: list[str] = [
    _CREATE_SETTINGS,
    _CREATE_SESSIONS,
    _CREATE_SESSIONS_TOKEN_INDEX,
    _CREATE_BLOCKLIST_SOURCES,
    _CREATE_IMPORT_LOG,
    _CREATE_GEO_CACHE,
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def init_db(db: aiosqlite.Connection) -> None:
    """Apply the BanGUI schema to *db*, creating any missing tables.

    Idempotent: every DDL statement uses ``IF NOT EXISTS``, so running this
    against an already-initialised database changes nothing. Intended to be
    invoked once from the FastAPI lifespan handler during startup.

    Args:
        db: An open :class:`aiosqlite.Connection` to the application database.
    """
    log.info("initialising_database_schema")
    # WAL permits concurrent readers during writes; foreign_keys is off by
    # default in SQLite and must be enabled per connection.
    for pragma in ("PRAGMA journal_mode=WAL;", "PRAGMA foreign_keys=ON;"):
        cursor = await db.execute(pragma)
        await cursor.close()
    for ddl in _SCHEMA_STATEMENTS:
        await db.executescript(ddl)
    await db.commit()
    log.info("database_schema_ready")
|
||||
156
backend/app/dependencies.py
Normal file
156
backend/app/dependencies.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""FastAPI dependency providers.
|
||||
|
||||
All ``Depends()`` callables that inject shared resources (database
|
||||
connection, settings, services, auth guard) are defined here.
|
||||
Routers import directly from this module — never from ``app.state``
|
||||
directly — to keep coupling explicit and testable.
|
||||
"""
|
||||
|
||||
import time
|
||||
from typing import Annotated
|
||||
|
||||
import aiosqlite
|
||||
import structlog
|
||||
from fastapi import Depends, HTTPException, Request, status
|
||||
|
||||
from app.config import Settings
|
||||
from app.models.auth import Session
|
||||
from app.utils.time_utils import utc_now
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
#: Name of the cookie that carries the session token.
_COOKIE_NAME = "bangui_session"


# ---------------------------------------------------------------------------
# Session validation cache
# ---------------------------------------------------------------------------

#: Seconds a successfully validated token is trusted from memory before it
#: is re-checked against SQLite. Keeps bursts of near-simultaneous requests
#: carrying the same token from issuing repeated database lookups.
_SESSION_CACHE_TTL: float = 10.0

#: Maps ``token → (Session, cache_expiry_monotonic_time)``.
_session_cache: dict[str, tuple[Session, float]] = {}


def clear_session_cache() -> None:
    """Drop every entry from the in-memory session validation cache.

    Primarily for test suites that need a pristine cache between cases so
    state cannot leak from one test into the next.
    """
    _session_cache.clear()


def invalidate_session_cache(token: str) -> None:
    """Remove *token* from the session cache, if present.

    Logout must call this: otherwise the revoked token would keep
    authenticating from memory until its cache TTL expired.

    Args:
        token: The session token to evict. Missing tokens are ignored.
    """
    _session_cache.pop(token, None)
|
||||
|
||||
|
||||
async def get_db(request: Request) -> aiosqlite.Connection:
    """Provide the shared :class:`aiosqlite.Connection` from ``app.state``.

    Args:
        request: The current FastAPI request (injected automatically).

    Returns:
        The application-wide aiosqlite connection opened during startup.

    Raises:
        HTTPException: 503 if the database has not been initialised.
    """
    connection: aiosqlite.Connection | None = getattr(request.app.state, "db", None)
    if connection is not None:
        return connection
    # Startup has not completed (or failed) — surface as service unavailable.
    log.error("database_not_initialised")
    raise HTTPException(
        status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
        detail="Database is not available.",
    )
|
||||
|
||||
|
||||
async def get_settings(request: Request) -> Settings:
    """Provide the :class:`~app.config.Settings` instance from ``app.state``.

    Args:
        request: The current FastAPI request (injected automatically).

    Returns:
        The application settings loaded at startup.
    """
    settings: Settings = request.app.state.settings
    return settings
|
||||
|
||||
|
||||
async def require_auth(
    request: Request,
    db: Annotated[aiosqlite.Connection, Depends(get_db)],
) -> Session:
    """Validate the session token and return the active session.

    The token is read from the ``bangui_session`` cookie or the
    ``Authorization: Bearer`` header.

    Validated tokens are cached in memory for :data:`_SESSION_CACHE_TTL`
    seconds so that concurrent requests sharing the same token avoid repeated
    SQLite round-trips. The cache is bypassed on expiry and explicitly
    cleared by :func:`invalidate_session_cache` on logout.

    Args:
        request: The incoming FastAPI request.
        db: Injected aiosqlite connection.

    Returns:
        The active :class:`~app.models.auth.Session`.

    Raises:
        HTTPException: 401 if no valid session token is found.
    """
    from app.services import auth_service  # noqa: PLC0415

    # Prefer the cookie; fall back to a Bearer token in the header.
    bearer_prefix = "Bearer "
    token: str | None = request.cookies.get(_COOKIE_NAME)
    if not token:
        header_value: str = request.headers.get("Authorization", "")
        if header_value.startswith(bearer_prefix):
            token = header_value[len(bearer_prefix):]

    if not token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Authentication required.",
            headers={"WWW-Authenticate": "Bearer"},
        )

    # Fast path: a cached entry is served only while the cache entry is
    # fresh AND the session itself has not expired. The expiry check is a
    # lexicographic string compare — valid for ISO 8601 UTC timestamps,
    # assuming auth_service always emits a consistent format (TODO confirm).
    entry = _session_cache.get(token)
    if entry is not None:
        cached_session, fresh_until = entry
        if time.monotonic() < fresh_until and cached_session.expires_at > utc_now().isoformat():
            return cached_session
        # Stale or expired — evict and re-validate against the DB.
        _session_cache.pop(token, None)

    try:
        session = await auth_service.validate_session(db, token)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=str(exc),
            headers={"WWW-Authenticate": "Bearer"},
        ) from exc

    _session_cache[token] = (session, time.monotonic() + _SESSION_CACHE_TTL)
    return session
|
||||
|
||||
|
||||
# Convenience type aliases for route signatures.
DbDep = Annotated[aiosqlite.Connection, Depends(get_db)]  # shared SQLite connection
SettingsDep = Annotated[Settings, Depends(get_settings)]  # application settings
AuthDep = Annotated[Session, Depends(require_auth)]  # validated auth session
|
||||
413
backend/app/main.py
Normal file
413
backend/app/main.py
Normal file
@@ -0,0 +1,413 @@
|
||||
"""BanGUI FastAPI application factory.
|
||||
|
||||
Call :func:`create_app` to obtain a configured :class:`fastapi.FastAPI`
|
||||
instance suitable for direct use with an ASGI server (e.g. ``uvicorn``) or
|
||||
in tests via ``httpx.AsyncClient``.
|
||||
|
||||
The lifespan handler manages all shared resources — database connection, HTTP
|
||||
session, and scheduler — so every component can rely on them being available
|
||||
on ``app.state`` throughout the request lifecycle.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import sys
|
||||
from contextlib import asynccontextmanager
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import AsyncGenerator, Awaitable, Callable
|
||||
|
||||
from starlette.responses import Response as StarletteResponse
|
||||
|
||||
import aiohttp
|
||||
import aiosqlite
|
||||
import structlog
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler # type: ignore[import-untyped]
|
||||
from fastapi import FastAPI, Request, status
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import JSONResponse, RedirectResponse
|
||||
from starlette.middleware.base import BaseHTTPMiddleware
|
||||
|
||||
from app.config import Settings, get_settings
|
||||
from app.db import init_db
|
||||
from app.routers import (
|
||||
auth,
|
||||
bans,
|
||||
blocklist,
|
||||
config,
|
||||
dashboard,
|
||||
file_config,
|
||||
geo,
|
||||
health,
|
||||
history,
|
||||
jails,
|
||||
server,
|
||||
setup,
|
||||
)
|
||||
from app.tasks import blocklist_import, geo_cache_flush, geo_re_resolve, health_check
|
||||
from app.utils.fail2ban_client import Fail2BanConnectionError, Fail2BanProtocolError
|
||||
from app.utils.jail_config import ensure_jail_configs
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Ensure the bundled fail2ban package is importable from fail2ban-master/
|
||||
#
|
||||
# The directory layout differs between local dev and the Docker image:
|
||||
# Local: <repo-root>/backend/app/main.py → fail2ban-master at parents[2]
|
||||
# Docker: /app/app/main.py → fail2ban-master at parents[1]
|
||||
# Walk up from this file until we find a "fail2ban-master" sibling directory
|
||||
# so the path resolution is environment-agnostic.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _find_fail2ban_master() -> Path | None:
|
||||
"""Return the first ``fail2ban-master`` directory found while walking up.
|
||||
|
||||
Returns:
|
||||
Absolute :class:`~pathlib.Path` to the ``fail2ban-master`` directory,
|
||||
or ``None`` if no such directory exists among the ancestors.
|
||||
"""
|
||||
here = Path(__file__).resolve()
|
||||
for ancestor in here.parents:
|
||||
candidate = ancestor / "fail2ban-master"
|
||||
if candidate.is_dir():
|
||||
return candidate
|
||||
return None
|
||||
|
||||
|
||||
# Make the bundled fail2ban package importable regardless of layout.
_fail2ban_master: Path | None = _find_fail2ban_master()
if _fail2ban_master is not None and str(_fail2ban_master) not in sys.path:
    # Prepend so the bundled copy takes precedence over any installed one.
    sys.path.insert(0, str(_fail2ban_master))
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Logging configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _configure_logging(log_level: str) -> None:
    """Configure stdlib logging and structlog for production JSON output.

    Args:
        log_level: Case-insensitive level name: ``debug``, ``info``,
            ``warning``, ``error``, or ``critical``.

    Raises:
        ValueError: If *log_level* is not a recognised level name.
    """
    # getLevelName() returns the numeric level for a known name but a
    # "Level <name>" placeholder *string* for unknown ones; validate here so
    # misconfiguration fails with a clear message instead of a cryptic
    # "Unknown level" error raised from deep inside logging.basicConfig().
    level = logging.getLevelName(log_level.upper())
    if not isinstance(level, int):
        raise ValueError(f"Unknown log level: {log_level!r}")
    logging.basicConfig(level=level, stream=sys.stdout, format="%(message)s")
    structlog.configure(
        processors=[
            structlog.contextvars.merge_contextvars,
            structlog.stdlib.filter_by_level,
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.UnicodeDecoder(),
            structlog.processors.JSONRenderer(),
        ],
        wrapper_class=structlog.stdlib.BoundLogger,
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=True,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Lifespan
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@asynccontextmanager
async def _lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Manage the lifetime of all shared application resources.

    Resources are initialised in order on startup and released in reverse
    order on shutdown. They are stored on ``app.state`` so they are
    accessible to dependency providers and tests.

    Args:
        app: The :class:`fastapi.FastAPI` instance being started.
    """
    # Settings were attached by create_app() before the server started us.
    settings: Settings = app.state.settings
    _configure_logging(settings.log_level)

    log.info("bangui_starting_up", database_path=settings.database_path)

    # --- Ensure required jail config files are present ---
    ensure_jail_configs(Path(settings.fail2ban_config_dir) / "jail.d")

    # --- Application database ---
    db_path: Path = Path(settings.database_path)
    db_path.parent.mkdir(parents=True, exist_ok=True)
    log.debug("database_directory_ensured", directory=str(db_path.parent))
    db: aiosqlite.Connection = await aiosqlite.connect(settings.database_path)
    # Row factory gives name-based column access throughout the app.
    db.row_factory = aiosqlite.Row
    await init_db(db)
    app.state.db = db

    # --- Shared HTTP client session ---
    http_session: aiohttp.ClientSession = aiohttp.ClientSession()
    app.state.http_session = http_session

    # --- Pre-warm geo cache from the persistent store ---
    # Imported lazily, presumably to avoid an import cycle — TODO confirm.
    from app.services import geo_service  # noqa: PLC0415

    geo_service.init_geoip(settings.geoip_db_path)
    await geo_service.load_cache_from_db(db)

    # Log unresolved geo entries so the operator can see the scope of the issue.
    async with db.execute(
        "SELECT COUNT(*) FROM geo_cache WHERE country_code IS NULL"
    ) as cur:
        row = await cur.fetchone()
    unresolved_count: int = int(row[0]) if row else 0
    if unresolved_count > 0:
        log.warning("geo_cache_unresolved_ips", unresolved=unresolved_count)

    # --- Background task scheduler ---
    scheduler: AsyncIOScheduler = AsyncIOScheduler(timezone="UTC")
    scheduler.start()
    app.state.scheduler = scheduler

    # --- Health-check background probe ---
    health_check.register(app)

    # --- Blocklist import scheduled task ---
    blocklist_import.register(app)

    # --- Periodic geo cache flush to SQLite ---
    geo_cache_flush.register(app)

    # --- Periodic re-resolve of NULL-country geo entries ---
    geo_re_resolve.register(app)

    log.info("bangui_started")

    try:
        yield
    finally:
        # Teardown in reverse order of creation: scheduler → HTTP → DB.
        log.info("bangui_shutting_down")
        scheduler.shutdown(wait=False)
        await http_session.close()
        await db.close()
        log.info("bangui_shut_down")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Exception handlers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def _unhandled_exception_handler(
    request: Request,
    exc: Exception,
) -> JSONResponse:
    """Convert any uncaught exception into a sanitised 500 JSON response.

    The full exception (with traceback) is logged server-side before the
    response is sent; the client only ever sees a generic message.

    Args:
        request: The request during which the exception escaped.
        exc: The unhandled exception.

    Returns:
        A :class:`fastapi.responses.JSONResponse` with status 500.
    """
    log.error(
        "unhandled_exception",
        path=request.url.path,
        method=request.method,
        exc_info=exc,
    )
    body = {"detail": "An unexpected error occurred. Please try again later."}
    return JSONResponse(status_code=500, content=body)
|
||||
|
||||
|
||||
async def _fail2ban_connection_handler(
    request: Request,
    exc: Fail2BanConnectionError,
) -> JSONResponse:
    """Map an unreachable fail2ban daemon to ``502 Bad Gateway``.

    Args:
        request: The incoming FastAPI request.
        exc: The :class:`~app.utils.fail2ban_client.Fail2BanConnectionError`.

    Returns:
        A :class:`fastapi.responses.JSONResponse` with status 502.
    """
    log.warning(
        "fail2ban_connection_error",
        path=request.url.path,
        method=request.method,
        error=str(exc),
    )
    detail = f"Cannot reach fail2ban: {exc}"
    return JSONResponse(status_code=502, content={"detail": detail})
|
||||
|
||||
|
||||
async def _fail2ban_protocol_handler(
    request: Request,
    exc: Fail2BanProtocolError,
) -> JSONResponse:
    """Map a fail2ban protocol failure to ``502 Bad Gateway``.

    Args:
        request: The incoming FastAPI request.
        exc: The :class:`~app.utils.fail2ban_client.Fail2BanProtocolError`.

    Returns:
        A :class:`fastapi.responses.JSONResponse` with status 502.
    """
    log.warning(
        "fail2ban_protocol_error",
        path=request.url.path,
        method=request.method,
        error=str(exc),
    )
    detail = f"fail2ban protocol error: {exc}"
    return JSONResponse(status_code=502, content={"detail": detail})
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Setup-redirect middleware
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Paths that are always reachable, even before setup is complete.
|
||||
# Paths that are always reachable, even before setup is complete.
# NOTE(review): SetupRedirectMiddleware matches these by *prefix*, so all
# sub-paths (e.g. /api/setup/status) are exempt too — and so would be any
# future sibling route like /api/setupx; confirm that is intended.
_ALWAYS_ALLOWED: frozenset[str] = frozenset(
    {"/api/setup", "/api/health"},
)
|
||||
|
||||
|
||||
class SetupRedirectMiddleware(BaseHTTPMiddleware):
    """Redirect all API requests to ``/api/setup`` until setup is done.

    Once setup is complete this middleware is a no-op. Paths listed in
    :data:`_ALWAYS_ALLOWED` are exempt so the setup endpoint itself is
    always reachable.
    """

    async def dispatch(
        self,
        request: Request,
        call_next: Callable[[Request], Awaitable[StarletteResponse]],
    ) -> StarletteResponse:
        """Intercept requests before they reach the router.

        Args:
            request: The incoming HTTP request.
            call_next: The next middleware / router handler.

        Returns:
            Either a ``307 Temporary Redirect`` to ``/api/setup`` or the
            normal router response.
        """
        # Normalise trailing slashes; a bare "/" is preserved.
        path: str = request.url.path.rstrip("/") or "/"

        # Allow requests that don't need setup guard.
        # NOTE(review): this is prefix matching, so every sub-path of an
        # allowed entry is exempt as well — see _ALWAYS_ALLOWED.
        if any(path.startswith(allowed) for allowed in _ALWAYS_ALLOWED):
            return await call_next(request)

        # If setup is not complete, block all other API requests.
        # Fast path: setup completion is a one-way transition. Once it is
        # True it is cached on app.state so all subsequent requests skip the
        # DB query entirely. The flag is reset only when the app restarts.
        if path.startswith("/api") and not getattr(
            request.app.state, "_setup_complete_cached", False
        ):
            # Lazy import, presumably to avoid a circular import — TODO confirm.
            from app.services import setup_service  # noqa: PLC0415

            # The DB may be absent if the lifespan has not finished starting.
            db: aiosqlite.Connection | None = getattr(request.app.state, "db", None)
            if db is None or not await setup_service.is_setup_complete(db):
                return RedirectResponse(
                    url="/api/setup",
                    status_code=status.HTTP_307_TEMPORARY_REDIRECT,
                )
            request.app.state._setup_complete_cached = True

        return await call_next(request)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Application factory
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def create_app(settings: Settings | None = None) -> FastAPI:
    """Create and configure the BanGUI FastAPI application.

    This factory is the single entry point for creating the application.
    Tests can pass a custom ``settings`` object to override defaults
    without touching environment variables.

    Args:
        settings: Optional pre-built :class:`~app.config.Settings` instance.
            If ``None``, settings are loaded from the environment via
            :func:`~app.config.get_settings`.

    Returns:
        A fully configured :class:`fastapi.FastAPI` application ready for use.
    """
    resolved_settings: Settings = settings if settings is not None else get_settings()

    app: FastAPI = FastAPI(
        title="BanGUI",
        description="Web interface for monitoring, managing, and configuring fail2ban.",
        version="0.1.0",
        lifespan=_lifespan,
    )

    # Store settings on app.state so the lifespan handler can access them.
    app.state.settings = resolved_settings

    # --- CORS ---
    # In production the frontend is served by the same origin.
    # CORS is intentionally permissive only in development.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["http://localhost:5173"],  # Vite dev server
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # --- Middleware ---
    # Note: middleware is applied in reverse order of registration.
    # The setup-redirect must run *after* CORS, so it is added last.
    app.add_middleware(SetupRedirectMiddleware)

    # --- Exception handlers ---
    # NOTE(review): Starlette selects a handler by walking the exception
    # class MRO (most specific class wins), not by registration order, so
    # fail2ban network errors get their dedicated 502 handlers rather than
    # falling through to the generic 500 handler.
    app.add_exception_handler(Fail2BanConnectionError, _fail2ban_connection_handler)  # type: ignore[arg-type]
    app.add_exception_handler(Fail2BanProtocolError, _fail2ban_protocol_handler)  # type: ignore[arg-type]
    app.add_exception_handler(Exception, _unhandled_exception_handler)

    # --- Routers ---
    # Router order only affects OpenAPI listing here; no overlapping paths
    # are visible in this module, so routing is order-independent.
    app.include_router(health.router)
    app.include_router(setup.router)
    app.include_router(auth.router)
    app.include_router(dashboard.router)
    app.include_router(jails.router)
    app.include_router(bans.router)
    app.include_router(geo.router)
    app.include_router(config.router)
    app.include_router(file_config.router)
    app.include_router(server.router)
    app.include_router(history.router)
    app.include_router(blocklist.router)

    return app
|
||||
1
backend/app/models/__init__.py
Normal file
1
backend/app/models/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Pydantic request/response/domain models package."""
|
||||
46
backend/app/models/auth.py
Normal file
46
backend/app/models/auth.py
Normal file
@@ -0,0 +1,46 @@
|
||||
"""Authentication Pydantic models.
|
||||
|
||||
Request, response, and domain models used by the auth router and service.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
|
||||
class LoginRequest(BaseModel):
    """Payload for ``POST /api/auth/login``."""

    # strict=True: no type coercion — the password must arrive as a string.
    model_config = ConfigDict(strict=True)

    password: str = Field(..., description="Master password to authenticate with.")
|
||||
|
||||
|
||||
class LoginResponse(BaseModel):
    """Successful login response.

    The session token is also set as an ``HttpOnly`` cookie by the router.
    This model documents the JSON body for API-first consumers.
    """

    # strict=True: values that would need coercion are rejected.
    model_config = ConfigDict(strict=True)

    token: str = Field(..., description="Session token for use in subsequent requests.")
    expires_at: str = Field(..., description="ISO 8601 UTC expiry timestamp.")
|
||||
|
||||
|
||||
class LogoutResponse(BaseModel):
    """Response body for ``POST /api/auth/logout``."""

    model_config = ConfigDict(strict=True)

    # Fixed confirmation text; API consumers normally rely on the 200 status.
    message: str = Field(default="Logged out successfully.")
|
||||
|
||||
|
||||
class Session(BaseModel):
    """Internal domain model representing a persisted session record."""

    model_config = ConfigDict(strict=True)

    id: int = Field(..., description="Auto-incremented row ID.")
    token: str = Field(..., description="Opaque session token.")
    # Timestamps are ISO 8601 strings; the auth dependency compares
    # expires_at lexicographically, which is chronologically correct only
    # while the format stays consistent.
    created_at: str = Field(..., description="ISO 8601 UTC creation timestamp.")
    expires_at: str = Field(..., description="ISO 8601 UTC expiry timestamp.")
|
||||
335
backend/app/models/ban.py
Normal file
335
backend/app/models/ban.py
Normal file
@@ -0,0 +1,335 @@
|
||||
"""Ban management Pydantic models.
|
||||
|
||||
Request, response, and domain models used by the ban router and service.
|
||||
"""
|
||||
|
||||
import math
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Time-range selector
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: The four supported time-range presets for the dashboard views.
TimeRange = Literal["24h", "7d", "30d", "365d"]

#: Number of seconds represented by each preset (day = 86 400 s).
TIME_RANGE_SECONDS: dict[str, int] = {
    "24h": 86_400,
    "7d": 7 * 86_400,
    "30d": 30 * 86_400,
    "365d": 365 * 86_400,
}
|
||||
|
||||
|
||||
class BanRequest(BaseModel):
    """Payload for ``POST /api/bans`` (ban an IP)."""

    # strict=True: both fields must arrive as strings, no coercion.
    model_config = ConfigDict(strict=True)

    ip: str = Field(..., description="IP address to ban.")
    jail: str = Field(..., description="Jail in which to apply the ban.")
|
||||
|
||||
|
||||
class UnbanRequest(BaseModel):
    """Payload for ``DELETE /api/bans`` (unban an IP)."""

    model_config = ConfigDict(strict=True)

    ip: str = Field(..., description="IP address to unban.")
    # NOTE(review): jail=None and unban_all=True both mean "every jail" per
    # the descriptions — confirm the service treats the two spellings alike.
    jail: str | None = Field(
        default=None,
        description="Jail to remove the ban from. ``null`` means all jails.",
    )
    unban_all: bool = Field(
        default=False,
        description="When ``true`` the IP is unbanned from every jail.",
    )
|
||||
|
||||
|
||||
#: Discriminator literal for the origin of a ban.
|
||||
BanOrigin = Literal["blocklist", "selfblock"]
|
||||
|
||||
#: Jail name used by the blocklist import service.
|
||||
BLOCKLIST_JAIL: str = "blocklist-import"
|
||||
|
||||
|
||||
def _derive_origin(jail: str) -> BanOrigin:
|
||||
"""Derive the ban origin from the jail name.
|
||||
|
||||
Args:
|
||||
jail: The jail that issued the ban.
|
||||
|
||||
Returns:
|
||||
``"blocklist"`` when the jail is the dedicated blocklist-import
|
||||
jail, ``"selfblock"`` otherwise.
|
||||
"""
|
||||
return "blocklist" if jail == BLOCKLIST_JAIL else "selfblock"
|
||||
|
||||
|
||||
class Ban(BaseModel):
    """Domain model representing a single active or historical ban record."""

    # strict=True: no implicit type coercion during validation.
    model_config = ConfigDict(strict=True)

    ip: str = Field(..., description="Banned IP address.")
    jail: str = Field(..., description="Jail that issued the ban.")
    banned_at: str = Field(..., description="ISO 8601 UTC timestamp of the ban.")
    # None signals a permanent ban.
    expires_at: str | None = Field(
        default=None,
        description="ISO 8601 UTC expiry timestamp, or ``null`` if permanent.",
    )
    ban_count: int = Field(..., ge=1, description="Number of times this IP was banned.")
    country: str | None = Field(
        default=None,
        description="ISO 3166-1 alpha-2 country code resolved from the IP.",
    )
    origin: BanOrigin = Field(
        ...,
        description="Whether this ban came from a blocklist import or fail2ban itself.",
    )
|
||||
|
||||
|
||||
class BanResponse(BaseModel):
    """Response containing a single ban record."""

    model_config = ConfigDict(strict=True)

    # The wrapped ban record (see :class:`Ban`).
    ban: Ban
|
||||
|
||||
|
||||
class BanListResponse(BaseModel):
    """Paginated list of ban records."""

    model_config = ConfigDict(strict=True)

    bans: list[Ban] = Field(default_factory=list)
    # Total across all pages, not just the returned slice.
    total: int = Field(..., ge=0, description="Total number of matching records.")
|
||||
|
||||
|
||||
class ActiveBan(BaseModel):
    """A currently active ban entry returned by ``GET /api/bans/active``."""

    model_config = ConfigDict(strict=True)

    ip: str = Field(..., description="Banned IP address.")
    jail: str = Field(..., description="Jail holding the ban.")
    # Unlike Ban.banned_at, this may be None — presumably when fail2ban
    # cannot report a start time; confirm against the service layer.
    banned_at: str | None = Field(default=None, description="ISO 8601 UTC start of the ban.")
    expires_at: str | None = Field(
        default=None,
        description="ISO 8601 UTC expiry, or ``null`` if permanent.",
    )
    ban_count: int = Field(default=1, ge=1, description="Running ban count for this IP.")
    country: str | None = Field(default=None, description="ISO 3166-1 alpha-2 country code.")
|
||||
|
||||
|
||||
class ActiveBanListResponse(BaseModel):
    """List of all currently active bans across all jails."""

    model_config = ConfigDict(strict=True)

    bans: list[ActiveBan] = Field(default_factory=list)
    # Total number of active bans (equals len(bans) unless truncated upstream).
    total: int = Field(..., ge=0)
|
||||
|
||||
|
||||
class UnbanAllResponse(BaseModel):
    """Response for ``DELETE /api/bans/all``."""

    model_config = ConfigDict(strict=True)

    message: str = Field(..., description="Human-readable summary of the operation.")
    count: int = Field(..., ge=0, description="Number of IPs that were unbanned.")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Dashboard ban-list view models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class DashboardBanItem(BaseModel):
    """A single row in the dashboard ban-list table.

    Populated from the fail2ban database and enriched with geo data.
    """

    model_config = ConfigDict(strict=True)

    ip: str = Field(..., description="Banned IP address.")
    jail: str = Field(..., description="Jail that issued the ban.")
    banned_at: str = Field(..., description="ISO 8601 UTC timestamp of the ban.")
    service: str | None = Field(
        default=None,
        description="First matched log line — used as context for the ban.",
    )
    # Geo fields are all optional: enrichment is best-effort.
    country_code: str | None = Field(
        default=None,
        description="ISO 3166-1 alpha-2 country code, or ``null`` if unknown.",
    )
    country_name: str | None = Field(
        default=None,
        description="Human-readable country name, or ``null`` if unknown.",
    )
    asn: str | None = Field(
        default=None,
        description="Autonomous System Number string (e.g. ``'AS3320'``).",
    )
    org: str | None = Field(
        default=None,
        description="Organisation name associated with the IP.",
    )
    ban_count: int = Field(..., ge=1, description="How many times this IP was banned.")
    origin: BanOrigin = Field(
        ...,
        description="Whether this ban came from a blocklist import or fail2ban itself.",
    )
|
||||
|
||||
|
||||
class DashboardBanListResponse(BaseModel):
    """Paginated dashboard ban-list response."""

    model_config = ConfigDict(strict=True)

    items: list[DashboardBanItem] = Field(default_factory=list)
    total: int = Field(..., ge=0, description="Total bans in the selected time window.")
    # 1-based page index and its size, echoing the request's pagination.
    page: int = Field(..., ge=1)
    page_size: int = Field(..., ge=1)
|
||||
|
||||
|
||||
class BansByCountryResponse(BaseModel):
    """Response for the bans-by-country aggregation endpoint.

    Contains a per-country ban count, a human-readable country name map, and
    the full (un-paginated) ban list for the selected time window so the
    frontend can render both the world map and its companion table from a
    single request.
    """

    model_config = ConfigDict(strict=True)

    countries: dict[str, int] = Field(
        default_factory=dict,
        description="ISO 3166-1 alpha-2 country code → ban count.",
    )
    country_names: dict[str, str] = Field(
        default_factory=dict,
        description="ISO 3166-1 alpha-2 country code → human-readable country name.",
    )
    bans: list[DashboardBanItem] = Field(
        default_factory=list,
        description="All bans in the selected time window (up to the server limit).",
    )
    total: int = Field(..., ge=0, description="Total ban count in the window.")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Trend endpoint models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: Bucket size in seconds for each time-range preset.
#: Keys must stay in sync with TIME_RANGE_SECONDS / TimeRange.
BUCKET_SECONDS: dict[str, int] = {
    "24h": 3_600,  # 1 hour → 24 buckets
    "7d": 6 * 3_600,  # 6 hours → 28 buckets
    "30d": 86_400,  # 1 day → 30 buckets
    "365d": 7 * 86_400,  # 7 days → ~53 buckets
}

#: Human-readable bucket size label for each time-range preset.
#: Must mirror the durations in BUCKET_SECONDS above.
BUCKET_SIZE_LABEL: dict[str, str] = {
    "24h": "1h",
    "7d": "6h",
    "30d": "1d",
    "365d": "7d",
}
|
||||
|
||||
|
||||
def bucket_count(range_: TimeRange) -> int:
    """Return the number of buckets needed to cover *range_* completely.

    Uses ceiling division so the last bucket is included even when the
    window is not an exact multiple of the bucket size.

    Args:
        range_: One of the supported time-range presets.

    Returns:
        The bucket count for the preset.
    """
    window_seconds = TIME_RANGE_SECONDS[range_]
    bucket_seconds = BUCKET_SECONDS[range_]
    # Ceiling division via negated floor division keeps everything integral.
    return -(-window_seconds // bucket_seconds)
|
||||
|
||||
|
||||
class BanTrendBucket(BaseModel):
    """A single time bucket in the ban trend series."""

    model_config = ConfigDict(strict=True)

    timestamp: str = Field(..., description="ISO 8601 UTC start of the bucket.")
    count: int = Field(..., ge=0, description="Number of bans that started in this bucket.")
|
||||
|
||||
|
||||
class BanTrendResponse(BaseModel):
    """Response for the ``GET /api/dashboard/bans/trend`` endpoint."""

    model_config = ConfigDict(strict=True)

    buckets: list[BanTrendBucket] = Field(
        default_factory=list,
        description="Time-ordered list of ban-count buckets covering the full window.",
    )
    # One of the BUCKET_SIZE_LABEL values for the requested range.
    bucket_size: str = Field(
        ...,
        description="Human-readable bucket size label (e.g. '1h', '6h', '1d', '7d').",
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# By-jail endpoint models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class JailBanCount(BaseModel):
    """A single jail entry in the bans-by-jail aggregation."""

    model_config = ConfigDict(strict=True)

    jail: str = Field(..., description="Jail name.")
    count: int = Field(..., ge=0, description="Number of bans recorded in this jail.")
|
||||
|
||||
|
||||
class BansByJailResponse(BaseModel):
    """Response model for ``GET /api/dashboard/bans/by-jail``."""

    model_config = ConfigDict(strict=True)

    jails: list[JailBanCount] = Field(default_factory=list, description="Jails ordered by ban count descending.")
    total: int = Field(default=..., ge=0, description="Total ban count in the selected window.")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Jail-specific paginated bans
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class JailBannedIpsResponse(BaseModel):
    """Page of active bans for one jail (``GET /api/jails/{name}/banned``).

    Holds only the requested page of active ban entries; geo enrichment is
    applied to this slice alone to stay within lookup rate limits.
    """

    model_config = ConfigDict(strict=True)

    items: list[ActiveBan] = Field(default_factory=list, description="Active ban entries for the current page.")
    total: int = Field(default=..., ge=0, description="Total matching entries (after applying the search filter).")
    page: int = Field(default=..., ge=1, description="Current page number (1-based).")
    page_size: int = Field(default=..., ge=1, description="Number of items per page.")
|
||||
181
backend/app/models/blocklist.py
Normal file
181
backend/app/models/blocklist.py
Normal file
@@ -0,0 +1,181 @@
|
||||
"""Blocklist source and import log Pydantic models.
|
||||
|
||||
Data shapes for blocklist source management, import operations, scheduling,
|
||||
and import log retrieval.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import StrEnum
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Blocklist source
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class BlocklistSource(BaseModel):
    """Domain model for a blocklist source definition."""

    model_config = ConfigDict(strict=True)

    # Primary key assigned by SQLite.
    id: int
    # Human-readable display name.
    name: str
    # URL of the blocklist text file to download.
    url: str
    # Whether this source participates in import runs.
    enabled: bool
    # ISO 8601 UTC timestamps maintained by the repository layer.
    created_at: str
    updated_at: str
|
||||
|
||||
|
||||
class BlocklistSourceCreate(BaseModel):
    """Request body for ``POST /api/blocklists``."""

    model_config = ConfigDict(strict=True)

    name: str = Field(default=..., min_length=1, max_length=100, description="Human-readable source name.")
    url: str = Field(default=..., min_length=1, description="URL of the blocklist file.")
    # New sources are active unless the caller opts out.
    enabled: bool = Field(default=True)
|
||||
|
||||
|
||||
class BlocklistSourceUpdate(BaseModel):
    """Request body for ``PUT /api/blocklists/{id}``; every field may be omitted."""

    model_config = ConfigDict(strict=True)

    # Each ``None`` means "leave this attribute unchanged".
    name: str | None = Field(default=None, min_length=1, max_length=100)
    url: str | None = Field(default=None)
    enabled: bool | None = Field(default=None)
|
||||
|
||||
|
||||
class BlocklistListResponse(BaseModel):
    """Envelope returned by ``GET /api/blocklists``."""

    model_config = ConfigDict(strict=True)

    # All configured sources, enabled or not.
    sources: list[BlocklistSource] = Field(default_factory=list)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Import log
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ImportLogEntry(BaseModel):
    """A single blocklist import run record."""

    model_config = ConfigDict(strict=True)

    # Primary key of the log row.
    id: int
    # Source the run belonged to; None when the source was deleted afterwards
    # — TODO confirm against the schema's FK behaviour.
    source_id: int | None
    # URL that was fetched for this run.
    source_url: str
    # When the run happened (string timestamp).
    timestamp: str
    # Counters for the run outcome.
    ips_imported: int
    ips_skipped: int
    # Error text, or None when the run was clean.
    errors: str | None
|
||||
|
||||
|
||||
class ImportLogListResponse(BaseModel):
    """Paginated envelope for ``GET /api/blocklists/log``."""

    model_config = ConfigDict(strict=True)

    items: list[ImportLogEntry] = Field(default_factory=list)
    total: int = Field(default=..., ge=0)
    page: int = Field(default=1, ge=1)
    page_size: int = Field(default=50, ge=1)
    total_pages: int = Field(default=1, ge=1)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Schedule
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ScheduleFrequency(StrEnum):
|
||||
"""Available import schedule frequency presets."""
|
||||
|
||||
hourly = "hourly"
|
||||
daily = "daily"
|
||||
weekly = "weekly"
|
||||
|
||||
|
||||
class ScheduleConfig(BaseModel):
    """Blocklist import schedule settings.

    Field meaning varies with *frequency*:

    - ``hourly``: ``interval_hours`` sets the run interval (every N hours).
    - ``daily``: ``hour`` and ``minute`` give the daily run time (UTC).
    - ``weekly``: additionally honours ``day_of_week`` (0=Monday … 6=Sunday).
    """

    # Deliberately NOT strict: FastAPI and json.loads() both hand enum values
    # in as plain strings, and strict mode would refuse str→enum coercion.

    frequency: ScheduleFrequency = ScheduleFrequency.daily
    interval_hours: int = Field(default=24, ge=1, le=168, description="Used when frequency=hourly")
    hour: int = Field(default=3, ge=0, le=23, description="UTC hour for daily/weekly runs")
    minute: int = Field(default=0, ge=0, le=59, description="Minute for daily/weekly runs")
    day_of_week: int = Field(default=0, ge=0, le=6, description="Day of week for weekly runs (0=Monday … 6=Sunday)")
|
||||
|
||||
|
||||
class ScheduleInfo(BaseModel):
    """Current schedule configuration together with runtime metadata."""

    model_config = ConfigDict(strict=True)

    # The stored schedule settings.
    config: ScheduleConfig
    # Next/last run timestamps; None when no run is scheduled / has happened.
    next_run_at: str | None
    last_run_at: str | None
    last_run_errors: bool | None = None
    """``True`` if the most recent import had errors, ``False`` if clean, ``None`` if never run."""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Import results
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ImportSourceResult(BaseModel):
    """Result of importing a single blocklist source."""

    model_config = ConfigDict(strict=True)

    # Source identifier; None presumably for ad-hoc/unsaved URLs — TODO confirm.
    source_id: int | None
    # URL that was fetched.
    source_url: str
    # Outcome counters for this source.
    ips_imported: int
    ips_skipped: int
    # Error text, or None when the fetch/import succeeded.
    error: str | None
|
||||
|
||||
|
||||
class ImportRunResult(BaseModel):
    """Aggregated result from a full import run across all enabled sources."""

    model_config = ConfigDict(strict=True)

    # Per-source outcomes, one entry per processed source.
    results: list[ImportSourceResult] = Field(default_factory=list)
    # Totals across all sources in this run.
    total_imported: int
    total_skipped: int
    # Number of sources that reported an error.
    errors_count: int
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Preview
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class PreviewResponse(BaseModel):
    """Response for ``GET /api/blocklists/{id}/preview``."""

    model_config = ConfigDict(strict=True)

    entries: list[str] = Field(default_factory=list, description="Sample of valid IP entries")
    # Counters over the whole fetched file, not just the sample above.
    total_lines: int
    valid_count: int
    skipped_count: int
|
||||
1009
backend/app/models/config.py
Normal file
1009
backend/app/models/config.py
Normal file
File diff suppressed because it is too large
Load Diff
109
backend/app/models/file_config.py
Normal file
109
backend/app/models/file_config.py
Normal file
@@ -0,0 +1,109 @@
|
||||
"""Pydantic models for file-based fail2ban configuration management.
|
||||
|
||||
Covers jail config files (``jail.d/``), filter definitions (``filter.d/``),
|
||||
and action definitions (``action.d/``).
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Jail config file models (Task 4a)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class JailConfigFile(BaseModel):
    """Listing entry for one jail configuration file under ``jail.d/``."""

    model_config = ConfigDict(strict=True)

    name: str = Field(default=..., description="Jail name (file stem, e.g. ``sshd``).")
    filename: str = Field(default=..., description="Actual filename (e.g. ``sshd.conf``).")
    enabled: bool = Field(
        default=...,
        description=(
            "Whether the jail is enabled. Derived from the ``enabled`` key "
            "inside the file; defaults to ``true`` when the key is absent."
        ),
    )
|
||||
|
||||
|
||||
class JailConfigFilesResponse(BaseModel):
    """Envelope returned by ``GET /api/config/jail-files``."""

    model_config = ConfigDict(strict=True)

    files: list[JailConfigFile] = Field(default_factory=list)
    total: int = Field(default=..., ge=0)
|
||||
|
||||
|
||||
class JailConfigFileContent(BaseModel):
    """One jail config file together with its raw text."""

    model_config = ConfigDict(strict=True)

    name: str = Field(default=..., description="Jail name (file stem).")
    filename: str = Field(default=..., description="Actual filename.")
    enabled: bool = Field(default=..., description="Whether the jail is enabled.")
    content: str = Field(default=..., description="Raw file content.")
|
||||
|
||||
|
||||
class JailConfigFileEnabledUpdate(BaseModel):
    """Payload for ``PUT /api/config/jail-files/{name}/enabled``."""

    model_config = ConfigDict(strict=True)

    enabled: bool = Field(..., description="New enabled state for this jail.")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Generic conf-file entry (shared by filter.d and action.d)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ConfFileEntry(BaseModel):
    """Listing entry for one ``.conf`` or ``.local`` file."""

    model_config = ConfigDict(strict=True)

    name: str = Field(default=..., description="Base name without extension (e.g. ``sshd``).")
    filename: str = Field(default=..., description="Actual filename (e.g. ``sshd.conf``).")
|
||||
|
||||
|
||||
class ConfFilesResponse(BaseModel):
    """Response for list endpoints (``GET /api/config/filters`` and ``GET /api/config/actions``)."""

    model_config = ConfigDict(strict=True)

    files: list[ConfFileEntry] = Field(default_factory=list)
    total: int = Field(default=..., ge=0)
|
||||
|
||||
|
||||
class ConfFileContent(BaseModel):
    """One conf file together with its raw text content."""

    model_config = ConfigDict(strict=True)

    name: str = Field(default=..., description="Base name without extension.")
    filename: str = Field(default=..., description="Actual filename.")
    content: str = Field(default=..., description="Raw file content.")
|
||||
|
||||
|
||||
class ConfFileUpdateRequest(BaseModel):
    """Request body for ``PUT /api/config/filters/{name}`` and ``PUT /api/config/actions/{name}``."""

    model_config = ConfigDict(strict=True)

    content: str = Field(default=..., description="New raw file content (must not exceed 512 KB).")
|
||||
|
||||
|
||||
class ConfFileCreateRequest(BaseModel):
    """Request body for ``POST /api/config/filters`` and ``POST /api/config/actions``."""

    model_config = ConfigDict(strict=True)

    name: str = Field(
        default=...,
        description="New file base name (without extension). Must contain only "
        "alphanumeric characters, hyphens, underscores, and dots.",
    )
    content: str = Field(default=..., description="Initial raw file content (must not exceed 512 KB).")
|
||||
66
backend/app/models/geo.py
Normal file
66
backend/app/models/geo.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""Geo and IP lookup Pydantic models.
|
||||
|
||||
Response models for the ``GET /api/geo/lookup/{ip}`` endpoint.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
|
||||
class GeoDetail(BaseModel):
    """Geolocation attributes resolved for an IP address.

    Values come from the ip-api.com free API.
    """

    model_config = ConfigDict(strict=True)

    # Every field is optional: lookups can fail or return partial data.
    country_code: str | None = Field(default=None, description="ISO 3166-1 alpha-2 country code.")
    country_name: str | None = Field(default=None, description="Human-readable country name.")
    asn: str | None = Field(default=None, description="Autonomous System Number (e.g. ``'AS3320'``).")
    org: str | None = Field(default=None, description="Organisation associated with the ASN.")
|
||||
|
||||
|
||||
class GeoCacheStatsResponse(BaseModel):
    """Response for ``GET /api/geo/stats``.

    Exposes diagnostic counters of the geo cache subsystem so operators
    can assess resolution health from the UI or CLI.
    """

    model_config = ConfigDict(strict=True)

    cache_size: int = Field(..., description="Number of positive entries in the in-memory cache.")
    unresolved: int = Field(..., description="Number of geo_cache rows with country_code IS NULL.")
    neg_cache_size: int = Field(..., description="Number of entries in the in-memory negative cache.")
    dirty_size: int = Field(..., description="Number of newly resolved entries not yet flushed to disk.")
|
||||
|
||||
|
||||
class IpLookupResponse(BaseModel):
    """Response for ``GET /api/geo/lookup/{ip}``.

    Combines the IP's current ban status with its geolocation record.
    """

    model_config = ConfigDict(strict=True)

    ip: str = Field(default=..., description="The queried IP address.")
    currently_banned_in: list[str] = Field(
        default_factory=list,
        description="Names of jails where this IP is currently banned.",
    )
    geo: GeoDetail | None = Field(default=None, description="Enriched geographical and network information.")
|
||||
142
backend/app/models/history.py
Normal file
142
backend/app/models/history.py
Normal file
@@ -0,0 +1,142 @@
|
||||
"""Ban history Pydantic models.
|
||||
|
||||
Request, response, and domain models used by the history router and service.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from app.models.ban import TimeRange
|
||||
|
||||
__all__ = [
|
||||
"HistoryBanItem",
|
||||
"HistoryListResponse",
|
||||
"IpDetailResponse",
|
||||
"IpTimelineEvent",
|
||||
"TimeRange",
|
||||
]
|
||||
|
||||
|
||||
class HistoryBanItem(BaseModel):
    """One row of the history ban-list table.

    Built from the fail2ban database; the geolocation columns are filled in
    when enrichment data is available.
    """

    model_config = ConfigDict(strict=True)

    ip: str = Field(default=..., description="Banned IP address.")
    jail: str = Field(default=..., description="Jail that issued the ban.")
    banned_at: str = Field(default=..., description="ISO 8601 UTC timestamp of the ban.")
    ban_count: int = Field(default=..., ge=1, description="How many times this IP was banned.")
    failures: int = Field(default=0, ge=0, description="Total failure count extracted from the ``data`` column.")
    matches: list[str] = Field(default_factory=list, description="Matched log lines stored in the ``data`` column.")
    # Optional geo enrichment; all None when the IP could not be resolved.
    country_code: str | None = Field(default=None, description="ISO 3166-1 alpha-2 country code, or ``null`` if unknown.")
    country_name: str | None = Field(default=None, description="Human-readable country name, or ``null`` if unknown.")
    asn: str | None = Field(default=None, description="Autonomous System Number string (e.g. ``'AS3320'``).")
    org: str | None = Field(default=None, description="Organisation name associated with the IP.")
|
||||
|
||||
|
||||
class HistoryListResponse(BaseModel):
    """One page of the history ban list."""

    model_config = ConfigDict(strict=True)

    items: list[HistoryBanItem] = Field(default_factory=list)
    total: int = Field(default=..., ge=0, description="Total matching records.")
    page: int = Field(default=..., ge=1)
    page_size: int = Field(default=..., ge=1)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Per-IP timeline
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class IpTimelineEvent(BaseModel):
    """One ban event in a per-IP timeline.

    Mirrors a single row of the fail2ban ``bans`` table for that IP.
    """

    model_config = ConfigDict(strict=True)

    jail: str = Field(default=..., description="Jail that triggered this ban.")
    banned_at: str = Field(default=..., description="ISO 8601 UTC timestamp of the ban.")
    ban_count: int = Field(
        default=...,
        ge=1,
        description="Running ban counter for this IP at the time of this event.",
    )
    failures: int = Field(default=0, ge=0, description="Failure count at the time of the ban.")
    matches: list[str] = Field(default_factory=list, description="Matched log lines that triggered the ban.")
|
||||
|
||||
|
||||
class IpDetailResponse(BaseModel):
    """Full historical record for a single IP address.

    Aggregated totals plus a chronological list of every ban event the
    fail2ban database holds for the IP.
    """

    model_config = ConfigDict(strict=True)

    ip: str = Field(default=..., description="The IP address.")
    total_bans: int = Field(default=..., ge=0, description="Total number of ban records.")
    total_failures: int = Field(
        default=...,
        ge=0,
        description="Sum of all failure counts across all ban events.",
    )
    last_ban_at: str | None = Field(
        default=None,
        description="ISO 8601 UTC timestamp of the most recent ban, or ``null``.",
    )
    # Optional geo enrichment; all None when the IP could not be resolved.
    country_code: str | None = Field(default=None, description="ISO 3166-1 alpha-2 country code, or ``null`` if unknown.")
    country_name: str | None = Field(default=None, description="Human-readable country name, or ``null`` if unknown.")
    asn: str | None = Field(default=None, description="Autonomous System Number string.")
    org: str | None = Field(default=None, description="Organisation name associated with the IP.")
    timeline: list[IpTimelineEvent] = Field(
        default_factory=list,
        description="All ban events for this IP, ordered newest-first.",
    )
|
||||
96
backend/app/models/jail.py
Normal file
96
backend/app/models/jail.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""Jail management Pydantic models.
|
||||
|
||||
Request, response, and domain models used by the jails router and service.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from app.models.config import BantimeEscalation
|
||||
|
||||
|
||||
class JailStatus(BaseModel):
    """Live counters reported by fail2ban for one jail."""

    model_config = ConfigDict(strict=True)

    currently_banned: int = Field(default=..., ge=0)
    total_banned: int = Field(default=..., ge=0)
    currently_failed: int = Field(default=..., ge=0)
    total_failed: int = Field(default=..., ge=0)
|
||||
|
||||
|
||||
class Jail(BaseModel):
    """Complete configuration of one fail2ban jail."""

    model_config = ConfigDict(strict=True)

    # Identity and runtime state.
    name: str = Field(default=..., description="Jail name as configured in fail2ban.")
    enabled: bool = Field(default=..., description="Whether the jail is currently active.")
    running: bool = Field(default=..., description="Whether the jail backend is running.")
    idle: bool = Field(default=False, description="Whether the jail is in idle mode.")
    # Log-monitoring configuration.
    backend: str = Field(default=..., description="Log monitoring backend (e.g. polling, systemd).")
    log_paths: list[str] = Field(default_factory=list, description="Monitored log files.")
    fail_regex: list[str] = Field(default_factory=list, description="Failure detection regex patterns.")
    ignore_regex: list[str] = Field(default_factory=list, description="Regex patterns that bypass the ban logic.")
    ignore_ips: list[str] = Field(default_factory=list, description="IP addresses or CIDRs on the ignore list.")
    date_pattern: str | None = Field(default=None, description="Custom date pattern for log parsing.")
    log_encoding: str = Field(default="UTF-8", description="Log file encoding.")
    # Ban policy.
    find_time: int = Field(default=..., description="Time window (seconds) for counting failures.")
    ban_time: int = Field(default=..., description="Duration (seconds) of a ban. -1 means permanent.")
    max_retry: int = Field(default=..., description="Number of failures before a ban is issued.")
    actions: list[str] = Field(default_factory=list, description="Names of actions attached to this jail.")
    bantime_escalation: BantimeEscalation | None = Field(
        default=None,
        description="Incremental ban-time escalation settings, or None if not configured.",
    )
    status: JailStatus | None = Field(default=None, description="Runtime counters.")
|
||||
|
||||
|
||||
class JailSummary(BaseModel):
    """Lightweight jail entry for the overview list."""

    model_config = ConfigDict(strict=True)

    # Identity and runtime state.
    name: str
    enabled: bool
    running: bool
    idle: bool
    # Log-monitoring backend name.
    backend: str
    # Core ban policy (seconds / retry count); see ``Jail`` for semantics.
    find_time: int
    ban_time: int
    max_retry: int
    # Runtime counters, or None when unavailable.
    status: JailStatus | None = None
|
||||
|
||||
|
||||
class JailListResponse(BaseModel):
    """Envelope returned by ``GET /api/jails``."""

    model_config = ConfigDict(strict=True)

    jails: list[JailSummary] = Field(default_factory=list)
    total: int = Field(default=..., ge=0)
|
||||
|
||||
|
||||
class JailDetailResponse(BaseModel):
    """Response for ``GET /api/jails/{name}``."""

    model_config = ConfigDict(strict=True)

    # Full configuration of the requested jail.
    jail: Jail
|
||||
|
||||
|
||||
class JailCommandResponse(BaseModel):
    """Generic response for jail control commands (start, stop, reload, idle)."""

    model_config = ConfigDict(strict=True)

    # Human-readable outcome of the command.
    message: str
    # Name of the jail the command was applied to.
    jail: str
|
||||
|
||||
|
||||
class IgnoreIpRequest(BaseModel):
    """Request body for adding an IP or network to a jail's ignore list."""

    model_config = ConfigDict(strict=True)

    ip: str = Field(default=..., description="IP address or CIDR network to ignore.")
|
||||
58
backend/app/models/server.py
Normal file
58
backend/app/models/server.py
Normal file
@@ -0,0 +1,58 @@
|
||||
"""Server status and health-check Pydantic models.
|
||||
|
||||
Used by the dashboard router, health service, and server settings router.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
|
||||
class ServerStatus(BaseModel):
    """Cached snapshot of fail2ban server health."""

    model_config = ConfigDict(strict=True)

    online: bool = Field(default=..., description="Whether fail2ban is reachable via its socket.")
    version: str | None = Field(default=None, description="fail2ban version string.")
    active_jails: int = Field(default=0, ge=0, description="Number of currently active jails.")
    total_bans: int = Field(default=0, ge=0, description="Aggregated current ban count across all jails.")
    total_failures: int = Field(default=0, ge=0, description="Aggregated current failure count across all jails.")
|
||||
|
||||
|
||||
class ServerStatusResponse(BaseModel):
    """Response for ``GET /api/dashboard/status``."""

    model_config = ConfigDict(strict=True)

    # The cached server health snapshot.
    status: ServerStatus
|
||||
|
||||
|
||||
class ServerSettings(BaseModel):
    """Domain model for fail2ban server-level settings."""

    model_config = ConfigDict(strict=True)

    # Logging configuration of the daemon.
    log_level: str = Field(default=..., description="fail2ban daemon log level.")
    log_target: str = Field(default=..., description="Log destination: STDOUT, STDERR, SYSLOG, or a file path.")
    syslog_socket: str | None = Field(default=None)
    # Ban-history database configuration.
    db_path: str = Field(default=..., description="Path to the fail2ban ban history database.")
    db_purge_age: int = Field(default=..., description="Seconds before old records are purged.")
    db_max_matches: int = Field(default=..., description="Maximum stored matches per ban record.")
|
||||
|
||||
|
||||
class ServerSettingsUpdate(BaseModel):
    """Request body for ``PUT /api/server/settings``."""

    model_config = ConfigDict(strict=True)

    # Each ``None`` means "leave this setting unchanged".
    log_level: str | None = Field(default=None)
    log_target: str | None = Field(default=None)
    db_purge_age: int | None = Field(default=None, ge=0)
    db_max_matches: int | None = Field(default=None, ge=0)
|
||||
|
||||
|
||||
class ServerSettingsResponse(BaseModel):
    """Response for ``GET /api/server/settings``."""

    model_config = ConfigDict(strict=True)

    # The current fail2ban server-level settings.
    settings: ServerSettings
|
||||
64
backend/app/models/setup.py
Normal file
64
backend/app/models/setup.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""Setup wizard Pydantic models.
|
||||
|
||||
Request, response, and domain models for the first-run configuration wizard.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
|
||||
class SetupRequest(BaseModel):
    """Request body for ``POST /api/setup``."""

    model_config = ConfigDict(strict=True)

    master_password: str = Field(
        default=...,
        min_length=8,
        description="Master password that protects the BanGUI interface.",
    )
    database_path: str = Field(default="bangui.db", description="Filesystem path to the BanGUI SQLite application database.")
    fail2ban_socket: str = Field(default="/var/run/fail2ban/fail2ban.sock", description="Path to the fail2ban Unix domain socket.")
    timezone: str = Field(default="UTC", description="IANA timezone name used when displaying timestamps.")
    session_duration_minutes: int = Field(default=60, ge=1, description="Number of minutes a user session remains valid.")
|
||||
|
||||
|
||||
class SetupResponse(BaseModel):
    """Returned once initial setup succeeds."""

    model_config = ConfigDict(strict=True)

    message: str = Field(default="Setup completed successfully. Please log in.")
|
||||
|
||||
|
||||
class SetupTimezoneResponse(BaseModel):
    """Response for ``GET /api/setup/timezone``."""

    model_config = ConfigDict(strict=True)

    timezone: str = Field(default=..., description="Configured IANA timezone identifier.")
|
||||
|
||||
|
||||
class SetupStatusResponse(BaseModel):
    """Tells the client whether first-run setup has already happened."""

    model_config = ConfigDict(strict=True)

    completed: bool = Field(
        default=...,
        description="``True`` if the initial setup has already been performed.",
    )
|
||||
1
backend/app/repositories/__init__.py
Normal file
1
backend/app/repositories/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Database access layer (repositories) package."""
|
||||
187
backend/app/repositories/blocklist_repo.py
Normal file
187
backend/app/repositories/blocklist_repo.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""Blocklist sources repository.
|
||||
|
||||
CRUD operations for the ``blocklist_sources`` table in the application
|
||||
SQLite database. All methods accept a :class:`aiosqlite.Connection` — no
|
||||
ORM, no HTTP exceptions.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
|
||||
async def create_source(
    db: aiosqlite.Connection,
    name: str,
    url: str,
    *,
    enabled: bool = True,
) -> int:
    """Insert a new blocklist source and return its generated id.

    Args:
        db: Active aiosqlite connection.
        name: Human-readable display name.
        url: URL of the blocklist text file.
        enabled: Whether the source is active. Defaults to ``True``.

    Returns:
        The ``ROWID`` / primary key of the new row.
    """
    # SQLite has no native bool; store the flag as 0/1.
    cur = await db.execute(
        """
        INSERT INTO blocklist_sources (name, url, enabled)
        VALUES (?, ?, ?)
        """,
        (name, url, int(enabled)),
    )
    await db.commit()
    return int(cur.lastrowid)  # type: ignore[arg-type]
|
||||
|
||||
|
||||
async def get_source(
    db: aiosqlite.Connection,
    source_id: int,
) -> dict[str, Any] | None:
    """Fetch one blocklist source by primary key.

    Args:
        db: Active aiosqlite connection.
        source_id: Primary key of the source to retrieve.

    Returns:
        A dict keyed by the ``blocklist_sources`` column names, or ``None``
        when no row with that id exists.
    """
    async with db.execute(
        "SELECT id, name, url, enabled, created_at, updated_at FROM blocklist_sources WHERE id = ?",
        (source_id,),
    ) as cur:
        row = await cur.fetchone()
    return None if row is None else _row_to_dict(row)
|
||||
|
||||
|
||||
async def list_sources(db: aiosqlite.Connection) -> list[dict[str, Any]]:
    """Return every blocklist source, ordered by ascending id.

    Args:
        db: Active aiosqlite connection.

    Returns:
        One dict per row in ``blocklist_sources``.
    """
    query = "SELECT id, name, url, enabled, created_at, updated_at FROM blocklist_sources ORDER BY id"
    async with db.execute(query) as cur:
        return [_row_to_dict(row) for row in await cur.fetchall()]
|
||||
|
||||
|
||||
async def list_enabled_sources(db: aiosqlite.Connection) -> list[dict[str, Any]]:
    """Return only the active blocklist sources, ordered by id.

    Args:
        db: Active aiosqlite connection.

    Returns:
        One dict per row in ``blocklist_sources`` where ``enabled = 1``.
    """
    query = "SELECT id, name, url, enabled, created_at, updated_at FROM blocklist_sources WHERE enabled = 1 ORDER BY id"
    async with db.execute(query) as cur:
        return [_row_to_dict(row) for row in await cur.fetchall()]
|
||||
|
||||
|
||||
async def update_source(
    db: aiosqlite.Connection,
    source_id: int,
    *,
    name: str | None = None,
    url: str | None = None,
    enabled: bool | None = None,
) -> bool:
    """Apply a partial update to a blocklist source.

    Only keyword arguments that are not ``None`` become part of the
    ``UPDATE`` statement; the rest of the row is left untouched.

    Args:
        db: Active aiosqlite connection.
        source_id: Primary key of the source to update.
        name: New display name, or ``None`` to leave unchanged.
        url: New URL, or ``None`` to leave unchanged.
        enabled: New enabled flag, or ``None`` to leave unchanged.

    Returns:
        ``True`` if a row was updated, ``False`` if the id does not exist.
    """
    # Collect requested changes in declaration order (name, url, enabled).
    changes: dict[str, Any] = {}
    if name is not None:
        changes["name"] = name
    if url is not None:
        changes["url"] = url
    if enabled is not None:
        changes["enabled"] = int(enabled)

    if not changes:
        # Nothing to update — treat as success only if the row exists.
        return await get_source(db, source_id) is not None

    assignments = [f"{column} = ?" for column in changes]
    assignments.append("updated_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now')")
    values: list[Any] = [*changes.values(), source_id]

    cursor = await db.execute(
        f"UPDATE blocklist_sources SET {', '.join(assignments)} WHERE id = ?",  # noqa: S608
        values,
    )
    await db.commit()
    return cursor.rowcount > 0
|
||||
|
||||
|
||||
async def delete_source(db: aiosqlite.Connection, source_id: int) -> bool:
    """Remove a blocklist source row by id.

    Args:
        db: Active aiosqlite connection.
        source_id: Primary key of the source to remove.

    Returns:
        ``True`` if a row was deleted, ``False`` if the id did not exist.
    """
    result = await db.execute(
        "DELETE FROM blocklist_sources WHERE id = ?",
        (source_id,),
    )
    await db.commit()
    return result.rowcount > 0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _row_to_dict(row: Any) -> dict[str, Any]:
|
||||
"""Convert an aiosqlite row to a plain Python dict.
|
||||
|
||||
Args:
|
||||
row: An :class:`aiosqlite.Row` or sequence returned by a cursor.
|
||||
|
||||
Returns:
|
||||
``dict`` mapping column names to values with ``enabled`` cast to
|
||||
``bool``.
|
||||
"""
|
||||
d: dict[str, Any] = dict(row)
|
||||
d["enabled"] = bool(d["enabled"])
|
||||
return d
|
||||
155
backend/app/repositories/import_log_repo.py
Normal file
155
backend/app/repositories/import_log_repo.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""Import log repository.
|
||||
|
||||
Persists and queries blocklist import run records in the ``import_log``
|
||||
table. All methods are plain async functions that accept a
|
||||
:class:`aiosqlite.Connection`.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
|
||||
async def add_log(
    db: aiosqlite.Connection,
    *,
    source_id: int | None,
    source_url: str,
    ips_imported: int,
    ips_skipped: int,
    errors: str | None,
) -> int:
    """Record one import run in ``import_log`` and return the new row id.

    Args:
        db: Active aiosqlite connection.
        source_id: FK to ``blocklist_sources.id``, or ``None`` if the source
            has been deleted since the import ran.
        source_url: URL that was downloaded.
        ips_imported: Number of IPs successfully applied as bans.
        ips_skipped: Number of lines that were skipped (invalid or CIDR).
        errors: Error message string, or ``None`` if the import succeeded.

    Returns:
        Primary key of the inserted row.
    """
    insert_sql = """
        INSERT INTO import_log (source_id, source_url, ips_imported, ips_skipped, errors)
        VALUES (?, ?, ?, ?, ?)
    """
    result = await db.execute(
        insert_sql,
        (source_id, source_url, ips_imported, ips_skipped, errors),
    )
    await db.commit()
    return int(result.lastrowid)  # type: ignore[arg-type]
|
||||
|
||||
|
||||
async def list_logs(
    db: aiosqlite.Connection,
    *,
    source_id: int | None = None,
    page: int = 1,
    page_size: int = 50,
) -> tuple[list[dict[str, Any]], int]:
    """Fetch a page of import log entries plus the total matching count.

    Args:
        db: Active aiosqlite connection.
        source_id: If given, restrict the result to logs for this source.
        page: 1-based page index.
        page_size: Number of items per page.

    Returns:
        ``(items, total)`` — *items* is the requested page as dicts and
        *total* is the full matching row count (ignoring pagination).
    """
    # Optional filter shared by both queries below.
    if source_id is None:
        where = ""
        filter_params: list[Any] = []
    else:
        where = " WHERE source_id = ?"
        filter_params = [source_id]

    # Total count of matching rows.
    async with db.execute(
        f"SELECT COUNT(*) FROM import_log{where}",  # noqa: S608
        filter_params,
    ) as cursor:
        count_row = await cursor.fetchone()
        total: int = int(count_row[0]) if count_row else 0

    # Page of rows, newest first.
    offset = (page - 1) * page_size
    async with db.execute(
        f"""
        SELECT id, source_id, source_url, timestamp, ips_imported, ips_skipped, errors
        FROM import_log{where}
        ORDER BY id DESC
        LIMIT ? OFFSET ?
        """,  # noqa: S608
        [*filter_params, page_size, offset],
    ) as cursor:
        rows = await cursor.fetchall()
        items = [_row_to_dict(r) for r in rows]

    return items, total
|
||||
|
||||
|
||||
async def get_last_log(db: aiosqlite.Connection) -> dict[str, Any] | None:
    """Fetch the most recent import log entry across all sources.

    Args:
        db: Active aiosqlite connection.

    Returns:
        The latest entry as a dict, or ``None`` when the log is empty.
    """
    latest_sql = """
        SELECT id, source_id, source_url, timestamp, ips_imported, ips_skipped, errors
        FROM import_log
        ORDER BY id DESC
        LIMIT 1
    """
    async with db.execute(latest_sql) as cursor:
        row = await cursor.fetchone()
        if row is None:
            return None
        return _row_to_dict(row)
|
||||
|
||||
|
||||
def compute_total_pages(total: int, page_size: int) -> int:
    """Return the total number of pages for a given total and page size.

    Args:
        total: Total number of items.
        page_size: Items per page; must be positive.

    Returns:
        Number of pages (minimum 1 — an empty result set still renders as
        one empty page).
    """
    # max(1, ...) subsumes the former explicit ``total == 0`` special case
    # and also guarantees at least one page for any non-positive total,
    # where a bare ceil() could return 0 or a negative page count.
    return max(1, math.ceil(total / page_size))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _row_to_dict(row: Any) -> dict[str, Any]:
|
||||
"""Convert an aiosqlite row to a plain Python dict.
|
||||
|
||||
Args:
|
||||
row: An :class:`aiosqlite.Row` or sequence returned by a cursor.
|
||||
|
||||
Returns:
|
||||
Dict mapping column names to Python values.
|
||||
"""
|
||||
return dict(row)
|
||||
100
backend/app/repositories/session_repo.py
Normal file
100
backend/app/repositories/session_repo.py
Normal file
@@ -0,0 +1,100 @@
|
||||
"""Session repository.
|
||||
|
||||
Provides storage, retrieval, and deletion of session records in the
|
||||
``sessions`` table of the application SQLite database.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
from app.models.auth import Session
|
||||
|
||||
|
||||
async def create_session(
    db: aiosqlite.Connection,
    token: str,
    created_at: str,
    expires_at: str,
) -> Session:
    """Persist a new session row and return it as a domain model.

    Args:
        db: Active aiosqlite connection.
        token: Opaque random session token (hex string).
        created_at: ISO 8601 UTC creation timestamp.
        expires_at: ISO 8601 UTC expiry timestamp.

    Returns:
        The newly created :class:`~app.models.auth.Session`.
    """
    insert_cursor = await db.execute(
        "INSERT INTO sessions (token, created_at, expires_at) VALUES (?, ?, ?)",
        (token, created_at, expires_at),
    )
    await db.commit()
    # lastrowid can be None on drivers that don't report it; fall back to 0.
    new_id = insert_cursor.lastrowid
    return Session(
        id=int(new_id) if new_id else 0,
        token=token,
        created_at=created_at,
        expires_at=expires_at,
    )
|
||||
|
||||
|
||||
async def get_session(db: aiosqlite.Connection, token: str) -> Session | None:
    """Fetch a session record by its token.

    Args:
        db: Active aiosqlite connection.
        token: The session token to retrieve.

    Returns:
        The :class:`~app.models.auth.Session` if found, else ``None``.
    """
    async with db.execute(
        "SELECT id, token, created_at, expires_at FROM sessions WHERE token = ?",
        (token,),
    ) as cursor:
        row = await cursor.fetchone()

    if row is None:
        return None

    session_id, stored_token, created, expires = row
    return Session(
        id=int(session_id),
        token=str(stored_token),
        created_at=str(created),
        expires_at=str(expires),
    )
|
||||
|
||||
|
||||
async def delete_session(db: aiosqlite.Connection, token: str) -> None:
    """Remove a session row by token (logout / expiry clean-up).

    Args:
        db: Active aiosqlite connection.
        token: The session token to remove.
    """
    delete_sql = "DELETE FROM sessions WHERE token = ?"
    await db.execute(delete_sql, (token,))
    await db.commit()
|
||||
|
||||
|
||||
async def delete_expired_sessions(db: aiosqlite.Connection, now_iso: str) -> int:
    """Purge every session whose ``expires_at`` is at or before the cutoff.

    Args:
        db: Active aiosqlite connection.
        now_iso: Current UTC time as ISO 8601 string used as the cutoff.

    Returns:
        Number of rows deleted.
    """
    result = await db.execute(
        "DELETE FROM sessions WHERE expires_at <= ?",
        (now_iso,),
    )
    await db.commit()
    return int(result.rowcount)
|
||||
71
backend/app/repositories/settings_repo.py
Normal file
71
backend/app/repositories/settings_repo.py
Normal file
@@ -0,0 +1,71 @@
|
||||
"""Settings repository.
|
||||
|
||||
Provides CRUD operations for the ``settings`` key-value table in the
|
||||
application SQLite database. All methods are plain async functions that
|
||||
accept a :class:`aiosqlite.Connection` — no ORM, no HTTP exceptions.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
|
||||
async def get_setting(db: aiosqlite.Connection, key: str) -> str | None:
    """Look up the value stored under *key*.

    Args:
        db: Active aiosqlite connection.
        key: The setting key to look up.

    Returns:
        The stored value string, or ``None`` if the key is absent.
    """
    async with db.execute(
        "SELECT value FROM settings WHERE key = ?",
        (key,),
    ) as cursor:
        row = await cursor.fetchone()
        if row is None:
            return None
        return str(row[0])
|
||||
|
||||
|
||||
async def set_setting(db: aiosqlite.Connection, key: str, value: str) -> None:
    """Store *value* under *key*, replacing any existing entry.

    Args:
        db: Active aiosqlite connection.
        key: The setting key.
        value: The value to store.
    """
    upsert_sql = "INSERT OR REPLACE INTO settings (key, value) VALUES (?, ?)"
    await db.execute(upsert_sql, (key, value))
    await db.commit()
|
||||
|
||||
|
||||
async def delete_setting(db: aiosqlite.Connection, key: str) -> None:
    """Remove the setting stored under *key*, if any.

    Args:
        db: Active aiosqlite connection.
        key: The setting key to remove.
    """
    delete_sql = "DELETE FROM settings WHERE key = ?"
    await db.execute(delete_sql, (key,))
    await db.commit()
|
||||
|
||||
|
||||
async def get_all_settings(db: aiosqlite.Connection) -> dict[str, str]:
    """Fetch every stored setting as a plain ``dict``.

    Args:
        db: Active aiosqlite connection.

    Returns:
        A dictionary mapping every stored key to its value.
    """
    async with db.execute("SELECT key, value FROM settings") as cursor:
        rows = await cursor.fetchall()
        return {str(key): str(value) for key, value in rows}
|
||||
1
backend/app/routers/__init__.py
Normal file
1
backend/app/routers/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""FastAPI routers package."""
|
||||
129
backend/app/routers/auth.py
Normal file
129
backend/app/routers/auth.py
Normal file
@@ -0,0 +1,129 @@
|
||||
"""Authentication router.
|
||||
|
||||
``POST /api/auth/login`` — verify master password and issue a session.
|
||||
``POST /api/auth/logout`` — revoke the current session.
|
||||
|
||||
The session token is returned both in the JSON body (for API-first
|
||||
consumers) and as an ``HttpOnly`` cookie (for the browser SPA).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import structlog
|
||||
from fastapi import APIRouter, HTTPException, Request, Response, status
|
||||
|
||||
from app.dependencies import DbDep, SettingsDep, invalidate_session_cache
|
||||
from app.models.auth import LoginRequest, LoginResponse, LogoutResponse
|
||||
from app.services import auth_service
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
router = APIRouter(prefix="/api/auth", tags=["auth"])
|
||||
|
||||
_COOKIE_NAME = "bangui_session"
|
||||
|
||||
|
||||
@router.post(
    "/login",
    response_model=LoginResponse,
    summary="Authenticate with the master password",
)
async def login(
    body: LoginRequest,
    response: Response,
    db: DbDep,
    settings: SettingsDep,
) -> LoginResponse:
    """Verify the master password and return a session token.

    On success the token is also set as an ``HttpOnly`` ``SameSite=Lax``
    cookie so the browser SPA benefits from automatic credential handling.

    Args:
        body: Login request validated by Pydantic.
        response: FastAPI response object used to set the cookie.
        db: Injected aiosqlite connection.
        settings: Application settings (used for session duration).

    Returns:
        :class:`~app.models.auth.LoginResponse` containing the token.

    Raises:
        HTTPException: 401 if the password is incorrect.
    """
    try:
        session = await auth_service.login(
            db,
            password=body.password,
            session_duration_minutes=settings.session_duration_minutes,
        )
    except ValueError as err:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=str(err),
        ) from err

    cookie_lifetime_seconds = settings.session_duration_minutes * 60
    response.set_cookie(
        key=_COOKIE_NAME,
        value=session.token,
        httponly=True,
        samesite="lax",
        secure=False,  # Set to True in production behind HTTPS
        max_age=cookie_lifetime_seconds,
    )
    return LoginResponse(token=session.token, expires_at=session.expires_at)
|
||||
|
||||
|
||||
@router.post(
    "/logout",
    response_model=LogoutResponse,
    summary="Revoke the current session",
)
async def logout(
    request: Request,
    response: Response,
    db: DbDep,
) -> LogoutResponse:
    """Invalidate the active session.

    The session token is read from the ``bangui_session`` cookie or the
    ``Authorization: Bearer`` header. A request carrying no token is
    treated as an already-successful logout (the endpoint is idempotent).

    Args:
        request: FastAPI request (used to extract the token).
        response: FastAPI response (used to clear the cookie).
        db: Injected aiosqlite connection.

    Returns:
        :class:`~app.models.auth.LogoutResponse`.
    """
    token = _extract_token(request)
    if token:
        # Remove the server-side record first, then drop the cached entry.
        await auth_service.logout(db, token)
        invalidate_session_cache(token)
    response.delete_cookie(key=_COOKIE_NAME)
    return LogoutResponse()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _extract_token(request: Request) -> str | None:
    """Extract the session token from cookie or Authorization header.

    The session cookie takes precedence; otherwise a ``Bearer`` scheme in
    the ``Authorization`` header is accepted.

    Args:
        request: The incoming FastAPI request.

    Returns:
        The token string, or ``None`` if absent.
    """
    token: str | None = request.cookies.get(_COOKIE_NAME)
    if token:
        return token
    auth_header: str = request.headers.get("Authorization", "")
    if auth_header.startswith("Bearer "):
        # str.removeprefix replaces the fragile manual slice arithmetic
        # (``auth_header[len("Bearer "):]``) with the idiomatic form.
        return auth_header.removeprefix("Bearer ")
    return None
|
||||
234
backend/app/routers/bans.py
Normal file
234
backend/app/routers/bans.py
Normal file
@@ -0,0 +1,234 @@
|
||||
"""Bans router.
|
||||
|
||||
Manual ban and unban operations and the active-bans overview:
|
||||
|
||||
* ``GET /api/bans/active`` — list all currently banned IPs
|
||||
* ``POST /api/bans`` — ban an IP in a specific jail
|
||||
* ``DELETE /api/bans`` — unban an IP from one or all jails
|
||||
* ``DELETE /api/bans/all`` — unban every currently banned IP across all jails
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request, status
|
||||
|
||||
from app.dependencies import AuthDep
|
||||
from app.models.ban import ActiveBanListResponse, BanRequest, UnbanAllResponse, UnbanRequest
|
||||
from app.models.jail import JailCommandResponse
|
||||
from app.services import jail_service
|
||||
from app.services.jail_service import JailNotFoundError, JailOperationError
|
||||
from app.utils.fail2ban_client import Fail2BanConnectionError
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api/bans", tags=["Bans"])
|
||||
|
||||
|
||||
def _bad_gateway(exc: Exception) -> HTTPException:
    """Build the 502 response used when fail2ban cannot be reached.

    Args:
        exc: The underlying connection error.

    Returns:
        :class:`fastapi.HTTPException` with status 502.
    """
    detail = f"Cannot reach fail2ban: {exc}"
    return HTTPException(
        status_code=status.HTTP_502_BAD_GATEWAY,
        detail=detail,
    )
|
||||
|
||||
|
||||
@router.get(
    "/active",
    response_model=ActiveBanListResponse,
    summary="List all currently banned IPs across all jails",
)
async def get_active_bans(
    request: Request,
    _auth: AuthDep,
) -> ActiveBanListResponse:
    """Return every IP that is currently banned across all fail2ban jails.

    Each entry includes the jail name, ban start time, expiry time, and
    enriched geolocation data (country code).

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.ban.ActiveBanListResponse` with all active bans.

    Raises:
        HTTPException: 502 when fail2ban is unreachable.
    """
    app_state = request.app.state
    socket_path: str = app_state.settings.fail2ban_socket
    http_session: aiohttp.ClientSession = app_state.http_session

    try:
        return await jail_service.get_active_bans(
            socket_path,
            http_session=http_session,
            app_db=app_state.db,
        )
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
|
||||
|
||||
|
||||
@router.post(
    "",
    status_code=status.HTTP_201_CREATED,
    response_model=JailCommandResponse,
    summary="Ban an IP address in a specific jail",
)
async def ban_ip(
    request: Request,
    _auth: AuthDep,
    body: BanRequest,
) -> JailCommandResponse:
    """Ban an IP address in the specified fail2ban jail.

    The IP address is validated before the command is sent. IPv4 and
    IPv6 addresses are both accepted.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        body: Payload containing the IP address and target jail.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the ban.

    Raises:
        HTTPException: 400 when the IP address is invalid.
        HTTPException: 404 when the specified jail does not exist.
        HTTPException: 409 when fail2ban reports the ban failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    socket_path: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.ban_ip(socket_path, body.jail, body.ip)
        confirmation = JailCommandResponse(
            message=f"IP {body.ip!r} banned in jail {body.jail!r}.",
            jail=body.jail,
        )
        return confirmation
    except ValueError as err:
        # Malformed IP address — reject before touching fail2ban state.
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(err),
        ) from err
    except JailNotFoundError:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Jail not found: {body.jail!r}",
        ) from None
    except JailOperationError as err:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=str(err),
        ) from err
    except Fail2BanConnectionError as err:
        raise _bad_gateway(err) from err
|
||||
|
||||
|
||||
@router.delete(
    "",
    response_model=JailCommandResponse,
    summary="Unban an IP address from one or all jails",
)
async def unban_ip(
    request: Request,
    _auth: AuthDep,
    body: UnbanRequest,
) -> JailCommandResponse:
    """Unban an IP address from a specific jail or all jails.

    When ``unban_all`` is ``true`` the IP is removed from every jail using
    fail2ban's global unban command. When ``jail`` is specified only that
    jail is targeted. If neither ``unban_all`` nor ``jail`` is provided the
    IP is unbanned from all jails (equivalent to ``unban_all=true``).

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        body: Payload with the IP address, optional jail, and unban_all flag.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the unban.

    Raises:
        HTTPException: 400 when the IP address is invalid.
        HTTPException: 404 when the specified jail does not exist.
        HTTPException: 409 when fail2ban reports the unban failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    socket_path: str = request.app.state.settings.fail2ban_socket

    # Resolve the target jail; None means "every jail".
    if body.unban_all or body.jail is None:
        target_jail: str | None = None
    else:
        target_jail = body.jail

    try:
        await jail_service.unban_ip(socket_path, body.ip, jail=target_jail)
        scope = f"jail {target_jail!r}" if target_jail else "all jails"
        return JailCommandResponse(
            message=f"IP {body.ip!r} unbanned from {scope}.",
            jail=target_jail or "*",
        )
    except ValueError as err:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(err),
        ) from err
    except JailNotFoundError:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Jail not found: {target_jail!r}",
        ) from None
    except JailOperationError as err:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=str(err),
        ) from err
    except Fail2BanConnectionError as err:
        raise _bad_gateway(err) from err
|
||||
|
||||
|
||||
@router.delete(
    "/all",
    response_model=UnbanAllResponse,
    summary="Unban every currently banned IP across all jails",
)
async def unban_all(
    request: Request,
    _auth: AuthDep,
) -> UnbanAllResponse:
    """Remove all active bans from every fail2ban jail in a single operation.

    Uses fail2ban's ``unban --all`` command to atomically clear every active
    ban across all jails. Returns the number of IPs that were unbanned.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.ban.UnbanAllResponse` with the count of
        unbanned IPs.

    Raises:
        HTTPException: 502 when fail2ban is unreachable.
    """
    socket_path: str = request.app.state.settings.fail2ban_socket
    try:
        count: int = await jail_service.unban_all_ips(socket_path)
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    # Pluralize "address" to match the count in the confirmation message.
    plural_suffix = 'es' if count != 1 else ''
    return UnbanAllResponse(
        message=f"All bans cleared. {count} IP address{plural_suffix} unbanned.",
        count=count,
    )
|
||||
370
backend/app/routers/blocklist.py
Normal file
370
backend/app/routers/blocklist.py
Normal file
@@ -0,0 +1,370 @@
|
||||
"""Blocklist router.
|
||||
|
||||
Manages external IP blocklist sources, triggers manual imports, and exposes
|
||||
the import schedule and log:
|
||||
|
||||
* ``GET /api/blocklists`` — list all sources
|
||||
* ``POST /api/blocklists`` — add a source
|
||||
* ``GET /api/blocklists/import`` — (reserved; use POST)
|
||||
* ``POST /api/blocklists/import`` — trigger a manual import now
|
||||
* ``GET /api/blocklists/schedule`` — get current schedule + next run
|
||||
* ``PUT /api/blocklists/schedule`` — update schedule
|
||||
* ``GET /api/blocklists/log`` — paginated import log
|
||||
* ``GET /api/blocklists/{id}`` — get a single source
|
||||
* ``PUT /api/blocklists/{id}`` — edit a source
|
||||
* ``DELETE /api/blocklists/{id}`` — remove a source
|
||||
* ``GET /api/blocklists/{id}/preview`` — preview the blocklist contents
|
||||
|
||||
Note: static path segments (``/import``, ``/schedule``, ``/log``) are
|
||||
registered *before* the ``/{id}`` routes so FastAPI resolves them correctly.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Annotated
|
||||
|
||||
import aiosqlite
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, Request, status
|
||||
|
||||
from app.dependencies import AuthDep, get_db
|
||||
from app.models.blocklist import (
|
||||
BlocklistListResponse,
|
||||
BlocklistSource,
|
||||
BlocklistSourceCreate,
|
||||
BlocklistSourceUpdate,
|
||||
ImportLogListResponse,
|
||||
ImportRunResult,
|
||||
PreviewResponse,
|
||||
ScheduleConfig,
|
||||
ScheduleInfo,
|
||||
)
|
||||
from app.repositories import import_log_repo
|
||||
from app.services import blocklist_service
|
||||
from app.tasks import blocklist_import as blocklist_import_task
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api/blocklists", tags=["Blocklists"])
|
||||
|
||||
DbDep = Annotated[aiosqlite.Connection, Depends(get_db)]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Source list + create
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "",
    response_model=BlocklistListResponse,
    summary="List all blocklist sources",
)
async def list_blocklists(
    db: DbDep,
    _auth: AuthDep,
) -> BlocklistListResponse:
    """Return all configured blocklist source definitions.

    Args:
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.blocklist.BlocklistListResponse` with all sources.
    """
    all_sources = await blocklist_service.list_sources(db)
    return BlocklistListResponse(sources=all_sources)
|
||||
|
||||
|
||||
@router.post(
    "",
    response_model=BlocklistSource,
    status_code=status.HTTP_201_CREATED,
    summary="Add a new blocklist source",
)
async def create_blocklist(
    payload: BlocklistSourceCreate,
    db: DbDep,
    _auth: AuthDep,
) -> BlocklistSource:
    """Create a new blocklist source definition.

    Args:
        payload: New source data (name, url, enabled).
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Returns:
        The newly created :class:`~app.models.blocklist.BlocklistSource`.
    """
    created = await blocklist_service.create_source(
        db,
        payload.name,
        payload.url,
        enabled=payload.enabled,
    )
    return created
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Static sub-paths — must be declared BEFORE /{id}
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.post(
    "/import",
    response_model=ImportRunResult,
    summary="Trigger a manual blocklist import",
)
async def run_import_now(
    request: Request,
    db: DbDep,
    _auth: AuthDep,
) -> ImportRunResult:
    """Download and apply all enabled blocklist sources immediately.

    Args:
        request: Incoming request (used to access shared HTTP session).
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.blocklist.ImportRunResult` with per-source
        results and aggregated counters.
    """
    app_state = request.app.state
    http_session: aiohttp.ClientSession = app_state.http_session
    socket_path: str = app_state.settings.fail2ban_socket
    return await blocklist_service.import_all(db, http_session, socket_path)
|
||||
|
||||
|
||||
@router.get(
    "/schedule",
    response_model=ScheduleInfo,
    summary="Get the current import schedule",
)
async def get_schedule(
    request: Request,
    db: DbDep,
    _auth: AuthDep,
) -> ScheduleInfo:
    """Return the current schedule configuration and runtime metadata.

    The ``next_run_at`` field is read from APScheduler if the job is active.

    Args:
        request: Incoming request (used to query the scheduler).
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.blocklist.ScheduleInfo` with config and run
        times.
    """
    scheduler = request.app.state.scheduler
    job = scheduler.get_job(blocklist_import_task.JOB_ID)

    next_run_at: str | None = None
    if job is not None and job.next_run_time is not None:
        next_run_at = job.next_run_time.isoformat()

    return await blocklist_service.get_schedule_info(db, next_run_at)
|
||||
|
||||
|
||||
@router.put(
    "/schedule",
    response_model=ScheduleInfo,
    summary="Update the import schedule",
)
async def update_schedule(
    payload: ScheduleConfig,
    request: Request,
    db: DbDep,
    _auth: AuthDep,
) -> ScheduleInfo:
    """Store a new schedule configuration and apply it to the scheduler.

    Args:
        payload: New :class:`~app.models.blocklist.ScheduleConfig`.
        request: Incoming request (used to access the scheduler).
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Returns:
        Updated :class:`~app.models.blocklist.ScheduleInfo`.
    """
    await blocklist_service.set_schedule(db, payload)
    # Push the new configuration to the running background job right away.
    blocklist_import_task.reschedule(request.app)

    scheduler = request.app.state.scheduler
    job = scheduler.get_job(blocklist_import_task.JOB_ID)
    next_run_at: str | None = (
        job.next_run_time.isoformat()
        if job is not None and job.next_run_time is not None
        else None
    )
    return await blocklist_service.get_schedule_info(db, next_run_at)
|
||||
|
||||
|
||||
@router.get(
    "/log",
    response_model=ImportLogListResponse,
    summary="Get the paginated import log",
)
async def get_import_log(
    db: DbDep,
    _auth: AuthDep,
    source_id: int | None = Query(default=None, description="Filter by source id"),
    page: int = Query(default=1, ge=1),
    page_size: int = Query(default=50, ge=1, le=200),
) -> ImportLogListResponse:
    """Return one page of the import-run log.

    Args:
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.
        source_id: Optional filter — only show logs for this source.
        page: 1-based page number.
        page_size: Items per page.

    Returns:
        :class:`~app.models.blocklist.ImportLogListResponse`.
    """
    rows, total = await import_log_repo.list_logs(
        db, source_id=source_id, page=page, page_size=page_size
    )
    from app.models.blocklist import ImportLogEntry  # noqa: PLC0415

    entries = [ImportLogEntry.model_validate(row) for row in rows]
    return ImportLogListResponse(
        items=entries,
        total=total,
        page=page,
        page_size=page_size,
        total_pages=import_log_repo.compute_total_pages(total, page_size),
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Single source CRUD — parameterised routes AFTER static sub-paths
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/{source_id}",
    response_model=BlocklistSource,
    summary="Get a single blocklist source",
)
async def get_blocklist(
    source_id: int,
    db: DbDep,
    _auth: AuthDep,
) -> BlocklistSource:
    """Look up one blocklist source by its primary key.

    Args:
        source_id: Primary key of the source.
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Raises:
        HTTPException: 404 if the source does not exist.
    """
    found = await blocklist_service.get_source(db, source_id)
    if found is not None:
        return found
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Blocklist source not found.")
|
||||
|
||||
|
||||
@router.put(
    "/{source_id}",
    response_model=BlocklistSource,
    summary="Update a blocklist source",
)
async def update_blocklist(
    source_id: int,
    payload: BlocklistSourceUpdate,
    db: DbDep,
    _auth: AuthDep,
) -> BlocklistSource:
    """Apply a partial update to an existing blocklist source.

    Args:
        source_id: Primary key of the source to update.
        payload: Fields to update (all optional).
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Raises:
        HTTPException: 404 if the source does not exist.
    """
    result = await blocklist_service.update_source(
        db,
        source_id,
        name=payload.name,
        url=payload.url,
        enabled=payload.enabled,
    )
    if result is not None:
        return result
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Blocklist source not found.")
|
||||
|
||||
|
||||
@router.delete(
    "/{source_id}",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Delete a blocklist source",
)
async def delete_blocklist(
    source_id: int,
    db: DbDep,
    _auth: AuthDep,
) -> None:
    """Remove a blocklist source by its primary key.

    Args:
        source_id: Primary key of the source to remove.
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Raises:
        HTTPException: 404 if the source does not exist.
    """
    if not await blocklist_service.delete_source(db, source_id):
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Blocklist source not found.")
|
||||
|
||||
|
||||
@router.get(
    "/{source_id}/preview",
    response_model=PreviewResponse,
    summary="Preview the contents of a blocklist source",
)
async def preview_blocklist(
    source_id: int,
    request: Request,
    db: DbDep,
    _auth: AuthDep,
) -> PreviewResponse:
    """Fetch a blocklist source and return a validated sample of it.

    The response holds the first
    :data:`~app.services.blocklist_service._PREVIEW_LINES` valid IP
    entries along with validation statistics.

    Args:
        source_id: Primary key of the source to preview.
        request: Incoming request (used to access the HTTP session).
        db: Application database connection (injected).
        _auth: Validated session — enforces authentication.

    Raises:
        HTTPException: 404 if the source does not exist.
        HTTPException: 502 if the URL cannot be reached.
    """
    source = await blocklist_service.get_source(db, source_id)
    if source is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Blocklist source not found.")

    session: aiohttp.ClientSession = request.app.state.http_session
    try:
        preview = await blocklist_service.preview_source(source.url, session)
    except ValueError as exc:
        # The service signals download/parse failures via ValueError.
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"Could not fetch blocklist: {exc}",
        ) from exc
    return preview
|
||||
1692
backend/app/routers/config.py
Normal file
1692
backend/app/routers/config.py
Normal file
File diff suppressed because it is too large
Load Diff
246
backend/app/routers/dashboard.py
Normal file
246
backend/app/routers/dashboard.py
Normal file
@@ -0,0 +1,246 @@
|
||||
"""Dashboard router.
|
||||
|
||||
Provides the ``GET /api/dashboard/status`` endpoint that returns the cached
|
||||
fail2ban server health snapshot. The snapshot is maintained by the
|
||||
background health-check task and refreshed every 30 seconds.
|
||||
|
||||
Also provides ``GET /api/dashboard/bans`` for the dashboard ban-list table,
|
||||
``GET /api/dashboard/bans/by-country`` for country aggregation,
|
||||
``GET /api/dashboard/bans/trend`` for time-bucketed ban counts, and
|
||||
``GET /api/dashboard/bans/by-jail`` for per-jail ban counts.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
|
||||
from fastapi import APIRouter, Query, Request
|
||||
|
||||
from app.dependencies import AuthDep
|
||||
from app.models.ban import (
|
||||
BanOrigin,
|
||||
BansByCountryResponse,
|
||||
BansByJailResponse,
|
||||
BanTrendResponse,
|
||||
DashboardBanListResponse,
|
||||
TimeRange,
|
||||
)
|
||||
from app.models.server import ServerStatus, ServerStatusResponse
|
||||
from app.services import ban_service
|
||||
|
||||
# Router for all dashboard endpoints; mounted under /api/dashboard.
router: APIRouter = APIRouter(prefix="/api/dashboard", tags=["Dashboard"])

# ---------------------------------------------------------------------------
# Default pagination constants
# ---------------------------------------------------------------------------

# Default number of ban rows per page for the dashboard ban table.
_DEFAULT_PAGE_SIZE: int = 100
# Default time-range preset applied when the client sends no ``range`` query.
_DEFAULT_RANGE: TimeRange = "24h"
|
||||
|
||||
|
||||
@router.get(
    "/status",
    response_model=ServerStatusResponse,
    summary="Return the cached fail2ban server status",
)
async def get_server_status(
    request: Request,
    _auth: AuthDep,
) -> ServerStatusResponse:
    """Return the latest cached fail2ban health snapshot.

    A background task refreshes ``app.state.server_status`` every 30
    seconds. Before its first run the attribute is absent, so an
    ``online=False`` placeholder is returned to keep the response
    well-formed.

    Args:
        request: The incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication on this endpoint.

    Returns:
        :class:`~app.models.server.ServerStatusResponse` containing the
        current health snapshot.
    """
    fallback = ServerStatus(online=False)
    snapshot: ServerStatus = getattr(request.app.state, "server_status", fallback)
    return ServerStatusResponse(status=snapshot)
|
||||
|
||||
|
||||
@router.get(
    "/bans",
    response_model=DashboardBanListResponse,
    summary="Return a paginated list of recent bans",
)
async def get_dashboard_bans(
    request: Request,
    _auth: AuthDep,
    range: TimeRange = Query(default=_DEFAULT_RANGE, description="Time-range preset."),
    page: int = Query(default=1, ge=1, description="1-based page number."),
    page_size: int = Query(default=_DEFAULT_PAGE_SIZE, ge=1, le=500, description="Items per page."),
    origin: BanOrigin | None = Query(
        default=None,
        description="Filter by ban origin: 'blocklist' or 'selfblock'. Omit for all.",
    ),
) -> DashboardBanListResponse:
    """Return one page of bans from the selected time window.

    Entries come from the fail2ban database, enriched with geolocation
    data (country, ASN, organisation) via the ip-api.com free API, and
    are sorted newest-first. Geo lookups are answered from the in-memory
    cache only — this GET performs no database writes.

    Args:
        request: The incoming request (used to access ``app.state``).
        _auth: Validated session dependency.
        range: Time-range preset — ``"24h"``, ``"7d"``, ``"30d"``, or
            ``"365d"``.
        page: 1-based page number.
        page_size: Maximum items per page (1–500).
        origin: Optional filter by ban origin.

    Returns:
        :class:`~app.models.ban.DashboardBanListResponse` with paginated
        ban items and the total count for the selected window.
    """
    app_state = request.app.state
    socket_path: str = app_state.settings.fail2ban_socket
    session: aiohttp.ClientSession = app_state.http_session

    return await ban_service.list_bans(
        socket_path,
        range,
        page=page,
        page_size=page_size,
        http_session=session,
        app_db=None,
        origin=origin,
    )
|
||||
|
||||
|
||||
@router.get(
    "/bans/by-country",
    response_model=BansByCountryResponse,
    summary="Return ban counts aggregated by country",
)
async def get_bans_by_country(
    request: Request,
    _auth: AuthDep,
    range: TimeRange = Query(default=_DEFAULT_RANGE, description="Time-range preset."),
    origin: BanOrigin | None = Query(
        default=None,
        description="Filter by ban origin: 'blocklist' or 'selfblock'. Omit for all.",
    ),
) -> BansByCountryResponse:
    """Return per-country ban counts keyed by ISO country code.

    Designed for 10 000+ banned IPs: aggregation is done in SQL
    (``GROUP BY ip``) with batch geo-resolution. The payload includes a
    ``{country_code: count}`` map plus the 200 most recent raw ban rows
    for the companion access table. Geo lookups are served from the
    in-memory cache only; no database writes happen during this GET.

    Args:
        request: The incoming request.
        _auth: Validated session dependency.
        range: Time-range preset.
        origin: Optional filter by ban origin.

    Returns:
        :class:`~app.models.ban.BansByCountryResponse` with per-country
        aggregation and the companion ban list.
    """
    app_state = request.app.state
    socket_path: str = app_state.settings.fail2ban_socket
    session: aiohttp.ClientSession = app_state.http_session

    return await ban_service.bans_by_country(
        socket_path,
        range,
        http_session=session,
        app_db=None,
        origin=origin,
    )
|
||||
|
||||
|
||||
@router.get(
    "/bans/trend",
    response_model=BanTrendResponse,
    summary="Return ban counts aggregated into time buckets",
)
async def get_ban_trend(
    request: Request,
    _auth: AuthDep,
    range: TimeRange = Query(default=_DEFAULT_RANGE, description="Time-range preset."),
    origin: BanOrigin | None = Query(
        default=None,
        description="Filter by ban origin: 'blocklist' or 'selfblock'. Omit for all.",
    ),
) -> BanTrendResponse:
    """Return ban counts split into equal-width time buckets.

    Every bucket in the selected window is present — including empty
    (zero-ban) buckets — so the frontend receives a complete, gap-free
    series suitable for a continuous area or line chart.

    Bucket sizes:

    * ``24h`` → 1-hour buckets (24 total)
    * ``7d`` → 6-hour buckets (28 total)
    * ``30d`` → 1-day buckets (30 total)
    * ``365d`` → 7-day buckets (~53 total)

    Args:
        request: The incoming request (used to access ``app.state``).
        _auth: Validated session dependency.
        range: Time-range preset.
        origin: Optional filter by ban origin.

    Returns:
        :class:`~app.models.ban.BanTrendResponse` with the ordered bucket
        list and the bucket-size label.
    """
    settings = request.app.state.settings
    return await ban_service.ban_trend(settings.fail2ban_socket, range, origin=origin)
|
||||
|
||||
|
||||
@router.get(
    "/bans/by-jail",
    response_model=BansByJailResponse,
    summary="Return ban counts aggregated by jail",
)
async def get_bans_by_jail(
    request: Request,
    _auth: AuthDep,
    range: TimeRange = Query(default=_DEFAULT_RANGE, description="Time-range preset."),
    origin: BanOrigin | None = Query(
        default=None,
        description="Filter by ban origin: 'blocklist' or 'selfblock'. Omit for all.",
    ),
) -> BansByJailResponse:
    """Return per-jail ban counts for the selected time window.

    Queries the fail2ban database and yields jails sorted by ban count
    descending — the data behind the dashboard jail-distribution bar
    chart.

    Args:
        request: The incoming request (used to access ``app.state``).
        _auth: Validated session dependency.
        range: Time-range preset — ``"24h"``, ``"7d"``, ``"30d"``, or
            ``"365d"``.
        origin: Optional filter by ban origin.

    Returns:
        :class:`~app.models.ban.BansByJailResponse` with per-jail counts
        sorted descending and the total for the selected window.
    """
    settings = request.app.state.settings
    return await ban_service.bans_by_jail(settings.fail2ban_socket, range, origin=origin)
|
||||
832
backend/app/routers/file_config.py
Normal file
832
backend/app/routers/file_config.py
Normal file
@@ -0,0 +1,832 @@
|
||||
"""File-based fail2ban configuration router.
|
||||
|
||||
Provides endpoints to list, view, edit, and create fail2ban configuration
|
||||
files directly on the filesystem (``jail.d/``, ``filter.d/``, ``action.d/``).
|
||||
|
||||
Endpoints:
|
||||
* ``GET /api/config/jail-files`` — list all jail config files
|
||||
* ``GET /api/config/jail-files/{filename}`` — get one jail config file (with content)
|
||||
* ``PUT /api/config/jail-files/{filename}`` — overwrite a jail config file
|
||||
* ``PUT /api/config/jail-files/{filename}/enabled`` — enable/disable a jail config
|
||||
* ``GET /api/config/filters/{name}/raw`` — get one filter file raw content
|
||||
* ``PUT /api/config/filters/{name}/raw`` — update a filter file (raw content)
|
||||
* ``POST /api/config/filters/raw`` — create a new filter file (raw content)
|
||||
* ``GET /api/config/filters/{name}/parsed`` — parse a filter file into a structured model
|
||||
* ``PUT /api/config/filters/{name}/parsed`` — update a filter file from a structured model
|
||||
* ``GET /api/config/actions`` — list all action files
|
||||
* ``GET /api/config/actions/{name}/raw`` — get one action file (raw content)
|
||||
* ``PUT /api/config/actions/{name}/raw`` — update an action file (raw content)
|
||||
* ``POST /api/config/actions`` — create a new action file
|
||||
* ``GET /api/config/actions/{name}/parsed`` — parse an action file into a structured model
|
||||
* ``PUT /api/config/actions/{name}/parsed`` — update an action file from a structured model
|
||||
|
||||
Note: ``GET /api/config/filters`` (enriched list) and
|
||||
``GET /api/config/filters/{name}`` (full parsed detail) are handled by the
|
||||
config router (``config.py``), which is registered first and therefore takes
|
||||
precedence. Raw-content read/write variants are at ``/filters/{name}/raw``
|
||||
and ``POST /filters/raw``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Annotated
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Path, Request, status
|
||||
|
||||
from app.dependencies import AuthDep
|
||||
from app.models.config import (
|
||||
ActionConfig,
|
||||
ActionConfigUpdate,
|
||||
FilterConfig,
|
||||
FilterConfigUpdate,
|
||||
JailFileConfig,
|
||||
JailFileConfigUpdate,
|
||||
)
|
||||
from app.models.file_config import (
|
||||
ConfFileContent,
|
||||
ConfFileCreateRequest,
|
||||
ConfFilesResponse,
|
||||
ConfFileUpdateRequest,
|
||||
JailConfigFileContent,
|
||||
JailConfigFileEnabledUpdate,
|
||||
JailConfigFilesResponse,
|
||||
)
|
||||
from app.services import file_config_service
|
||||
from app.services.file_config_service import (
|
||||
ConfigDirError,
|
||||
ConfigFileExistsError,
|
||||
ConfigFileNameError,
|
||||
ConfigFileNotFoundError,
|
||||
ConfigFileWriteError,
|
||||
)
|
||||
|
||||
# Router for file-based config endpoints; mounted under /api/config.
router: APIRouter = APIRouter(prefix="/api/config", tags=["Config"])

# ---------------------------------------------------------------------------
# Path type aliases
# ---------------------------------------------------------------------------

# Path parameter that must include the file extension (jail.d files).
_FilenamePath = Annotated[
    str, Path(description="Config filename including extension (e.g. ``sshd.conf``).")
]
# Path parameter where the extension is optional (filter.d / action.d files).
_NamePath = Annotated[
    str, Path(description="Base name with or without extension (e.g. ``sshd`` or ``sshd.conf``).")
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Error helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _not_found(filename: str) -> HTTPException:
    """Build a 404 error for a missing config file."""
    detail = f"Config file not found: {filename!r}"
    return HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=detail)
|
||||
|
||||
|
||||
def _bad_request(message: str) -> HTTPException:
    """Build a 400 error carrying *message* as its detail."""
    return HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=message)
|
||||
|
||||
|
||||
def _conflict(filename: str) -> HTTPException:
    """Build a 409 error for a config file that already exists."""
    detail = f"Config file already exists: {filename!r}"
    return HTTPException(status_code=status.HTTP_409_CONFLICT, detail=detail)
|
||||
|
||||
|
||||
def _service_unavailable(message: str) -> HTTPException:
    """Build a 503 error carrying *message* as its detail."""
    return HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=message)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Jail config file endpoints (Task 4a)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/jail-files",
    response_model=JailConfigFilesResponse,
    summary="List all jail config files",
)
async def list_jail_config_files(
    request: Request,
    _auth: AuthDep,
) -> JailConfigFilesResponse:
    """List every ``.conf`` and ``.local`` file in ``jail.d/`` with metadata.

    The ``enabled`` field mirrors the ``enabled`` key inside each file
    (defaulting to ``true`` when the key is absent).

    Args:
        request: Incoming request (used for ``app.state.settings``).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.file_config.JailConfigFilesResponse`.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        response = await file_config_service.list_jail_config_files(config_dir)
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
    return response
|
||||
|
||||
|
||||
@router.get(
    "/jail-files/{filename}",
    response_model=JailConfigFileContent,
    summary="Return a single jail config file with its content",
)
async def get_jail_config_file(
    request: Request,
    _auth: AuthDep,
    filename: _FilenamePath,
) -> JailConfigFileContent:
    """Return the metadata and raw content of one jail config file.

    The route path uses the ``{filename}`` template so FastAPI binds the
    ``filename`` path parameter declared below.

    Args:
        request: Incoming request.
        _auth: Validated session.
        filename: Filename including extension (e.g. ``sshd.conf``).

    Returns:
        :class:`~app.models.file_config.JailConfigFileContent`.

    Raises:
        HTTPException: 400 if *filename* is unsafe.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        return await file_config_service.get_jail_config_file(config_dir, filename)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(filename) from None
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
|
||||
|
||||
|
||||
@router.put(
    "/jail-files/{filename}",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Overwrite a jail.d config file with new raw content",
)
async def write_jail_config_file(
    request: Request,
    _auth: AuthDep,
    filename: _FilenamePath,
    body: ConfFileUpdateRequest,
) -> None:
    """Overwrite the raw content of an existing jail.d config file.

    The route path uses the ``{filename}`` template so FastAPI binds the
    ``filename`` path parameter declared below. The change is written
    directly to disk. You must reload fail2ban
    (``POST /api/config/reload``) separately for the change to take effect.

    Args:
        request: Incoming request.
        _auth: Validated session.
        filename: Filename of the jail config file (e.g. ``sshd.conf``).
        body: New raw file content.

    Raises:
        HTTPException: 400 if *filename* is unsafe or content is invalid.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.write_jail_config_file(config_dir, filename, body)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(filename) from None
    except ConfigFileWriteError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
|
||||
|
||||
|
||||
@router.put(
    "/jail-files/{filename}/enabled",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Enable or disable a jail configuration file",
)
async def set_jail_config_file_enabled(
    request: Request,
    _auth: AuthDep,
    filename: _FilenamePath,
    body: JailConfigFileEnabledUpdate,
) -> None:
    """Set the ``enabled = true/false`` key inside a jail config file.

    The route path uses the ``{filename}`` template so FastAPI binds the
    ``filename`` path parameter declared below. The change modifies the
    file on disk. You must reload fail2ban (``POST /api/config/reload``)
    separately for the change to take effect.

    Args:
        request: Incoming request.
        _auth: Validated session.
        filename: Filename of the jail config file (e.g. ``sshd.conf``).
        body: New enabled state.

    Raises:
        HTTPException: 400 if *filename* is unsafe or the operation fails.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.set_jail_config_enabled(
            config_dir, filename, body.enabled
        )
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(filename) from None
    except ConfigFileWriteError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
|
||||
|
||||
|
||||
@router.post(
    "/jail-files",
    response_model=ConfFileContent,
    status_code=status.HTTP_201_CREATED,
    summary="Create a new jail.d config file",
)
async def create_jail_config_file(
    request: Request,
    _auth: AuthDep,
    body: ConfFileCreateRequest,
) -> ConfFileContent:
    """Create a new ``.conf`` file in ``jail.d/``.

    Args:
        request: Incoming request.
        _auth: Validated session.
        body: :class:`~app.models.file_config.ConfFileCreateRequest` with name and content.

    Returns:
        :class:`~app.models.file_config.ConfFileContent` with the created file metadata.

    Raises:
        HTTPException: 400 if the name is unsafe or the content exceeds the size limit.
        HTTPException: 409 if a file with that name already exists.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        created_filename = await file_config_service.create_jail_config_file(config_dir, body)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileExistsError:
        raise _conflict(body.name) from None
    except ConfigFileWriteError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc

    return ConfFileContent(name=body.name, filename=created_filename, content=body.content)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Filter file endpoints (Task 4d)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/filters/{name}/raw",
    response_model=ConfFileContent,
    summary="Return a filter definition file's raw content",
)
async def get_filter_file_raw(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> ConfFileContent:
    """Return the raw bytes of a filter definition file.

    Intended for the raw config editor. For structured parsing with
    active/inactive status use ``GET /api/config/filters/{name}``
    (served by the config router).

    Args:
        request: Incoming request.
        _auth: Validated session.
        name: Base name with or without extension (e.g. ``sshd`` or ``sshd.conf``).

    Returns:
        :class:`~app.models.file_config.ConfFileContent`.

    Raises:
        HTTPException: 400 if *name* is unsafe.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        content = await file_config_service.get_filter_file(config_dir, name)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
    return content
|
||||
|
||||
|
||||
@router.put(
    "/filters/{name}/raw",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Update a filter definition file (raw content)",
)
async def write_filter_file(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    body: ConfFileUpdateRequest,
) -> None:
    """Replace the content of an existing filter definition file.

    Args:
        request: Incoming request.
        _auth: Validated session.
        name: Base name with or without extension.
        body: New file content.

    Raises:
        HTTPException: 400 if *name* is unsafe or content exceeds the size limit.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    target_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.write_filter_file(target_dir, name, body)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigFileWriteError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
|
||||
|
||||
|
||||
@router.post(
    "/filters/raw",
    status_code=status.HTTP_201_CREATED,
    response_model=ConfFileContent,
    summary="Create a new filter definition file (raw content)",
)
async def create_filter_file(
    request: Request,
    _auth: AuthDep,
    body: ConfFileCreateRequest,
) -> ConfFileContent:
    """Create a new ``.conf`` file in ``filter.d/``.

    Args:
        request: Incoming request.
        _auth: Validated session.
        body: Name and initial content for the new file.

    Returns:
        The created :class:`~app.models.file_config.ConfFileContent`.

    Raises:
        HTTPException: 400 if *name* is invalid or content exceeds limit.
        HTTPException: 409 if a file with that name already exists.
        HTTPException: 503 if the config directory is unavailable.
    """
    target_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        created_filename = await file_config_service.create_filter_file(target_dir, body)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileExistsError:
        raise _conflict(body.name) from None
    except ConfigFileWriteError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc

    return ConfFileContent(name=body.name, filename=created_filename, content=body.content)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Action file endpoints (Task 4e)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/actions",
    response_model=ConfFilesResponse,
    summary="List all action definition files",
)
async def list_action_files(
    request: Request,
    _auth: AuthDep,
) -> ConfFilesResponse:
    """Enumerate every ``.conf`` and ``.local`` file under ``action.d/``.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).

    Returns:
        :class:`~app.models.file_config.ConfFilesResponse`.

    Raises:
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        return await file_config_service.list_action_files(cfg_dir)
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
@router.get(
    "/actions/{name}/raw",
    response_model=ConfFileContent,
    summary="Return an action definition file with its content",
)
async def get_action_file(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> ConfFileContent:
    """Fetch the raw content of one action definition file.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        name: Action base name, with or without its extension.

    Returns:
        :class:`~app.models.file_config.ConfFileContent`.

    Raises:
        HTTPException: 400 for an unsafe *name*.
        HTTPException: 404 when no such action file exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        return await file_config_service.get_action_file(cfg_dir, name)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
@router.put(
    "/actions/{name}/raw",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Update an action definition file",
)
async def write_action_file(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    body: ConfFileUpdateRequest,
) -> None:
    """Replace the stored content of an existing action definition file.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        name: Action base name, with or without its extension.
        body: Replacement file content.

    Raises:
        HTTPException: 400 for an unsafe *name* or content over the size limit.
        HTTPException: 404 when no such action file exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.write_action_file(cfg_dir, name, body)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigFileWriteError as err:
        raise _bad_request(str(err)) from err
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
@router.post(
    "/actions",
    status_code=status.HTTP_201_CREATED,
    response_model=ConfFileContent,
    summary="Create a new action definition file",
)
async def create_action_file(
    request: Request,
    _auth: AuthDep,
    body: ConfFileCreateRequest,
) -> ConfFileContent:
    """Create a fresh ``.conf`` file under ``action.d/``.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        body: Desired name plus initial content for the new file.

    Returns:
        The created :class:`~app.models.file_config.ConfFileContent`.

    Raises:
        HTTPException: 400 for an invalid name or content over the size limit.
        HTTPException: 409 when the file already exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        created_name = await file_config_service.create_action_file(cfg_dir, body)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileExistsError:
        raise _conflict(body.name) from None
    except ConfigFileWriteError as err:
        raise _bad_request(str(err)) from err
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err

    return ConfFileContent(
        name=body.name,
        filename=created_name,
        content=body.content,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parsed filter endpoints (Task 2.1)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/filters/{name}/parsed",
    response_model=FilterConfig,
    summary="Return a filter file parsed into a structured model",
)
async def get_parsed_filter(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> FilterConfig:
    """Read a filter definition file and expose it as structured fields.

    The file under ``filter.d/`` is parsed as fail2ban INI and serialized
    to a :class:`~app.models.config.FilterConfig` — the input model for
    the form-based filter editor (Task 2.3).

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        name: Filter base name (e.g. ``sshd`` or ``sshd.conf``).

    Returns:
        :class:`~app.models.config.FilterConfig`.

    Raises:
        HTTPException: 400 for an unsafe *name*.
        HTTPException: 404 when no such filter file exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        return await file_config_service.get_parsed_filter_file(cfg_dir, name)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
@router.put(
    "/filters/{name}/parsed",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Update a filter file from a structured model",
)
async def update_parsed_filter(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    body: FilterConfigUpdate,
) -> None:
    """Merge a partial structured update into a filter definition file.

    ``null`` fields in the request body leave the corresponding file values
    untouched; the merged result is re-serialized to fail2ban INI format.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        name: Base name of the filter to update.
        body: Partial :class:`~app.models.config.FilterConfigUpdate`.

    Raises:
        HTTPException: 400 for an unsafe *name* or content over the size limit.
        HTTPException: 404 when no such filter file exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.update_parsed_filter_file(cfg_dir, name, body)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigFileWriteError as err:
        raise _bad_request(str(err)) from err
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parsed action endpoints (Task 3.1)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/actions/{name}/parsed",
    response_model=ActionConfig,
    summary="Return an action file parsed into a structured model",
)
async def get_parsed_action(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> ActionConfig:
    """Read an action definition file and expose it as structured fields.

    The file under ``action.d/`` is parsed as fail2ban INI and serialized
    to a :class:`~app.models.config.ActionConfig` — the input model for
    the form-based action editor (Task 3.3).

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        name: Action base name (e.g. ``iptables`` or ``iptables.conf``).

    Returns:
        :class:`~app.models.config.ActionConfig`.

    Raises:
        HTTPException: 400 for an unsafe *name*.
        HTTPException: 404 when no such action file exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        return await file_config_service.get_parsed_action_file(cfg_dir, name)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
@router.put(
    "/actions/{name}/parsed",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Update an action file from a structured model",
)
async def update_parsed_action(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    body: ActionConfigUpdate,
) -> None:
    """Merge a partial structured update into an action definition file.

    ``null`` fields in the request body leave the corresponding file values
    untouched; the merged result is re-serialized to fail2ban INI format.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session dependency (authentication gate).
        name: Base name of the action to update.
        body: Partial :class:`~app.models.config.ActionConfigUpdate`.

    Raises:
        HTTPException: 400 for an unsafe *name* or content over the size limit.
        HTTPException: 404 when no such action file exists.
        HTTPException: 503 when the config directory cannot be accessed.
    """
    cfg_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.update_parsed_action_file(cfg_dir, name, body)
    except ConfigFileNameError as err:
        raise _bad_request(str(err)) from err
    except ConfigFileNotFoundError:
        raise _not_found(name) from None
    except ConfigFileWriteError as err:
        raise _bad_request(str(err)) from err
    except ConfigDirError as err:
        raise _service_unavailable(str(err)) from err
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parsed jail file endpoints (Task 6.1)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    # Fix: the path template must declare {filename} so the Path-annotated
    # `filename` parameter below can bind; the previous literal path
    # ("/jail-files/(unknown)/parsed") had no placeholder.
    "/jail-files/{filename}/parsed",
    response_model=JailFileConfig,
    summary="Return a jail.d file parsed into a structured model",
)
async def get_parsed_jail_file(
    request: Request,
    _auth: AuthDep,
    filename: _NamePath,
) -> JailFileConfig:
    """Parse a jail.d config file and return its structured fields.

    The file is read from ``jail.d/``, parsed as fail2ban INI format, and
    returned as a :class:`~app.models.config.JailFileConfig` JSON object.
    This is the input model for the form-based jail file editor (Task 6.2).

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session — enforces authentication.
        filename: Filename including extension (e.g. ``sshd.conf``).

    Returns:
        :class:`~app.models.config.JailFileConfig`.

    Raises:
        HTTPException: 400 if *filename* is unsafe.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        return await file_config_service.get_parsed_jail_file(config_dir, filename)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(filename) from None
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
|
||||
|
||||
|
||||
@router.put(
    # Fix: declare the {filename} placeholder so the Path-annotated
    # `filename` parameter can bind (path previously lacked it).
    "/jail-files/{filename}/parsed",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Update a jail.d file from a structured model",
)
async def update_parsed_jail_file(
    request: Request,
    _auth: AuthDep,
    filename: _NamePath,
    body: JailFileConfigUpdate,
) -> None:
    """Apply a partial structured update to a jail.d config file.

    Fields set to ``null`` in the request body are left unchanged. The file
    is re-serialized to fail2ban INI format after merging.

    Args:
        request: Incoming request; supplies application settings.
        _auth: Validated session — enforces authentication.
        filename: Filename including extension (e.g. ``sshd.conf``).
        body: Partial :class:`~app.models.config.JailFileConfigUpdate`.

    Raises:
        HTTPException: 400 if *filename* is unsafe or content exceeds size limit.
        HTTPException: 404 if the file does not exist.
        HTTPException: 503 if the config directory is unavailable.
    """
    config_dir: str = request.app.state.settings.fail2ban_config_dir
    try:
        await file_config_service.update_parsed_jail_file(config_dir, filename, body)
    except ConfigFileNameError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigFileNotFoundError:
        raise _not_found(filename) from None
    except ConfigFileWriteError as exc:
        raise _bad_request(str(exc)) from exc
    except ConfigDirError as exc:
        raise _service_unavailable(str(exc)) from exc
|
||||
175
backend/app/routers/geo.py
Normal file
175
backend/app/routers/geo.py
Normal file
@@ -0,0 +1,175 @@
|
||||
"""Geo / IP lookup router.
|
||||
|
||||
Provides the IP enrichment endpoints:
|
||||
|
||||
* ``GET /api/geo/lookup/{ip}`` — ban status, ban history, and geo info for an IP
|
||||
* ``POST /api/geo/re-resolve`` — retry all previously failed geo lookups
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Annotated
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
|
||||
import aiosqlite
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path, Request, status
|
||||
|
||||
from app.dependencies import AuthDep, get_db
|
||||
from app.models.geo import GeoCacheStatsResponse, GeoDetail, IpLookupResponse
|
||||
from app.services import geo_service, jail_service
|
||||
from app.utils.fail2ban_client import Fail2BanConnectionError
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api/geo", tags=["Geo"])
|
||||
|
||||
_IpPath = Annotated[str, Path(description="IPv4 or IPv6 address to look up.")]
|
||||
|
||||
|
||||
@router.get(
    "/lookup/{ip}",
    response_model=IpLookupResponse,
    summary="Look up ban status and geo information for an IP",
)
async def lookup_ip(
    request: Request,
    _auth: AuthDep,
    ip: _IpPath,
) -> IpLookupResponse:
    """Report ban status plus geo/network enrichment for one IP address.

    Every running fail2ban jail is checked for a current ban of *ip*; the
    result is enriched with country, ASN, and organisation data from
    ip-api.com.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session dependency (authentication gate).
        ip: The IP address to look up.

    Returns:
        :class:`~app.models.geo.IpLookupResponse` with ban status and geo data.

    Raises:
        HTTPException: 400 when *ip* is not a valid IP address.
        HTTPException: 502 when fail2ban is unreachable.
    """
    socket_path: str = request.app.state.settings.fail2ban_socket
    http_session: aiohttp.ClientSession = request.app.state.http_session

    async def _resolve(addr: str) -> geo_service.GeoInfo | None:
        # Delegate enrichment to the shared geo service.
        return await geo_service.lookup(addr, http_session)

    try:
        result = await jail_service.lookup_ip(socket_path, ip, geo_enricher=_resolve)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(exc),
        ) from exc
    except Fail2BanConnectionError as exc:
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"Cannot reach fail2ban: {exc}",
        ) from exc

    raw = result.get("geo")
    geo = (
        GeoDetail(
            country_code=raw.country_code,
            country_name=raw.country_name,
            asn=raw.asn,
            org=raw.org,
        )
        if raw is not None
        else None
    )

    return IpLookupResponse(
        ip=result["ip"],
        currently_banned_in=result["currently_banned_in"],
        geo=geo,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# POST /api/geo/re-resolve
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# GET /api/geo/stats
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/stats",
    response_model=GeoCacheStatsResponse,
    summary="Geo cache diagnostic counters",
)
async def geo_stats(
    _auth: AuthDep,
    db: Annotated[aiosqlite.Connection, Depends(get_db)],
) -> GeoCacheStatsResponse:
    """Expose diagnostic counters for the geo cache subsystem.

    Lets operators and the UI gauge geo-resolution health.

    Args:
        _auth: Validated session dependency (authentication gate).
        db: BanGUI application database connection.

    Returns:
        :class:`~app.models.geo.GeoCacheStatsResponse` with current counters.
    """
    counters: dict[str, int] = await geo_service.cache_stats(db)
    return GeoCacheStatsResponse(**counters)
|
||||
|
||||
|
||||
@router.post(
    "/re-resolve",
    summary="Re-resolve all IPs whose country could not be determined",
)
async def re_resolve_geo(
    request: Request,
    _auth: AuthDep,
    db: Annotated[aiosqlite.Connection, Depends(get_db)],
) -> dict[str, int]:
    """Retry geo resolution for every ``geo_cache`` row with a null country.

    The in-memory negative cache is cleared first so previously failing IPs
    become immediately eligible for a fresh API attempt.

    Args:
        request: Incoming request (used to access ``app.state.http_session``).
        _auth: Validated session dependency (authentication gate).
        db: BanGUI application database (reads/writes ``geo_cache``).

    Returns:
        JSON object ``{"resolved": N, "total": M}``: *N* IPs gained a
        country code out of *M* retried.
    """
    # Gather every cached IP that still has no country code.
    async with db.execute(
        "SELECT ip FROM geo_cache WHERE country_code IS NULL"
    ) as cur:
        unresolved: list[str] = [str(row[0]) async for row in cur]

    if not unresolved:
        return {"resolved": 0, "total": 0}

    # Drop the negative cache so these IPs bypass the TTL back-off.
    geo_service.clear_neg_cache()

    http_session: aiohttp.ClientSession = request.app.state.http_session
    geo_map = await geo_service.lookup_batch(unresolved, http_session, db=db)

    hits = sum(1 for info in geo_map.values() if info.country_code is not None)
    return {"resolved": hits, "total": len(unresolved)}
|
||||
37
backend/app/routers/health.py
Normal file
37
backend/app/routers/health.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""Health check router.
|
||||
|
||||
A lightweight ``GET /api/health`` endpoint that verifies the application
|
||||
is running and can serve requests. Also reports the cached fail2ban liveness
|
||||
state so monitoring tools and Docker health checks can observe daemon status
|
||||
without probing the socket directly.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from app.models.server import ServerStatus
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api", tags=["Health"])
|
||||
|
||||
|
||||
@router.get("/health", summary="Application health check")
async def health_check(request: Request) -> JSONResponse:
    """Report application liveness plus the cached fail2ban state.

    Always answers HTTP 200 so Docker health checks do not restart the
    backend container while fail2ban is temporarily offline; the
    ``fail2ban`` field conveys the daemon's current state instead.

    Args:
        request: FastAPI request (used to read cached server status).

    Returns:
        A JSON object with ``{"status": "ok", "fail2ban": "online"|"offline"}``.
    """
    # Fall back to an offline status when the cache has not been populated yet.
    cached: ServerStatus = getattr(
        request.app.state, "server_status", ServerStatus(online=False)
    )
    payload = {
        "status": "ok",
        "fail2ban": "online" if cached.online else "offline",
    }
    return JSONResponse(content=payload)
|
||||
141
backend/app/routers/history.py
Normal file
141
backend/app/routers/history.py
Normal file
@@ -0,0 +1,141 @@
|
||||
"""History router.
|
||||
|
||||
Provides endpoints for forensic exploration of all historical ban records
|
||||
stored in the fail2ban SQLite database.
|
||||
|
||||
Routes
|
||||
------
|
||||
``GET /api/history``
|
||||
Paginated list of all historical bans, filterable by jail, IP prefix, and
|
||||
time range.
|
||||
|
||||
``GET /api/history/{ip}``
|
||||
Per-IP detail: complete ban timeline, aggregated totals, and geolocation.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query, Request
|
||||
|
||||
from app.dependencies import AuthDep
|
||||
from app.models.ban import TimeRange
|
||||
from app.models.history import HistoryListResponse, IpDetailResponse
|
||||
from app.services import geo_service, history_service
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api/history", tags=["History"])
|
||||
|
||||
_DEFAULT_PAGE_SIZE: int = 100
|
||||
|
||||
|
||||
@router.get(
    "",
    response_model=HistoryListResponse,
    summary="Return a paginated list of historical bans",
)
async def get_history(
    request: Request,
    _auth: AuthDep,
    range: TimeRange | None = Query(
        default=None,
        description="Optional time-range filter. Omit for all-time.",
    ),
    jail: str | None = Query(
        default=None,
        description="Restrict results to this jail name.",
    ),
    ip: str | None = Query(
        default=None,
        description="Restrict results to IPs matching this prefix.",
    ),
    page: int = Query(default=1, ge=1, description="1-based page number."),
    page_size: int = Query(
        default=_DEFAULT_PAGE_SIZE,
        ge=1,
        le=500,
        description="Items per page (max 500).",
    ),
) -> HistoryListResponse:
    """List historical bans, newest first, with optional filtering.

    Queries the fail2ban database applying the requested filters and
    enriches each record with geolocation data.

    Note: the ``range`` parameter shadows the builtin of the same name, but
    it is the public query-string key and cannot be renamed without
    breaking API clients.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session dependency (authentication gate).
        range: Optional time-range preset; ``None`` means all-time.
        jail: Optional exact-match jail name filter.
        ip: Optional IP prefix filter.
        page: 1-based page number.
        page_size: Items per page (1–500).

    Returns:
        :class:`~app.models.history.HistoryListResponse` with the page of
        items and the total matching count.
    """
    socket_path: str = request.app.state.settings.fail2ban_socket
    http_session: aiohttp.ClientSession = request.app.state.http_session

    async def _resolve(addr: str) -> geo_service.GeoInfo | None:
        # Geo enrichment callback handed to the history service.
        return await geo_service.lookup(addr, http_session)

    return await history_service.list_history(
        socket_path,
        range_=range,
        jail=jail,
        ip_filter=ip,
        page=page,
        page_size=page_size,
        geo_enricher=_resolve,
    )
|
||||
|
||||
|
||||
@router.get(
    "/{ip}",
    response_model=IpDetailResponse,
    summary="Return the full ban history for a single IP address",
)
async def get_ip_history(
    request: Request,
    _auth: AuthDep,
    ip: str,
) -> IpDetailResponse:
    """Produce the complete historical record for one IP address.

    All ban events for *ip* are read from the fail2ban database and
    aggregated into totals plus a full timeline.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session dependency (authentication gate).
        ip: The IP address to look up.

    Returns:
        :class:`~app.models.history.IpDetailResponse` with aggregated totals
        and a full ban timeline.

    Raises:
        HTTPException: 404 if the IP has no history in the database.
    """
    socket_path: str = request.app.state.settings.fail2ban_socket
    http_session: aiohttp.ClientSession = request.app.state.http_session

    async def _resolve(addr: str) -> geo_service.GeoInfo | None:
        # Geo enrichment callback handed to the history service.
        return await geo_service.lookup(addr, http_session)

    detail: IpDetailResponse | None = await history_service.get_ip_detail(
        socket_path,
        ip,
        geo_enricher=_resolve,
    )
    if detail is None:
        raise HTTPException(status_code=404, detail=f"No history found for IP {ip!r}.")
    return detail
|
||||
615
backend/app/routers/jails.py
Normal file
615
backend/app/routers/jails.py
Normal file
@@ -0,0 +1,615 @@
|
||||
"""Jails router.
|
||||
|
||||
Provides CRUD and control operations for fail2ban jails:
|
||||
|
||||
* ``GET /api/jails`` — list all jails
|
||||
* ``GET /api/jails/{name}`` — full detail for one jail
|
||||
* ``GET /api/jails/{name}/banned`` — paginated currently-banned IPs for one jail
|
||||
* ``POST /api/jails/{name}/start`` — start a jail
|
||||
* ``POST /api/jails/{name}/stop`` — stop a jail
|
||||
* ``POST /api/jails/{name}/idle`` — toggle idle mode
|
||||
* ``POST /api/jails/{name}/reload`` — reload a single jail
|
||||
* ``POST /api/jails/reload-all`` — reload every jail
|
||||
|
||||
* ``GET /api/jails/{name}/ignoreip`` — ignore-list for a jail
|
||||
* ``POST /api/jails/{name}/ignoreip`` — add IP to ignore list
|
||||
* ``DELETE /api/jails/{name}/ignoreip`` — remove IP from ignore list
|
||||
* ``POST /api/jails/{name}/ignoreself`` — toggle ignoreself option
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Annotated
|
||||
|
||||
from fastapi import APIRouter, Body, HTTPException, Path, Request, status
|
||||
|
||||
from app.dependencies import AuthDep
|
||||
from app.models.ban import JailBannedIpsResponse
|
||||
from app.models.jail import (
|
||||
IgnoreIpRequest,
|
||||
JailCommandResponse,
|
||||
JailDetailResponse,
|
||||
JailListResponse,
|
||||
)
|
||||
from app.services import jail_service
|
||||
from app.services.jail_service import JailNotFoundError, JailOperationError
|
||||
from app.utils.fail2ban_client import Fail2BanConnectionError
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api/jails", tags=["Jails"])
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_NamePath = Annotated[str, Path(description="Jail name as configured in fail2ban.")]
|
||||
|
||||
|
||||
def _not_found(name: str) -> HTTPException:
    """Build the 404 error raised when a jail name is unknown.

    Args:
        name: Jail name that could not be resolved.

    Returns:
        :class:`fastapi.HTTPException` carrying status 404.
    """
    detail = f"Jail not found: {name!r}"
    return HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=detail)
|
||||
|
||||
|
||||
def _bad_gateway(exc: Exception) -> HTTPException:
    """Build the 502 error used when the fail2ban socket is unreachable.

    Args:
        exc: The underlying connection error.

    Returns:
        :class:`fastapi.HTTPException` carrying status 502.
    """
    return HTTPException(
        status_code=status.HTTP_502_BAD_GATEWAY,
        detail=f"Cannot reach fail2ban: {exc}",
    )
|
||||
|
||||
|
||||
def _conflict(message: str) -> HTTPException:
    """Build the 409 error for an invalid jail state transition.

    Args:
        message: Human-readable description of the conflict.

    Returns:
        :class:`fastapi.HTTPException` carrying status 409.
    """
    return HTTPException(status_code=status.HTTP_409_CONFLICT, detail=message)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Jail listing & detail
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "",
    response_model=JailListResponse,
    summary="List all active fail2ban jails",
)
async def get_jails(
    request: Request,
    _auth: AuthDep,
) -> JailListResponse:
    """List every active fail2ban jail with its metrics and key settings.

    Each entry carries runtime counters (currently banned, total bans,
    failures) together with the jail's find time, ban time, max retries,
    backend, and idle state.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.jail.JailListResponse` with all active jails.

    Raises:
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        jails = await jail_service.list_jails(sock)
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return jails
|
||||
|
||||
|
||||
@router.get(
    "/{name}",
    response_model=JailDetailResponse,
    summary="Return full detail for a single jail",
)
async def get_jail(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> JailDetailResponse:
    """Return one jail's complete configuration and runtime state.

    Covers log paths, fail/ignore regex patterns, date pattern, log
    encoding, attached action names, ban-time settings, and runtime
    counters.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.

    Returns:
        :class:`~app.models.jail.JailDetailResponse` with the full jail.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        detail = await jail_service.get_jail(sock, name)
    except JailNotFoundError:
        raise _not_found(name) from None
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return detail
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Jail control commands
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.post(
    "/reload-all",
    response_model=JailCommandResponse,
    summary="Reload all fail2ban jails",
)
async def reload_all_jails(
    request: Request,
    _auth: AuthDep,
) -> JailCommandResponse:
    """Reload every fail2ban jail to apply configuration changes.

    Instructs fail2ban to re-read its configuration for all jails at once.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the reload.

    Raises:
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.reload_all(sock)
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    # "*" marks the response as applying to every jail, not a single one.
    return JailCommandResponse(message="All jails reloaded successfully.", jail="*")
|
||||
|
||||
|
||||
@router.post(
    "/{name}/start",
    response_model=JailCommandResponse,
    summary="Start a stopped jail",
)
async def start_jail(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> JailCommandResponse:
    """Start a fail2ban jail that is currently stopped.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the start.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.start_jail(sock, name)
    except JailNotFoundError:
        raise _not_found(name) from None
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(message=f"Jail {name!r} started.", jail=name)
|
||||
|
||||
|
||||
@router.post(
    "/{name}/stop",
    response_model=JailCommandResponse,
    summary="Stop a running jail",
)
async def stop_jail(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> JailCommandResponse:
    """Stop a running fail2ban jail.

    After stopping, the jail no longer monitors logs or issues new bans.
    Whether existing bans are lifted depends on fail2ban configuration.
    Stopping an already-stopped jail succeeds silently (idempotent), which
    is why no 404 mapping exists here.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the stop.

    Raises:
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.stop_jail(sock, name)
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(message=f"Jail {name!r} stopped.", jail=name)
|
||||
|
||||
|
||||
@router.post(
    "/{name}/idle",
    response_model=JailCommandResponse,
    summary="Toggle idle mode for a jail",
)
async def toggle_idle(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    on: bool = Body(..., description="``true`` to enable idle, ``false`` to disable."),
) -> JailCommandResponse:
    """Enable or disable idle mode for a fail2ban jail.

    Idle mode suspends log monitoring without fully stopping the jail, so
    all existing bans are preserved.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.
        on: ``true`` to enable idle, ``false`` to disable.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the change.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    mode = "on" if on else "off"
    try:
        await jail_service.set_idle(sock, name, on=on)
    except JailNotFoundError:
        raise _not_found(name) from None
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(
        message=f"Jail {name!r} idle mode turned {mode}.",
        jail=name,
    )
|
||||
|
||||
|
||||
@router.post(
    "/{name}/reload",
    response_model=JailCommandResponse,
    summary="Reload a single jail",
)
async def reload_jail(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> JailCommandResponse:
    """Reload one fail2ban jail so it picks up configuration changes.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the reload.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.reload_jail(sock, name)
    except JailNotFoundError:
        raise _not_found(name) from None
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(message=f"Jail {name!r} reloaded.", jail=name)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Ignore list (IP whitelist)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class _IgnoreSelfRequest(IgnoreIpRequest):
    """Request body model for the ignoreself toggle endpoint.

    Subclasses :class:`~app.models.jail.IgnoreIpRequest` but declares no
    fields of its own in this body.

    NOTE(review): an earlier docstring claimed this class overrides the
    ``ip`` field with a boolean ``on`` field, but no such field is defined
    here, and the ignoreself endpoint takes ``on`` via ``Body(...)``
    instead — confirm whether this class is still needed.
    """
|
||||
|
||||
|
||||
@router.get(
    "/{name}/ignoreip",
    response_model=list[str],
    summary="List the ignore IPs for a jail",
)
async def get_ignore_list(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
) -> list[str]:
    """Return the current ignore list (IP whitelist) for a fail2ban jail.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.

    Returns:
        List of IP addresses and CIDR networks on the ignore list.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        ignored = await jail_service.get_ignore_list(sock, name)
    except JailNotFoundError:
        raise _not_found(name) from None
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return ignored
|
||||
|
||||
|
||||
@router.post(
    "/{name}/ignoreip",
    status_code=status.HTTP_201_CREATED,
    response_model=JailCommandResponse,
    summary="Add an IP or network to the ignore list",
)
async def add_ignore_ip(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    body: IgnoreIpRequest,
) -> JailCommandResponse:
    """Add an IP address or CIDR network to a jail's ignore list.

    Ignored IPs are never banned by the jail, even when they match the
    configured fail regex.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.
        body: Payload containing the IP or CIDR to add.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the addition.

    Raises:
        HTTPException: 400 when the IP address or network is invalid.
        HTTPException: 404 when the jail does not exist.
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.add_ignore_ip(sock, name, body.ip)
    except ValueError as exc:
        # The service validates the address itself; surface that as 400.
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(exc),
        ) from exc
    except JailNotFoundError:
        raise _not_found(name) from None
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(
        message=f"IP {body.ip!r} added to ignore list of jail {name!r}.",
        jail=name,
    )
|
||||
|
||||
|
||||
@router.delete(
    "/{name}/ignoreip",
    response_model=JailCommandResponse,
    summary="Remove an IP or network from the ignore list",
)
async def del_ignore_ip(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    body: IgnoreIpRequest,
) -> JailCommandResponse:
    """Remove an IP address or CIDR network from a jail's ignore list.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.
        body: Payload containing the IP or CIDR to remove.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the removal.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await jail_service.del_ignore_ip(sock, name, body.ip)
    except JailNotFoundError:
        raise _not_found(name) from None
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(
        message=f"IP {body.ip!r} removed from ignore list of jail {name!r}.",
        jail=name,
    )
|
||||
|
||||
|
||||
@router.post(
    "/{name}/ignoreself",
    response_model=JailCommandResponse,
    summary="Toggle the ignoreself option for a jail",
)
async def toggle_ignore_self(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    on: bool = Body(..., description="``true`` to enable ignoreself, ``false`` to disable."),
) -> JailCommandResponse:
    """Toggle the ``ignoreself`` flag for a fail2ban jail.

    With ``ignoreself`` enabled, fail2ban automatically whitelists the
    server's own IP addresses so the host can never ban itself.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.
        on: ``true`` to enable, ``false`` to disable.

    Returns:
        :class:`~app.models.jail.JailCommandResponse` confirming the change.

    Raises:
        HTTPException: 404 when the jail does not exist.
        HTTPException: 409 when fail2ban reports the operation failed.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    verb = "enabled" if on else "disabled"
    try:
        await jail_service.set_ignore_self(sock, name, on=on)
    except JailNotFoundError:
        raise _not_found(name) from None
    except JailOperationError as exc:
        raise _conflict(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return JailCommandResponse(
        message=f"ignoreself {verb} for jail {name!r}.",
        jail=name,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Currently banned IPs (paginated)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/{name}/banned",
    response_model=JailBannedIpsResponse,
    summary="Return paginated currently-banned IPs for a single jail",
)
async def get_jail_banned_ips(
    request: Request,
    _auth: AuthDep,
    name: _NamePath,
    page: int = 1,
    page_size: int = 25,
    search: str | None = None,
) -> JailBannedIpsResponse:
    """Return a paginated list of IPs currently banned by one jail.

    The full ban list is fetched from the fail2ban socket, filtered by the
    optional *search* substring, sliced to the requested page, and only
    that page slice is geo-enriched.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.
        name: Jail name.
        page: 1-based page number (default 1, min 1).
        page_size: Items per page (default 25, max 100).
        search: Optional case-insensitive substring filter on the IP address.

    Returns:
        :class:`~app.models.ban.JailBannedIpsResponse` with the paginated bans.

    Raises:
        HTTPException: 400 when *page* or *page_size* are out of range.
        HTTPException: 404 when the jail does not exist.
        HTTPException: 502 when fail2ban is unreachable.
    """
    # Validate pagination up front so no socket round-trip happens on bad input.
    if page < 1:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="page must be >= 1.",
        )
    if page_size < 1 or page_size > 100:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="page_size must be between 1 and 100.",
        )

    app_state = request.app.state
    try:
        return await jail_service.get_jail_banned_ips(
            socket_path=app_state.settings.fail2ban_socket,
            jail_name=name,
            page=page,
            page_size=page_size,
            search=search,
            # Both are optional app-lifecycle resources; None when absent.
            http_session=getattr(app_state, "http_session", None),
            app_db=getattr(app_state, "db", None),
        )
    except JailNotFoundError:
        raise _not_found(name) from None
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
|
||||
144
backend/app/routers/server.py
Normal file
144
backend/app/routers/server.py
Normal file
@@ -0,0 +1,144 @@
|
||||
"""Server settings router.
|
||||
|
||||
Provides endpoints to view and update fail2ban server-level settings and
|
||||
to flush log files.
|
||||
|
||||
* ``GET /api/server/settings`` — current log level, target, and DB config
|
||||
* ``PUT /api/server/settings`` — update server-level settings
|
||||
* ``POST /api/server/flush-logs`` — flush and re-open log files
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request, status
|
||||
|
||||
from app.dependencies import AuthDep
|
||||
from app.models.server import ServerSettingsResponse, ServerSettingsUpdate
|
||||
from app.services import server_service
|
||||
from app.services.server_service import ServerOperationError
|
||||
from app.utils.fail2ban_client import Fail2BanConnectionError
|
||||
|
||||
router: APIRouter = APIRouter(prefix="/api/server", tags=["Server"])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _bad_gateway(exc: Exception) -> HTTPException:
    """Return a 502 response when fail2ban is unreachable.

    Args:
        exc: The underlying connection error.

    Returns:
        :class:`fastapi.HTTPException` with status 502.
    """
    return HTTPException(
        status_code=status.HTTP_502_BAD_GATEWAY,
        detail=f"Cannot reach fail2ban: {exc}",
    )
|
||||
|
||||
|
||||
def _bad_request(message: str) -> HTTPException:
    """Return a 400 response for a rejected server-level command.

    Args:
        message: Human-readable description of the rejection.

    Returns:
        :class:`fastapi.HTTPException` with status 400.
    """
    return HTTPException(
        status_code=status.HTTP_400_BAD_REQUEST,
        detail=message,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Endpoints
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get(
    "/settings",
    response_model=ServerSettingsResponse,
    summary="Return fail2ban server-level settings",
)
async def get_server_settings(
    request: Request,
    _auth: AuthDep,
) -> ServerSettingsResponse:
    """Return the current fail2ban server-level settings.

    Covers log level, log target, syslog socket, database file path,
    database purge age, and maximum stored matches per record.

    Args:
        request: Incoming request (used to access ``app.state``).
        _auth: Validated session — enforces authentication.

    Returns:
        :class:`~app.models.server.ServerSettingsResponse`.

    Raises:
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        settings = await server_service.get_settings(sock)
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return settings
|
||||
|
||||
|
||||
@router.put(
    "/settings",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Update fail2ban server-level settings",
)
async def update_server_settings(
    request: Request,
    _auth: AuthDep,
    body: ServerSettingsUpdate,
) -> None:
    """Update fail2ban server-level settings.

    Only non-None fields in *body* are written; changes take effect
    immediately with no daemon restart.

    Args:
        request: Incoming request.
        _auth: Validated session.
        body: Partial settings update.

    Raises:
        HTTPException: 400 when a set command is rejected by fail2ban.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        await server_service.update_settings(sock, body)
    except ServerOperationError as exc:
        raise _bad_request(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
|
||||
|
||||
|
||||
@router.post(
    "/flush-logs",
    status_code=status.HTTP_200_OK,
    summary="Flush and re-open fail2ban log files",
)
async def flush_logs(
    request: Request,
    _auth: AuthDep,
) -> dict[str, str]:
    """Flush and re-open fail2ban log files.

    Useful after log rotation so the daemon writes to the freshly created
    log file instead of appending to the rotated one.

    Args:
        request: Incoming request.
        _auth: Validated session.

    Returns:
        ``{"message": "<response from fail2ban>"}``

    Raises:
        HTTPException: 400 when the command is rejected.
        HTTPException: 502 when fail2ban is unreachable.
    """
    sock: str = request.app.state.settings.fail2ban_socket
    try:
        outcome = await server_service.flush_logs(sock)
    except ServerOperationError as exc:
        raise _bad_request(str(exc)) from exc
    except Fail2BanConnectionError as exc:
        raise _bad_gateway(exc) from exc
    return {"message": outcome}
|
||||
91
backend/app/routers/setup.py
Normal file
91
backend/app/routers/setup.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""Setup router.
|
||||
|
||||
Exposes the ``POST /api/setup`` endpoint for the one-time first-run
|
||||
configuration wizard. Once setup has been completed, subsequent calls
|
||||
return ``409 Conflict``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import structlog
|
||||
from fastapi import APIRouter, HTTPException, status
|
||||
|
||||
from app.dependencies import DbDep
|
||||
from app.models.setup import SetupRequest, SetupResponse, SetupStatusResponse, SetupTimezoneResponse
|
||||
from app.services import setup_service
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
router = APIRouter(prefix="/api/setup", tags=["setup"])
|
||||
|
||||
|
||||
@router.get(
    "",
    response_model=SetupStatusResponse,
    summary="Check whether setup has been completed",
)
async def get_setup_status(db: DbDep) -> SetupStatusResponse:
    """Report whether the initial setup wizard has already run.

    Returns:
        :class:`~app.models.setup.SetupStatusResponse` with ``completed``
        set to ``True`` if setup is done, ``False`` otherwise.
    """
    return SetupStatusResponse(completed=await setup_service.is_setup_complete(db))
|
||||
|
||||
|
||||
@router.post(
    "",
    response_model=SetupResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Run the initial setup wizard",
)
async def post_setup(body: SetupRequest, db: DbDep) -> SetupResponse:
    """Persist the initial BanGUI configuration.

    Args:
        body: Setup request payload validated by Pydantic.
        db: Injected aiosqlite connection.

    Returns:
        :class:`~app.models.setup.SetupResponse` on success.

    Raises:
        HTTPException: 409 if setup has already been completed.
    """
    # Setup is one-shot: reject repeat attempts before touching anything.
    already_done = await setup_service.is_setup_complete(db)
    if already_done:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="Setup has already been completed.",
        )

    await setup_service.run_setup(
        db,
        master_password=body.master_password,
        database_path=body.database_path,
        fail2ban_socket=body.fail2ban_socket,
        timezone=body.timezone,
        session_duration_minutes=body.session_duration_minutes,
    )
    return SetupResponse()
|
||||
|
||||
|
||||
@router.get(
    "/timezone",
    response_model=SetupTimezoneResponse,
    summary="Return the configured IANA timezone",
)
async def get_timezone(db: DbDep) -> SetupTimezoneResponse:
    """Return the IANA timezone chosen during the initial setup wizard.

    The frontend uses this value to convert UTC timestamps into the
    administrator's local time zone.

    Returns:
        :class:`~app.models.setup.SetupTimezoneResponse` with ``timezone``
        set to the stored IANA identifier (e.g. ``"UTC"`` or
        ``"Europe/Berlin"``), defaulting to ``"UTC"`` if unset.
    """
    return SetupTimezoneResponse(timezone=await setup_service.get_timezone(db))
|
||||
1
backend/app/services/__init__.py
Normal file
1
backend/app/services/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Business logic services package."""
|
||||
122
backend/app/services/auth_service.py
Normal file
122
backend/app/services/auth_service.py
Normal file
@@ -0,0 +1,122 @@
|
||||
"""Authentication service.
|
||||
|
||||
Handles password verification, session creation, session validation, and
|
||||
session expiry. Sessions are stored in the SQLite database so they
|
||||
survive server restarts.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import secrets
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import bcrypt
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
from app.models.auth import Session
|
||||
|
||||
from app.repositories import session_repo
|
||||
from app.services import setup_service
|
||||
from app.utils.time_utils import add_minutes, utc_now
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
|
||||
async def _check_password(plain: str, hashed: str) -> bool:
    """Verify *plain* against the bcrypt hash *hashed* without blocking.

    bcrypt verification is deliberately CPU-expensive, so it is pushed to
    the default thread-pool executor to keep the asyncio event loop free.

    Args:
        plain: The plain-text password to verify.
        hashed: The stored bcrypt hash string.

    Returns:
        ``True`` on a successful match, ``False`` otherwise.
    """
    def _verify() -> bool:
        return bool(bcrypt.checkpw(plain.encode(), hashed.encode()))

    return await asyncio.get_running_loop().run_in_executor(None, _verify)
|
||||
|
||||
|
||||
async def login(
    db: aiosqlite.Connection,
    password: str,
    session_duration_minutes: int,
) -> Session:
    """Verify *password* and create a new session on success.

    Args:
        db: Active aiosqlite connection.
        password: Plain-text password supplied by the user.
        session_duration_minutes: How long the new session is valid for.

    Returns:
        A :class:`~app.models.auth.Session` domain model for the new session.

    Raises:
        ValueError: If the password is incorrect or no password hash is stored.
    """
    stored_hash = await setup_service.get_password_hash(db)
    if stored_hash is None:
        log.warning("bangui_login_no_hash")
        raise ValueError("No password is configured — run setup first.")
    if not await _check_password(password, stored_hash):
        log.warning("bangui_login_wrong_password")
        raise ValueError("Incorrect password.")

    # 32 random bytes -> 64 hex chars; cryptographically strong token.
    token = secrets.token_hex(32)
    issued_at = utc_now()
    session = await session_repo.create_session(
        db,
        token=token,
        created_at=issued_at.isoformat(),
        expires_at=add_minutes(issued_at, session_duration_minutes).isoformat(),
    )
    # Log only a prefix so the full token never reaches the logs.
    log.info("bangui_login_success", token_prefix=token[:8])
    return session
|
||||
|
||||
|
||||
async def validate_session(db: aiosqlite.Connection, token: str) -> Session:
    """Return the session for *token* if it is valid and not expired.

    Expired sessions are deleted on sight so they cannot be retried.

    Args:
        db: Active aiosqlite connection.
        token: The opaque session token from the client.

    Returns:
        The :class:`~app.models.auth.Session` if it is valid.

    Raises:
        ValueError: If the token is not found or has expired.
    """
    session = await session_repo.get_session(db, token)
    if session is None:
        raise ValueError("Session not found.")

    # Expiry is compared as ISO-8601 strings, which sort chronologically.
    if session.expires_at <= utc_now().isoformat():
        await session_repo.delete_session(db, token)
        raise ValueError("Session has expired.")

    return session
|
||||
|
||||
|
||||
async def logout(db: aiosqlite.Connection, token: str) -> None:
    """Revoke the session identified by *token*.

    Deleting an unknown token is a no-op, so logout is idempotent from the
    caller's perspective.

    Args:
        db: Active aiosqlite connection.
        token: The session token to revoke.
    """
    await session_repo.delete_session(db, token)
    # Log only a prefix so the full token never reaches the logs.
    log.info("bangui_logout", token_prefix=token[:8])
|
||||
692
backend/app/services/ban_service.py
Normal file
692
backend/app/services/ban_service.py
Normal file
@@ -0,0 +1,692 @@
|
||||
"""Ban service.
|
||||
|
||||
Queries the fail2ban SQLite database for ban history. The fail2ban database
|
||||
path is obtained at runtime by sending ``get dbfile`` to the fail2ban daemon
|
||||
via the Unix domain socket.
|
||||
|
||||
All database I/O is performed through aiosqlite opened in **read-only** mode
|
||||
so BanGUI never modifies or locks the fail2ban database.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
from datetime import UTC, datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import aiosqlite
|
||||
import structlog
|
||||
|
||||
from app.models.ban import (
|
||||
BLOCKLIST_JAIL,
|
||||
BUCKET_SECONDS,
|
||||
BUCKET_SIZE_LABEL,
|
||||
TIME_RANGE_SECONDS,
|
||||
BanOrigin,
|
||||
BansByCountryResponse,
|
||||
BansByJailResponse,
|
||||
BanTrendBucket,
|
||||
BanTrendResponse,
|
||||
DashboardBanItem,
|
||||
DashboardBanListResponse,
|
||||
JailBanCount,
|
||||
TimeRange,
|
||||
_derive_origin,
|
||||
bucket_count,
|
||||
)
|
||||
from app.utils.fail2ban_client import Fail2BanClient
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_DEFAULT_PAGE_SIZE: int = 100
|
||||
_MAX_PAGE_SIZE: int = 500
|
||||
_SOCKET_TIMEOUT: float = 5.0
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _origin_sql_filter(origin: BanOrigin | None) -> tuple[str, tuple[str, ...]]:
    """Build the WHERE-clause fragment for an optional origin filter.

    Args:
        origin: ``"blocklist"`` to restrict to the blocklist-import jail,
            ``"selfblock"`` to exclude it, or ``None`` for no restriction.

    Returns:
        A ``(sql_fragment, params)`` pair — the fragment starts with ``" AND"``
        so it can be appended directly to an existing WHERE clause.
    """
    # Map each recognised origin to its clause; anything else means "no filter".
    fragments: dict[str, str] = {
        "blocklist": " AND jail = ?",
        "selfblock": " AND jail != ?",
    }
    fragment = fragments.get(origin) if origin is not None else None
    if fragment is None:
        return "", ()
    return fragment, (BLOCKLIST_JAIL,)
|
||||
|
||||
|
||||
def _since_unix(range_: TimeRange) -> int:
    """Compute the Unix timestamp that marks the start of the query window.

    Uses :func:`time.time` (UTC epoch seconds on all platforms) so the value
    compares directly against fail2ban's ``timeofban`` column, which stores
    raw ``time.time()`` values. A tz-aware
    ``datetime.now(UTC).timestamp()`` would yield the same number, but
    :func:`time.time` sidesteps any tz-aware datetime pitfalls on
    misconfigured systems.

    Args:
        range_: One of the supported time-range presets.

    Returns:
        Unix timestamp (seconds since epoch) equal to *now − range_*.
    """
    window: int = TIME_RANGE_SECONDS[range_]
    return int(time.time()) - window
|
||||
|
||||
|
||||
def _ts_to_iso(unix_ts: int) -> str:
|
||||
"""Convert a Unix timestamp to an ISO 8601 UTC string.
|
||||
|
||||
Args:
|
||||
unix_ts: Seconds since the Unix epoch.
|
||||
|
||||
Returns:
|
||||
ISO 8601 UTC timestamp, e.g. ``"2026-03-01T12:00:00+00:00"``.
|
||||
"""
|
||||
return datetime.fromtimestamp(unix_ts, tz=UTC).isoformat()
|
||||
|
||||
|
||||
async def _get_fail2ban_db_path(socket_path: str) -> str:
    """Ask the fail2ban daemon where its SQLite database lives.

    Sends the ``get dbfile`` command over the fail2ban Unix domain socket
    and returns the configured ``dbfile`` value.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.

    Returns:
        Absolute path to the fail2ban SQLite database file.

    Raises:
        RuntimeError: If fail2ban reports that no database is configured
            or if the socket response is unexpected.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: If the socket
            cannot be reached.
    """
    async with Fail2BanClient(socket_path, timeout=_SOCKET_TIMEOUT) as client:
        reply = await client.send(["get", "dbfile"])

    # The protocol answers with a (status_code, payload) pair; anything else
    # is treated as a malformed response.
    try:
        status, payload = reply
    except (TypeError, ValueError) as exc:
        raise RuntimeError(f"Unexpected response from fail2ban: {reply!r}") from exc

    if status != 0:
        raise RuntimeError(f"fail2ban error code {status}: {payload!r}")

    if payload is None:
        raise RuntimeError("fail2ban has no database configured (dbfile is None)")

    return str(payload)
|
||||
|
||||
|
||||
def _parse_data_json(raw: Any) -> tuple[list[str], int]:
|
||||
"""Extract matches and failure count from the ``bans.data`` column.
|
||||
|
||||
The ``data`` column stores a JSON blob with optional keys:
|
||||
|
||||
* ``matches`` — list of raw matched log lines.
|
||||
* ``failures`` — total failure count that triggered the ban.
|
||||
|
||||
Args:
|
||||
raw: The raw ``data`` column value (string, dict, or ``None``).
|
||||
|
||||
Returns:
|
||||
A ``(matches, failures)`` tuple. Both default to empty/zero when
|
||||
parsing fails or the column is absent.
|
||||
"""
|
||||
if raw is None:
|
||||
return [], 0
|
||||
|
||||
obj: dict[str, Any] = {}
|
||||
if isinstance(raw, str):
|
||||
try:
|
||||
parsed: Any = json.loads(raw)
|
||||
if isinstance(parsed, dict):
|
||||
obj = parsed
|
||||
# json.loads("null") → None, or other non-dict — treat as empty
|
||||
except json.JSONDecodeError:
|
||||
return [], 0
|
||||
elif isinstance(raw, dict):
|
||||
obj = raw
|
||||
|
||||
matches: list[str] = [str(m) for m in (obj.get("matches") or [])]
|
||||
failures: int = int(obj.get("failures", 0))
|
||||
return matches, failures
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def list_bans(
    socket_path: str,
    range_: TimeRange,
    *,
    page: int = 1,
    page_size: int = _DEFAULT_PAGE_SIZE,
    http_session: aiohttp.ClientSession | None = None,
    app_db: aiosqlite.Connection | None = None,
    geo_enricher: Any | None = None,
    origin: BanOrigin | None = None,
) -> DashboardBanListResponse:
    """Return a paginated list of bans within the selected time window.

    Queries the fail2ban database ``bans`` table for records whose
    ``timeofban`` falls within the specified *range_*. Results are ordered
    newest-first.

    Geo enrichment strategy (highest priority first):

    1. When *http_session* is provided the entire page of IPs is resolved in
       one :func:`~app.services.geo_service.lookup_batch` call (up to 100 IPs
       per HTTP request). This avoids the 45 req/min rate limit of the
       single-IP endpoint and is the preferred production path.
    2. When only *geo_enricher* is provided (legacy / test path) each IP is
       resolved individually via the supplied async callable.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        range_: Time-range preset (``"24h"``, ``"7d"``, ``"30d"``, or
            ``"365d"``).
        page: 1-based page number (default: ``1``).
        page_size: Maximum items per page, capped at ``_MAX_PAGE_SIZE``
            (default: ``100``).
        http_session: Optional shared :class:`aiohttp.ClientSession`. When
            provided, :func:`~app.services.geo_service.lookup_batch` is used
            for efficient bulk geo resolution.
        app_db: Optional BanGUI application database used to persist newly
            resolved geo entries and to read back cached results.
        geo_enricher: Optional async callable ``(ip: str) -> GeoInfo | None``.
            Used as a fallback when *http_session* is ``None`` (e.g. tests).
        origin: Optional origin filter — ``"blocklist"`` restricts results to
            the ``blocklist-import`` jail, ``"selfblock"`` excludes it.

    Returns:
        :class:`~app.models.ban.DashboardBanListResponse` containing the
        paginated items and total count.
    """
    # Imported lazily to avoid a circular import between the services.
    from app.services import geo_service  # noqa: PLC0415

    since: int = _since_unix(range_)
    effective_page_size: int = min(page_size, _MAX_PAGE_SIZE)
    offset: int = (page - 1) * effective_page_size
    origin_clause, origin_params = _origin_sql_filter(origin)

    db_path: str = await _get_fail2ban_db_path(socket_path)
    log.info(
        "ban_service_list_bans",
        db_path=db_path,
        since=since,
        range=range_,
        origin=origin,
    )

    # Read-only URI connect: BanGUI must never modify or lock the f2b DB.
    async with aiosqlite.connect(f"file:{db_path}?mode=ro", uri=True) as f2b_db:
        f2b_db.row_factory = aiosqlite.Row

        async with f2b_db.execute(
            "SELECT COUNT(*) FROM bans WHERE timeofban >= ?" + origin_clause,
            (since, *origin_params),
        ) as cur:
            count_row = await cur.fetchone()
            total: int = int(count_row[0]) if count_row else 0

        async with f2b_db.execute(
            "SELECT jail, ip, timeofban, bancount, data "
            "FROM bans "
            "WHERE timeofban >= ?"
            + origin_clause
            + " ORDER BY timeofban DESC "
            "LIMIT ? OFFSET ?",
            (since, *origin_params, effective_page_size, offset),
        ) as cur:
            rows = await cur.fetchall()

    # Batch-resolve geo data for all IPs on this page in a single API call.
    # This avoids hitting the 45 req/min single-IP rate limit when the
    # page contains many bans (e.g. after a large blocklist import).
    geo_map: dict[str, Any] = {}
    if http_session is not None and rows:
        page_ips: list[str] = [str(r["ip"]) for r in rows]
        try:
            geo_map = await geo_service.lookup_batch(page_ips, http_session, db=app_db)
        except Exception:  # noqa: BLE001
            # Geo data is cosmetic — a failed lookup must not break the list.
            log.warning("ban_service_batch_geo_failed_list_bans")

    items: list[DashboardBanItem] = []
    for row in rows:
        jail: str = str(row["jail"])
        ip: str = str(row["ip"])
        banned_at: str = _ts_to_iso(int(row["timeofban"]))
        ban_count: int = int(row["bancount"])
        matches, _ = _parse_data_json(row["data"])
        # First matched log line doubles as the "service" label for the UI.
        service: str | None = matches[0] if matches else None

        country_code: str | None = None
        country_name: str | None = None
        asn: str | None = None
        org: str | None = None

        if geo_map:
            geo = geo_map.get(ip)
            if geo is not None:
                country_code = geo.country_code
                country_name = geo.country_name
                asn = geo.asn
                org = geo.org
        elif geo_enricher is not None:
            # Legacy per-IP fallback path (sequential awaits).
            try:
                geo = await geo_enricher(ip)
                if geo is not None:
                    country_code = geo.country_code
                    country_name = geo.country_name
                    asn = geo.asn
                    org = geo.org
            except Exception:  # noqa: BLE001
                log.warning("ban_service_geo_lookup_failed", ip=ip)

        items.append(
            DashboardBanItem(
                ip=ip,
                jail=jail,
                banned_at=banned_at,
                service=service,
                country_code=country_code,
                country_name=country_name,
                asn=asn,
                org=org,
                ban_count=ban_count,
                origin=_derive_origin(jail),
            )
        )

    return DashboardBanListResponse(
        items=items,
        total=total,
        page=page,
        page_size=effective_page_size,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# bans_by_country
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: Maximum rows returned in the companion table alongside the map.
|
||||
_MAX_COMPANION_BANS: int = 200
|
||||
|
||||
|
||||
async def bans_by_country(
    socket_path: str,
    range_: TimeRange,
    http_session: aiohttp.ClientSession | None = None,
    geo_enricher: Any | None = None,
    app_db: aiosqlite.Connection | None = None,
    origin: BanOrigin | None = None,
) -> BansByCountryResponse:
    """Aggregate ban counts per country for the selected time window.

    Uses a two-step strategy optimised for large datasets:

    1. Queries the fail2ban DB with ``GROUP BY ip`` to get the per-IP ban
       counts for all unique IPs in the window — no row-count cap.
    2. Serves geo data from the in-memory cache only (non-blocking).
       Any IPs not yet in the cache are scheduled for background resolution
       via :func:`asyncio.create_task` so the response is returned immediately
       and subsequent requests benefit from the warmed cache.
    3. Returns a ``{country_code: count}`` aggregation and the 200 most
       recent raw rows for the companion table.

    Note:
        On the very first request a large number of IPs may be uncached and
        the country map will be sparse. The background task will resolve them
        and the next request will return a complete map. This trade-off keeps
        the endpoint fast regardless of dataset size.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        range_: Time-range preset.
        http_session: Optional :class:`aiohttp.ClientSession` for background
            geo lookups. When ``None``, only cached data is used.
        geo_enricher: Legacy async ``(ip) -> GeoInfo | None`` callable;
            used when *http_session* is ``None`` (e.g. tests).
        app_db: Optional BanGUI application database used to persist newly
            resolved geo entries across restarts.
        origin: Optional origin filter — ``"blocklist"`` restricts results to
            the ``blocklist-import`` jail, ``"selfblock"`` excludes it.

    Returns:
        :class:`~app.models.ban.BansByCountryResponse` with per-country
        aggregation and the companion ban list.
    """
    # Imported lazily to avoid a circular import between the services.
    from app.services import geo_service  # noqa: PLC0415

    since: int = _since_unix(range_)
    origin_clause, origin_params = _origin_sql_filter(origin)
    db_path: str = await _get_fail2ban_db_path(socket_path)
    log.info(
        "ban_service_bans_by_country",
        db_path=db_path,
        since=since,
        range=range_,
        origin=origin,
    )

    # Read-only URI connect: BanGUI must never modify or lock the f2b DB.
    async with aiosqlite.connect(f"file:{db_path}?mode=ro", uri=True) as f2b_db:
        f2b_db.row_factory = aiosqlite.Row

        # Total count for the window.
        async with f2b_db.execute(
            "SELECT COUNT(*) FROM bans WHERE timeofban >= ?" + origin_clause,
            (since, *origin_params),
        ) as cur:
            count_row = await cur.fetchone()
            total: int = int(count_row[0]) if count_row else 0

        # Aggregation: unique IPs + their total event count.
        # No LIMIT here — we need all unique source IPs for accurate country counts.
        async with f2b_db.execute(
            "SELECT ip, COUNT(*) AS event_count "
            "FROM bans "
            "WHERE timeofban >= ?"
            + origin_clause
            + " GROUP BY ip",
            (since, *origin_params),
        ) as cur:
            agg_rows = await cur.fetchall()

        # Companion table: most recent raw rows for display alongside the map.
        async with f2b_db.execute(
            "SELECT jail, ip, timeofban, bancount, data "
            "FROM bans "
            "WHERE timeofban >= ?"
            + origin_clause
            + " ORDER BY timeofban DESC "
            "LIMIT ?",
            (since, *origin_params, _MAX_COMPANION_BANS),
        ) as cur:
            companion_rows = await cur.fetchall()

    unique_ips: list[str] = [str(r["ip"]) for r in agg_rows]
    geo_map: dict[str, Any] = {}

    if http_session is not None and unique_ips:
        # Serve only what is already in the in-memory cache — no API calls on
        # the hot path. Uncached IPs are resolved asynchronously in the
        # background so subsequent requests benefit from a warmer cache.
        geo_map, uncached = geo_service.lookup_cached_only(unique_ips)
        if uncached:
            log.info(
                "ban_service_geo_background_scheduled",
                uncached=len(uncached),
                cached=len(geo_map),
            )
            # Fire-and-forget: lookup_batch handles rate-limiting / retries.
            # The dirty-set flush task persists results to the DB.
            asyncio.create_task(  # noqa: RUF006
                geo_service.lookup_batch(uncached, http_session, db=app_db),
                name="geo_bans_by_country",
            )
    elif geo_enricher is not None and unique_ips:
        # Fallback: legacy per-IP enricher (used in tests / older callers).
        async def _safe_lookup(ip: str) -> tuple[str, Any]:
            # Wraps the enricher so one bad lookup never fails the gather.
            try:
                return ip, await geo_enricher(ip)
            except Exception:  # noqa: BLE001
                log.warning("ban_service_geo_lookup_failed", ip=ip)
                return ip, None

        results = await asyncio.gather(*(_safe_lookup(ip) for ip in unique_ips))
        geo_map = dict(results)

    # Build country aggregation from the SQL-grouped rows.
    countries: dict[str, int] = {}
    country_names: dict[str, str] = {}

    for row in agg_rows:
        ip: str = str(row["ip"])
        geo = geo_map.get(ip)
        cc: str | None = geo.country_code if geo else None
        cn: str | None = geo.country_name if geo else None
        event_count: int = int(row["event_count"])

        if cc:
            countries[cc] = countries.get(cc, 0) + event_count
            if cn and cc not in country_names:
                country_names[cc] = cn

    # Build companion table from recent rows (geo already cached from batch step).
    bans: list[DashboardBanItem] = []
    for row in companion_rows:
        ip = str(row["ip"])
        geo = geo_map.get(ip)
        cc = geo.country_code if geo else None
        cn = geo.country_name if geo else None
        asn: str | None = geo.asn if geo else None
        org: str | None = geo.org if geo else None
        matches, _ = _parse_data_json(row["data"])

        bans.append(
            DashboardBanItem(
                ip=ip,
                jail=str(row["jail"]),
                banned_at=_ts_to_iso(int(row["timeofban"])),
                service=matches[0] if matches else None,
                country_code=cc,
                country_name=cn,
                asn=asn,
                org=org,
                ban_count=int(row["bancount"]),
                origin=_derive_origin(str(row["jail"])),
            )
        )

    return BansByCountryResponse(
        countries=countries,
        country_names=country_names,
        bans=bans,
        total=total,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ban_trend
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def ban_trend(
    socket_path: str,
    range_: TimeRange,
    *,
    origin: BanOrigin | None = None,
) -> BanTrendResponse:
    """Aggregate bans into equal-width time buckets for a trend chart.

    Groups ``bans`` rows by a computed bucket index so the frontend can
    render a continuous time series. Every bucket inside the window is
    emitted — zero-count buckets included — so the series has no gaps.

    Bucket sizes per time-range preset:

    * ``24h`` → 1-hour buckets (24 total)
    * ``7d`` → 6-hour buckets (28 total)
    * ``30d`` → 1-day buckets (30 total)
    * ``365d`` → 7-day buckets (~53 total)

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        range_: Time-range preset (``"24h"``, ``"7d"``, ``"30d"``, or
            ``"365d"``).
        origin: Optional origin filter — ``"blocklist"`` restricts to the
            ``blocklist-import`` jail, ``"selfblock"`` excludes it.

    Returns:
        :class:`~app.models.ban.BanTrendResponse` with a full bucket list
        and the human-readable bucket-size label.
    """
    window_start: int = _since_unix(range_)
    width: int = BUCKET_SECONDS[range_]
    total_buckets: int = bucket_count(range_)
    origin_clause, origin_params = _origin_sql_filter(origin)

    db_path: str = await _get_fail2ban_db_path(socket_path)
    log.info(
        "ban_service_ban_trend",
        db_path=db_path,
        since=window_start,
        range=range_,
        origin=origin,
        bucket_secs=width,
        num_buckets=total_buckets,
    )

    # Bucket index is computed in SQL so SQLite does the grouping work.
    query = (
        "SELECT CAST((timeofban - ?) / ? AS INTEGER) AS bucket_idx, "
        "COUNT(*) AS cnt "
        "FROM bans "
        "WHERE timeofban >= ?"
        + origin_clause
        + " GROUP BY bucket_idx "
        "ORDER BY bucket_idx"
    )

    # Read-only URI connect so the fail2ban DB is never modified or locked.
    async with aiosqlite.connect(f"file:{db_path}?mode=ro", uri=True) as f2b_db:
        f2b_db.row_factory = aiosqlite.Row
        async with f2b_db.execute(
            query,
            (window_start, width, window_start, *origin_params),
        ) as cur:
            rows = await cur.fetchall()

    # Keep only indices inside the window; stray indices are dropped.
    counts: dict[int, int] = {
        int(r["bucket_idx"]): int(r["cnt"])
        for r in rows
        if 0 <= int(r["bucket_idx"]) < total_buckets
    }

    # Emit every bucket in order, filling gaps with zero counts.
    buckets: list[BanTrendBucket] = []
    for i in range(total_buckets):
        buckets.append(
            BanTrendBucket(
                timestamp=_ts_to_iso(window_start + i * width),
                count=counts.get(i, 0),
            )
        )

    return BanTrendResponse(
        buckets=buckets,
        bucket_size=BUCKET_SIZE_LABEL[range_],
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# bans_by_jail
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def bans_by_jail(
    socket_path: str,
    range_: TimeRange,
    *,
    origin: BanOrigin | None = None,
) -> BansByJailResponse:
    """Return ban counts aggregated per jail for the selected time window.

    Queries the fail2ban database ``bans`` table, groups records by jail
    name, and returns them ordered by count descending. The origin filter
    is applied when provided so callers can restrict results to blocklist-
    imported bans or organic fail2ban bans.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        range_: Time-range preset (``"24h"``, ``"7d"``, ``"30d"``, or
            ``"365d"``).
        origin: Optional origin filter — ``"blocklist"`` restricts to the
            ``blocklist-import`` jail, ``"selfblock"`` excludes it.

    Returns:
        :class:`~app.models.ban.BansByJailResponse` with per-jail counts
        sorted descending and the total ban count.
    """
    since: int = _since_unix(range_)
    origin_clause, origin_params = _origin_sql_filter(origin)

    db_path: str = await _get_fail2ban_db_path(socket_path)
    log.debug(
        "ban_service_bans_by_jail",
        db_path=db_path,
        since=since,
        since_iso=_ts_to_iso(since),
        range=range_,
        origin=origin,
    )

    # Read-only URI connect: BanGUI must never modify or lock the f2b DB.
    async with aiosqlite.connect(f"file:{db_path}?mode=ro", uri=True) as f2b_db:
        f2b_db.row_factory = aiosqlite.Row

        async with f2b_db.execute(
            "SELECT COUNT(*) FROM bans WHERE timeofban >= ?" + origin_clause,
            (since, *origin_params),
        ) as cur:
            count_row = await cur.fetchone()
            total: int = int(count_row[0]) if count_row else 0

        # Diagnostic guard: if zero results were returned, check whether the
        # table has *any* rows and log a warning with min/max timeofban so
        # operators can diagnose timezone or filter mismatches from logs.
        if total == 0:
            async with f2b_db.execute(
                "SELECT COUNT(*), MIN(timeofban), MAX(timeofban) FROM bans"
            ) as cur:
                diag_row = await cur.fetchone()
                if diag_row and diag_row[0] > 0:
                    log.warning(
                        "ban_service_bans_by_jail_empty_despite_data",
                        table_row_count=diag_row[0],
                        min_timeofban=diag_row[1],
                        max_timeofban=diag_row[2],
                        since=since,
                        range=range_,
                    )

        async with f2b_db.execute(
            "SELECT jail, COUNT(*) AS cnt "
            "FROM bans "
            "WHERE timeofban >= ?"
            + origin_clause
            + " GROUP BY jail ORDER BY cnt DESC",
            (since, *origin_params),
        ) as cur:
            rows = await cur.fetchall()

    jails: list[JailBanCount] = [
        JailBanCount(jail=str(row["jail"]), count=int(row["cnt"])) for row in rows
    ]
    log.debug(
        "ban_service_bans_by_jail_result",
        total=total,
        jail_count=len(jails),
    )
    return BansByJailResponse(jails=jails, total=total)
|
||||
548
backend/app/services/blocklist_service.py
Normal file
548
backend/app/services/blocklist_service.py
Normal file
@@ -0,0 +1,548 @@
|
||||
"""Blocklist service.
|
||||
|
||||
Manages blocklist source CRUD, URL preview, IP import (download → validate →
|
||||
ban via fail2ban), and schedule persistence.
|
||||
|
||||
All ban operations target a dedicated fail2ban jail (default:
|
||||
``"blocklist-import"``) so blocklist-origin bans are tracked separately from
|
||||
regular bans. If that jail does not exist or fail2ban is unreachable, the
|
||||
error is recorded in the import log and processing continues.
|
||||
|
||||
Schedule configuration is stored as JSON in the application settings table
|
||||
under the key ``"blocklist_schedule"``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.models.blocklist import (
|
||||
BlocklistSource,
|
||||
ImportRunResult,
|
||||
ImportSourceResult,
|
||||
PreviewResponse,
|
||||
ScheduleConfig,
|
||||
ScheduleInfo,
|
||||
)
|
||||
from app.repositories import blocklist_repo, import_log_repo, settings_repo
|
||||
from app.utils.ip_utils import is_valid_ip, is_valid_network
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiohttp
|
||||
import aiosqlite
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
#: Settings key used to persist the schedule config.
|
||||
_SCHEDULE_SETTINGS_KEY: str = "blocklist_schedule"
|
||||
|
||||
#: fail2ban jail name for blocklist-origin bans.
|
||||
BLOCKLIST_JAIL: str = "blocklist-import"
|
||||
|
||||
#: Maximum number of sample entries returned by the preview endpoint.
|
||||
_PREVIEW_LINES: int = 20
|
||||
|
||||
#: Maximum bytes to download for a preview (first 64 KB).
|
||||
_PREVIEW_MAX_BYTES: int = 65536
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Source CRUD helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _row_to_source(row: dict[str, Any]) -> BlocklistSource:
    """Build a :class:`BlocklistSource` from a repository row dict.

    Args:
        row: Dict with keys matching the ``blocklist_sources`` columns.

    Returns:
        A validated :class:`~app.models.blocklist.BlocklistSource` instance.
    """
    source = BlocklistSource.model_validate(row)
    return source
|
||||
|
||||
|
||||
async def list_sources(db: aiosqlite.Connection) -> list[BlocklistSource]:
    """Fetch every configured blocklist source.

    Args:
        db: Active application database connection.

    Returns:
        List of :class:`~app.models.blocklist.BlocklistSource` instances.
    """
    return [_row_to_source(row) for row in await blocklist_repo.list_sources(db)]
|
||||
|
||||
|
||||
async def get_source(
    db: aiosqlite.Connection,
    source_id: int,
) -> BlocklistSource | None:
    """Fetch one blocklist source by primary key.

    Args:
        db: Active application database connection.
        source_id: Primary key of the desired source.

    Returns:
        :class:`~app.models.blocklist.BlocklistSource` or ``None``.
    """
    row = await blocklist_repo.get_source(db, source_id)
    if row is None:
        return None
    return _row_to_source(row)
|
||||
|
||||
|
||||
async def create_source(
    db: aiosqlite.Connection,
    name: str,
    url: str,
    *,
    enabled: bool = True,
) -> BlocklistSource:
    """Create a new blocklist source and return the persisted record.

    Args:
        db: Active application database connection.
        name: Human-readable display name.
        url: URL of the blocklist text file.
        enabled: Whether the source is active. Defaults to ``True``.

    Returns:
        The newly created :class:`~app.models.blocklist.BlocklistSource`.

    Raises:
        RuntimeError: If the row just inserted cannot be read back — this
            indicates a database-layer inconsistency.
    """
    new_id = await blocklist_repo.create_source(db, name, url, enabled=enabled)
    source = await get_source(db, new_id)
    if source is None:
        # Post-condition check: the row was just inserted, so read-back must
        # succeed. An explicit raise (instead of `assert`) still fires under
        # `python -O`, where asserts are stripped.
        raise RuntimeError(f"blocklist source {new_id} missing after insert")
    log.info("blocklist_source_created", id=new_id, name=name, url=url)
    return source
|
||||
|
||||
|
||||
async def update_source(
    db: aiosqlite.Connection,
    source_id: int,
    *,
    name: str | None = None,
    url: str | None = None,
    enabled: bool | None = None,
) -> BlocklistSource | None:
    """Modify fields of an existing blocklist source.

    Args:
        db: Active application database connection.
        source_id: Primary key of the source to modify.
        name: New display name, or ``None`` to leave unchanged.
        url: New URL, or ``None`` to leave unchanged.
        enabled: New enabled state, or ``None`` to leave unchanged.

    Returns:
        Updated :class:`~app.models.blocklist.BlocklistSource`, or ``None``
        if the source does not exist.
    """
    changed = await blocklist_repo.update_source(
        db, source_id, name=name, url=url, enabled=enabled
    )
    if not changed:
        # Repository reports the row was not found — nothing to return.
        return None
    result = await get_source(db, source_id)
    log.info("blocklist_source_updated", id=source_id)
    return result
|
||||
|
||||
|
||||
async def delete_source(db: aiosqlite.Connection, source_id: int) -> bool:
    """Remove a blocklist source.

    Args:
        db: Active application database connection.
        source_id: Primary key of the source to delete.

    Returns:
        ``True`` if the source was found and deleted, ``False`` otherwise.
    """
    removed = await blocklist_repo.delete_source(db, source_id)
    if removed:
        log.info("blocklist_source_deleted", id=source_id)
    return removed
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Preview
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def preview_source(
    url: str,
    http_session: aiohttp.ClientSession,
    *,
    sample_lines: int = _PREVIEW_LINES,
) -> PreviewResponse:
    """Fetch the head of a blocklist URL and summarise its contents.

    Args:
        url: URL to download.
        http_session: Shared :class:`aiohttp.ClientSession`.
        sample_lines: Maximum number of lines to include in the preview.

    Returns:
        :class:`~app.models.blocklist.PreviewResponse` with a sample of
        valid IP entries and validation statistics.

    Raises:
        ValueError: If the URL cannot be reached or returns a non-200 status.
    """
    try:
        async with http_session.get(url, timeout=_aiohttp_timeout(10)) as resp:
            if resp.status != 200:
                raise ValueError(f"HTTP {resp.status} from {url}")
            # Only the first chunk is fetched — enough for a representative
            # preview without downloading a potentially huge list.
            raw = await resp.content.read(_PREVIEW_MAX_BYTES)
    except Exception as exc:
        # Normalise every transport/HTTP failure into ValueError for the API layer.
        log.warning("blocklist_preview_failed", url=url, error=str(exc))
        raise ValueError(str(exc)) from exc

    lines = raw.decode(errors="replace").splitlines()
    sample: list[str] = []
    valid_count = 0
    skipped_count = 0

    for raw_line in lines:
        candidate = raw_line.strip()
        # Blank lines and comments count as neither valid nor skipped.
        if not candidate or candidate.startswith("#"):
            continue
        if is_valid_ip(candidate) or is_valid_network(candidate):
            valid_count += 1
            if len(sample) < sample_lines:
                sample.append(candidate)
        else:
            skipped_count += 1

    return PreviewResponse(
        entries=sample,
        total_lines=len(lines),
        valid_count=valid_count,
        skipped_count=skipped_count,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Import
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def import_source(
    source: BlocklistSource,
    http_session: aiohttp.ClientSession,
    socket_path: str,
    db: aiosqlite.Connection,
) -> ImportSourceResult:
    """Download and apply bans from a single blocklist source.

    The function downloads the URL, validates each line as an IP address,
    and bans valid IPv4/IPv6 addresses via fail2ban in
    :data:`BLOCKLIST_JAIL`. CIDR ranges are counted as skipped since
    fail2ban requires individual addresses. Any error encountered during
    download is recorded and the result is returned without raising.

    After a successful import the geo cache is pre-warmed by batch-resolving
    all newly banned IPs. This ensures the dashboard and map show country
    data immediately after import rather than facing cold-cache lookups.

    Args:
        source: The :class:`~app.models.blocklist.BlocklistSource` to import.
        http_session: Shared :class:`aiohttp.ClientSession`.
        socket_path: Path to the fail2ban Unix socket.
        db: Application database for logging.

    Returns:
        :class:`~app.models.blocklist.ImportSourceResult` with counters.
    """
    # --- Download ---
    try:
        async with http_session.get(
            source.url, timeout=_aiohttp_timeout(30)
        ) as resp:
            if resp.status != 200:
                # Non-200 response: record a zero-count result in the import
                # log and return — this function never raises for a bad source.
                error_msg = f"HTTP {resp.status}"
                await _log_result(db, source, 0, 0, error_msg)
                log.warning("blocklist_import_download_failed", url=source.url, status=resp.status)
                return ImportSourceResult(
                    source_id=source.id,
                    source_url=source.url,
                    ips_imported=0,
                    ips_skipped=0,
                    error=error_msg,
                )
            # errors="replace" tolerates list files that are not valid UTF-8.
            content = await resp.text(errors="replace")
    except Exception as exc:
        # Broad catch is deliberate: network, timeout and decode failures all
        # follow the same record-and-return contract as the HTTP branch above.
        error_msg = str(exc)
        await _log_result(db, source, 0, 0, error_msg)
        log.warning("blocklist_import_download_error", url=source.url, error=error_msg)
        return ImportSourceResult(
            source_id=source.id,
            source_url=source.url,
            ips_imported=0,
            ips_skipped=0,
            error=error_msg,
        )

    # --- Validate and ban ---
    imported = 0
    skipped = 0
    # First ban failure encountered; surfaced in the result and the log entry.
    ban_error: str | None = None
    # Successfully banned IPs, collected for the geo pre-warm below.
    imported_ips: list[str] = []

    # Import jail_service here to avoid circular import at module level.
    from app.services import jail_service  # noqa: PLC0415

    for line in content.splitlines():
        stripped = line.strip()
        if not stripped or stripped.startswith("#"):
            continue

        if not is_valid_ip(stripped):
            # Skip CIDRs and malformed entries gracefully.
            skipped += 1
            continue

        try:
            await jail_service.ban_ip(socket_path, BLOCKLIST_JAIL, stripped)
            imported += 1
            imported_ips.append(stripped)
        except jail_service.JailNotFoundError as exc:
            # The target jail does not exist in fail2ban — there is no point
            # continuing because every subsequent ban would also fail.
            ban_error = str(exc)
            log.warning(
                "blocklist_jail_not_found",
                jail=BLOCKLIST_JAIL,
                error=str(exc),
            )
            break
        except Exception as exc:
            # A single failed ban must not abort the whole import: count it as
            # skipped and remember only the first error message.
            skipped += 1
            if ban_error is None:
                ban_error = str(exc)
            log.debug("blocklist_ban_failed", ip=stripped, error=str(exc))

    await _log_result(db, source, imported, skipped, ban_error)
    log.info(
        "blocklist_source_imported",
        source_id=source.id,
        url=source.url,
        imported=imported,
        skipped=skipped,
        error=ban_error,
    )

    # --- Pre-warm geo cache for newly imported IPs ---
    if imported_ips:
        from app.services import geo_service  # noqa: PLC0415

        # Only resolve IPs the geo cache has not already seen.
        uncached_ips: list[str] = [
            ip for ip in imported_ips if not geo_service.is_cached(ip)
        ]
        skipped_geo: int = len(imported_ips) - len(uncached_ips)

        if skipped_geo > 0:
            log.info(
                "blocklist_geo_prewarm_cache_hit",
                source_id=source.id,
                skipped=skipped_geo,
                to_lookup=len(uncached_ips),
            )

        if uncached_ips:
            try:
                await geo_service.lookup_batch(uncached_ips, http_session, db=db)
                log.info(
                    "blocklist_geo_prewarm_complete",
                    source_id=source.id,
                    count=len(uncached_ips),
                )
            except Exception as exc:  # noqa: BLE001
                # Geo enrichment is best-effort; a failure here must not fail
                # or roll back the import itself.
                log.warning(
                    "blocklist_geo_prewarm_failed",
                    source_id=source.id,
                    error=str(exc),
                )

    return ImportSourceResult(
        source_id=source.id,
        source_url=source.url,
        ips_imported=imported,
        ips_skipped=skipped,
        error=ban_error,
    )
|
||||
|
||||
|
||||
async def import_all(
    db: aiosqlite.Connection,
    http_session: aiohttp.ClientSession,
    socket_path: str,
) -> ImportRunResult:
    """Import every enabled blocklist source and aggregate the results.

    Runs :func:`import_source` once per source with ``enabled = True`` and
    sums the per-source counters.

    Args:
        db: Application database connection.
        http_session: Shared :class:`aiohttp.ClientSession`.
        socket_path: fail2ban socket path.

    Returns:
        :class:`~app.models.blocklist.ImportRunResult` with aggregated
        counters and per-source results.
    """
    enabled_rows = await blocklist_repo.list_enabled_sources(db)

    outcomes: list[ImportSourceResult] = []
    imported_total = 0
    skipped_total = 0
    failed_sources = 0

    for row in enabled_rows:
        outcome = await import_source(
            _row_to_source(row), http_session, socket_path, db
        )
        outcomes.append(outcome)
        imported_total += outcome.ips_imported
        skipped_total += outcome.ips_skipped
        if outcome.error is not None:
            failed_sources += 1

    log.info(
        "blocklist_import_all_complete",
        sources=len(enabled_rows),
        total_imported=imported_total,
        total_skipped=skipped_total,
        errors=failed_sources,
    )
    return ImportRunResult(
        results=outcomes,
        total_imported=imported_total,
        total_skipped=skipped_total,
        errors_count=failed_sources,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Schedule
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Fallback schedule returned when no schedule has been saved, or when the
# stored JSON fails validation (see get_schedule). Field values come from
# ScheduleConfig's own defaults.
_DEFAULT_SCHEDULE = ScheduleConfig()
|
||||
|
||||
|
||||
async def get_schedule(db: aiosqlite.Connection) -> ScheduleConfig:
    """Load the import schedule config from the settings table.

    Falls back to :data:`_DEFAULT_SCHEDULE` when no schedule has been saved
    yet or the stored value does not validate.

    Args:
        db: Active application database connection.

    Returns:
        The stored (or default) :class:`~app.models.blocklist.ScheduleConfig`.
    """
    stored = await settings_repo.get_setting(db, _SCHEDULE_SETTINGS_KEY)
    if stored is None:
        return _DEFAULT_SCHEDULE
    try:
        return ScheduleConfig.model_validate(json.loads(stored))
    except Exception:
        # Broad catch is deliberate: any malformed stored value (bad JSON or
        # failed validation) degrades to the default schedule.
        log.warning("blocklist_schedule_invalid", raw=stored)
        return _DEFAULT_SCHEDULE
|
||||
|
||||
|
||||
async def set_schedule(
    db: aiosqlite.Connection,
    config: ScheduleConfig,
) -> ScheduleConfig:
    """Store a new schedule configuration in the settings table.

    Args:
        db: Active application database connection.
        config: The :class:`~app.models.blocklist.ScheduleConfig` to store.

    Returns:
        The saved configuration (same object after validation).
    """
    serialized = config.model_dump_json()
    await settings_repo.set_setting(db, _SCHEDULE_SETTINGS_KEY, serialized)
    log.info("blocklist_schedule_updated", frequency=config.frequency, hour=config.hour)
    return config
|
||||
|
||||
|
||||
async def get_schedule_info(
    db: aiosqlite.Connection,
    next_run_at: str | None,
) -> ScheduleInfo:
    """Combine the stored schedule config with last-run metadata.

    Args:
        db: Active application database connection.
        next_run_at: ISO 8601 string of the next scheduled run, or ``None``
            if not yet scheduled (provided by the caller from APScheduler).

    Returns:
        :class:`~app.models.blocklist.ScheduleInfo` combining config and
        runtime metadata.
    """
    config = await get_schedule(db)
    latest = await import_log_repo.get_last_log(db)

    last_run_at: str | None = None
    last_run_errors: bool | None = None
    if latest:
        last_run_at = latest["timestamp"]
        # The log row stores an error string (or NULL); expose it as a flag.
        last_run_errors = latest["errors"] is not None

    return ScheduleInfo(
        config=config,
        next_run_at=next_run_at,
        last_run_at=last_run_at,
        last_run_errors=last_run_errors,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _aiohttp_timeout(seconds: float) -> Any:
    """Build an :class:`aiohttp.ClientTimeout` with the given total budget.

    Args:
        seconds: Total timeout in seconds for the whole request.

    Returns:
        An :class:`aiohttp.ClientTimeout` instance.
    """
    # NOTE(review): imported at function scope — presumably to keep aiohttp
    # off the module's runtime import path; confirm against the module header.
    import aiohttp  # noqa: PLC0415

    timeout = aiohttp.ClientTimeout(total=seconds)
    return timeout
|
||||
|
||||
|
||||
async def _log_result(
    db: aiosqlite.Connection,
    source: BlocklistSource,
    ips_imported: int,
    ips_skipped: int,
    error: str | None,
) -> None:
    """Write an import log entry for a completed source import.

    Thin pass-through to ``import_log_repo.add_log``; kept as a helper so the
    error and success paths in :func:`import_source` log identical fields.

    Args:
        db: Application database connection.
        source: The source that was imported.
        ips_imported: Count of successfully banned IPs.
        ips_skipped: Count of skipped/invalid entries.
        error: Error string, or ``None`` on success.
    """
    await import_log_repo.add_log(
        db,
        source_id=source.id,
        source_url=source.url,
        ips_imported=ips_imported,
        ips_skipped=ips_skipped,
        errors=error,
    )
|
||||
695
backend/app/services/conffile_parser.py
Normal file
695
backend/app/services/conffile_parser.py
Normal file
@@ -0,0 +1,695 @@
|
||||
"""Fail2ban INI-style configuration file parser and serializer.
|
||||
|
||||
Provides structured parsing and serialization for ``filter.d/*.conf`` and
|
||||
``action.d/*.conf`` files, mirroring fail2ban's own ``RawConfigParser``-based
|
||||
reading logic.
|
||||
|
||||
Key design decisions:
|
||||
- Uses :class:`configparser.RawConfigParser` with ``interpolation=None`` so
|
||||
fail2ban-style ``%`` / ``<>`` tags are preserved verbatim.
|
||||
- Multi-line values (lines that begin with whitespace) are handled by
|
||||
configparser automatically; the raw string is then post-processed to split
|
||||
``failregex``/``ignoreregex`` into individual patterns.
|
||||
- Section ordering in serialized output: ``[INCLUDES]`` → ``[DEFAULT]`` →
|
||||
``[Definition]`` → ``[Init]``. Unknown extra sections from action files
|
||||
(e.g. ``[ipt_oneport]``) are intentionally discarded because the structured
|
||||
model does not capture them — users should edit those sections via the raw
|
||||
(Export) tab.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import configparser
|
||||
import contextlib
|
||||
import io
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
from app.models.config import (
|
||||
ActionConfig,
|
||||
ActionConfigUpdate,
|
||||
FilterConfig,
|
||||
FilterConfigUpdate,
|
||||
JailFileConfig,
|
||||
JailFileConfigUpdate,
|
||||
JailSectionConfig,
|
||||
)
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants — well-known Definition keys for action files
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# [Definition] options that are fail2ban action lifecycle commands.
# parse_action_file maps these to named ActionConfig fields; every other
# Definition option is collected into ``definition_vars`` instead.
_ACTION_LIFECYCLE_KEYS: frozenset[str] = frozenset(
    {
        "actionstart",
        "actionstop",
        "actioncheck",
        "actionban",
        "actionunban",
        "actionflush",
    }
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _make_parser() -> configparser.RawConfigParser:
|
||||
"""Create a :class:`configparser.RawConfigParser` configured for fail2ban.
|
||||
|
||||
Returns:
|
||||
A parser with interpolation disabled, case-sensitive keys, and a
|
||||
``DEFAULT`` section that does not inherit into other sections.
|
||||
"""
|
||||
parser = configparser.RawConfigParser(
|
||||
# Disable interpolation so fail2ban % / <> tags survive unchanged.
|
||||
interpolation=None,
|
||||
# Preserve original key casing (fail2ban keys are lowercase but some
|
||||
# custom config files may use mixed case).
|
||||
strict=False,
|
||||
)
|
||||
# Keys are case-sensitive in fail2ban.
|
||||
parser.optionxform = str # type: ignore[assignment]
|
||||
return parser
|
||||
|
||||
|
||||
def _split_multiline_patterns(raw: str) -> list[str]:
|
||||
"""Split a raw multi-line configparser value into individual patterns.
|
||||
|
||||
Each non-blank, non-comment line becomes a separate entry.
|
||||
|
||||
Args:
|
||||
raw: The raw multi-line string from configparser (may include blank
|
||||
lines and ``#`` comments).
|
||||
|
||||
Returns:
|
||||
List of stripped non-empty, non-comment pattern strings.
|
||||
"""
|
||||
result: list[str] = []
|
||||
for line in raw.splitlines():
|
||||
stripped = line.strip()
|
||||
if stripped and not stripped.startswith("#"):
|
||||
result.append(stripped)
|
||||
return result
|
||||
|
||||
|
||||
def _get_opt(parser: configparser.RawConfigParser, section: str, key: str) -> str | None:
|
||||
"""Return the value of *key* in *section*, or ``None`` if absent.
|
||||
|
||||
Args:
|
||||
parser: Populated parser instance.
|
||||
section: Section name.
|
||||
key: Option name.
|
||||
|
||||
Returns:
|
||||
Option value string, or ``None``.
|
||||
"""
|
||||
if parser.has_section(section) and parser.has_option(section, key):
|
||||
return parser.get(section, key)
|
||||
return None
|
||||
|
||||
|
||||
def _section_dict(
|
||||
parser: configparser.RawConfigParser, section: str, skip: frozenset[str] | None = None
|
||||
) -> dict[str, str]:
|
||||
"""Return all key-value pairs from *section* as a plain dict.
|
||||
|
||||
Args:
|
||||
parser: Populated parser instance.
|
||||
section: Section name.
|
||||
skip: Optional set of keys to exclude.
|
||||
|
||||
Returns:
|
||||
Dict of option → value for the section.
|
||||
"""
|
||||
if not parser.has_section(section):
|
||||
return {}
|
||||
drop = skip or frozenset()
|
||||
return {
|
||||
k: v
|
||||
for k, v in parser.items(section)
|
||||
if not k.startswith("__") and k not in drop # __ keys come from DEFAULT inheritance
|
||||
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Filter file parser / serializer
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def parse_filter_file(content: str, name: str = "", filename: str = "") -> FilterConfig:
    """Parse a ``filter.d/*.conf`` file into a :class:`~app.models.config.FilterConfig`.

    Parse errors are logged and swallowed; whatever sections configparser
    managed to read before the error are still used.

    Args:
        content: Raw file content (UTF-8 string).
        name: Filter base name (e.g. ``"sshd"``). Used only to populate the
            ``name`` field on the returned model.
        filename: Actual filename (e.g. ``"sshd.conf"``).

    Returns:
        Populated :class:`~app.models.config.FilterConfig`.
    """
    # Runtime import: FilterConfig is imported at module level only under
    # ``if TYPE_CHECKING:``, so referencing it in the constructor call below
    # without this import would raise NameError at call time.
    from app.models.config import FilterConfig  # noqa: PLC0415

    parser = _make_parser()
    try:
        parser.read_string(content)
    except configparser.Error as exc:
        # Best-effort: log and continue with whatever was parsed so far.
        log.warning("filter_parse_error", name=name, error=str(exc))

    # [INCLUDES]
    before = _get_opt(parser, "INCLUDES", "before")
    after = _get_opt(parser, "INCLUDES", "after")

    # [DEFAULT] — configparser exposes DEFAULT keys via .defaults(); they are
    # treated as filter-local variables.
    variables: dict[str, str] = {}
    if parser.defaults():
        variables = dict(parser.defaults())

    # [Definition]
    prefregex = _get_opt(parser, "Definition", "prefregex")

    raw_failregex = _get_opt(parser, "Definition", "failregex") or ""
    failregex = _split_multiline_patterns(raw_failregex)

    raw_ignoreregex = _get_opt(parser, "Definition", "ignoreregex") or ""
    ignoreregex = _split_multiline_patterns(raw_ignoreregex)

    # maxlines must be an integer; a malformed value degrades to None.
    maxlines_raw = _get_opt(parser, "Definition", "maxlines")
    maxlines: int | None = None
    if maxlines_raw is not None:
        with contextlib.suppress(ValueError):
            maxlines = int(maxlines_raw.strip())

    datepattern = _get_opt(parser, "Definition", "datepattern")
    journalmatch = _get_opt(parser, "Definition", "journalmatch")

    log.debug("filter_parsed", name=name, failregex_count=len(failregex))
    return FilterConfig(
        name=name,
        filename=filename,
        before=before,
        after=after,
        variables=variables,
        prefregex=prefregex,
        failregex=failregex,
        ignoreregex=ignoreregex,
        maxlines=maxlines,
        datepattern=datepattern,
        journalmatch=journalmatch,
    )
|
||||
|
||||
|
||||
def serialize_filter_config(cfg: FilterConfig) -> str:
|
||||
"""Serialize a :class:`~app.models.config.FilterConfig` to a ``.conf`` string.
|
||||
|
||||
The output preserves the canonical fail2ban INI section ordering:
|
||||
``[INCLUDES]`` → ``[DEFAULT]`` → ``[Definition]``.
|
||||
|
||||
Args:
|
||||
cfg: The filter configuration to serialize.
|
||||
|
||||
Returns:
|
||||
UTF-8 string suitable for writing to a ``.conf`` file.
|
||||
"""
|
||||
buf = io.StringIO()
|
||||
|
||||
# [INCLUDES]
|
||||
if cfg.before is not None or cfg.after is not None:
|
||||
buf.write("[INCLUDES]\n\n")
|
||||
if cfg.before is not None:
|
||||
buf.write(f"before = {cfg.before}\n")
|
||||
if cfg.after is not None:
|
||||
buf.write(f"after = {cfg.after}\n")
|
||||
buf.write("\n")
|
||||
|
||||
# [DEFAULT]
|
||||
if cfg.variables:
|
||||
buf.write("[DEFAULT]\n\n")
|
||||
for key, value in cfg.variables.items():
|
||||
buf.write(f"{key} = {value}\n")
|
||||
buf.write("\n")
|
||||
|
||||
# [Definition]
|
||||
buf.write("[Definition]\n\n")
|
||||
|
||||
if cfg.prefregex is not None:
|
||||
buf.write(f"prefregex = {cfg.prefregex}\n\n")
|
||||
|
||||
if cfg.failregex:
|
||||
buf.write("failregex = " + cfg.failregex[0] + "\n")
|
||||
for pattern in cfg.failregex[1:]:
|
||||
buf.write(f" {pattern}\n")
|
||||
buf.write("\n")
|
||||
|
||||
if cfg.ignoreregex:
|
||||
buf.write("ignoreregex = " + cfg.ignoreregex[0] + "\n")
|
||||
for pattern in cfg.ignoreregex[1:]:
|
||||
buf.write(f" {pattern}\n")
|
||||
buf.write("\n")
|
||||
|
||||
if cfg.maxlines is not None:
|
||||
buf.write(f"maxlines = {cfg.maxlines}\n\n")
|
||||
|
||||
if cfg.datepattern is not None:
|
||||
buf.write(f"datepattern = {cfg.datepattern}\n\n")
|
||||
|
||||
if cfg.journalmatch is not None:
|
||||
buf.write(f"journalmatch = {cfg.journalmatch}\n\n")
|
||||
|
||||
return buf.getvalue()
|
||||
|
||||
|
||||
def merge_filter_update(cfg: FilterConfig, update: FilterConfigUpdate) -> FilterConfig:
    """Apply a partial :class:`~app.models.config.FilterConfigUpdate` onto *cfg*.

    Only fields that are explicitly set (not ``None``) in *update* are
    written, so a field cannot be cleared back to ``None`` through this
    helper. Returns a new :class:`~app.models.config.FilterConfig` with the
    merged values; the original is not mutated.

    Args:
        cfg: Current filter configuration.
        update: Partial update to apply.

    Returns:
        Updated :class:`~app.models.config.FilterConfig`.
    """
    # Runtime import: FilterConfig is imported at module level only under
    # ``if TYPE_CHECKING:``; without this the constructor call below would
    # raise NameError at runtime.
    from app.models.config import FilterConfig  # noqa: PLC0415

    return FilterConfig(
        name=cfg.name,
        filename=cfg.filename,
        before=update.before if update.before is not None else cfg.before,
        after=update.after if update.after is not None else cfg.after,
        variables=update.variables if update.variables is not None else cfg.variables,
        prefregex=update.prefregex if update.prefregex is not None else cfg.prefregex,
        failregex=update.failregex if update.failregex is not None else cfg.failregex,
        ignoreregex=update.ignoreregex if update.ignoreregex is not None else cfg.ignoreregex,
        maxlines=update.maxlines if update.maxlines is not None else cfg.maxlines,
        datepattern=update.datepattern if update.datepattern is not None else cfg.datepattern,
        journalmatch=update.journalmatch if update.journalmatch is not None else cfg.journalmatch,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Action file parser / serializer
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def parse_action_file(content: str, name: str = "", filename: str = "") -> ActionConfig:
    """Parse an ``action.d/*.conf`` file into a :class:`~app.models.config.ActionConfig`.

    Parse errors are logged and swallowed; whatever sections configparser
    managed to read before the error are still used.

    Args:
        content: Raw file content (UTF-8 string).
        name: Action base name (e.g. ``"iptables"``).
        filename: Actual filename (e.g. ``"iptables.conf"``).

    Returns:
        Populated :class:`~app.models.config.ActionConfig`.
    """
    # Runtime import: ActionConfig is imported at module level only under
    # ``if TYPE_CHECKING:``; without this the constructor call below would
    # raise NameError at runtime.
    from app.models.config import ActionConfig  # noqa: PLC0415

    parser = _make_parser()
    try:
        parser.read_string(content)
    except configparser.Error as exc:
        log.warning("action_parse_error", name=name, error=str(exc))

    # [INCLUDES]
    before = _get_opt(parser, "INCLUDES", "before")
    after = _get_opt(parser, "INCLUDES", "after")

    # [Definition] — extract well-known lifecycle keys, rest goes to definition_vars
    def_lifecycle: dict[str, str | None] = dict.fromkeys(_ACTION_LIFECYCLE_KEYS)
    definition_vars: dict[str, str] = {}

    if parser.has_section("Definition"):
        for key, value in parser.items("Definition"):
            if key in _ACTION_LIFECYCLE_KEYS:
                def_lifecycle[key] = value
            else:
                definition_vars[key] = value

    # [Init] — all keys go into init_vars (multiple [Init?...] sections are ignored)
    init_vars: dict[str, str] = {}
    if parser.has_section("Init"):
        for key, value in parser.items("Init"):
            init_vars[key] = value

    log.debug("action_parsed", name=name, init_vars_count=len(init_vars))
    return ActionConfig(
        name=name,
        filename=filename,
        before=before,
        after=after,
        actionstart=def_lifecycle.get("actionstart"),
        actionstop=def_lifecycle.get("actionstop"),
        actioncheck=def_lifecycle.get("actioncheck"),
        actionban=def_lifecycle.get("actionban"),
        actionunban=def_lifecycle.get("actionunban"),
        actionflush=def_lifecycle.get("actionflush"),
        definition_vars=definition_vars,
        init_vars=init_vars,
    )
|
||||
|
||||
|
||||
def serialize_action_config(cfg: ActionConfig) -> str:
|
||||
"""Serialize an :class:`~app.models.config.ActionConfig` to a ``.conf`` string.
|
||||
|
||||
Section order: ``[INCLUDES]`` → ``[Definition]`` → ``[Init]``.
|
||||
|
||||
Args:
|
||||
cfg: The action configuration to serialize.
|
||||
|
||||
Returns:
|
||||
UTF-8 string suitable for writing to a ``.conf`` file.
|
||||
"""
|
||||
buf = io.StringIO()
|
||||
|
||||
# [INCLUDES]
|
||||
if cfg.before is not None or cfg.after is not None:
|
||||
buf.write("[INCLUDES]\n\n")
|
||||
if cfg.before is not None:
|
||||
buf.write(f"before = {cfg.before}\n")
|
||||
if cfg.after is not None:
|
||||
buf.write(f"after = {cfg.after}\n")
|
||||
buf.write("\n")
|
||||
|
||||
# [Definition]
|
||||
buf.write("[Definition]\n\n")
|
||||
|
||||
# Lifecycle commands first (in canonical order)
|
||||
_lifecycle_order = (
|
||||
"actionstart",
|
||||
"actionstop",
|
||||
"actioncheck",
|
||||
"actionban",
|
||||
"actionunban",
|
||||
"actionflush",
|
||||
)
|
||||
for key in _lifecycle_order:
|
||||
value = getattr(cfg, key)
|
||||
if value is not None:
|
||||
lines = value.splitlines()
|
||||
if lines:
|
||||
buf.write(f"{key} = {lines[0]}\n")
|
||||
for extra in lines[1:]:
|
||||
buf.write(f" {extra}\n")
|
||||
buf.write("\n")
|
||||
|
||||
# Extra definition variables
|
||||
for key, value in cfg.definition_vars.items():
|
||||
lines = value.splitlines()
|
||||
if lines:
|
||||
buf.write(f"{key} = {lines[0]}\n")
|
||||
for extra in lines[1:]:
|
||||
buf.write(f" {extra}\n")
|
||||
if cfg.definition_vars:
|
||||
buf.write("\n")
|
||||
|
||||
# [Init]
|
||||
if cfg.init_vars:
|
||||
buf.write("[Init]\n\n")
|
||||
for key, value in cfg.init_vars.items():
|
||||
buf.write(f"{key} = {value}\n")
|
||||
buf.write("\n")
|
||||
|
||||
return buf.getvalue()
|
||||
|
||||
|
||||
def merge_action_update(cfg: ActionConfig, update: ActionConfigUpdate) -> ActionConfig:
    """Apply a partial :class:`~app.models.config.ActionConfigUpdate` onto *cfg*.

    Only fields that are explicitly set (not ``None``) in *update* are
    written; a new :class:`~app.models.config.ActionConfig` is returned and
    the original is not mutated.

    Args:
        cfg: Current action configuration.
        update: Partial update to apply.

    Returns:
        Updated :class:`~app.models.config.ActionConfig`.
    """
    # Runtime import: ActionConfig is imported at module level only under
    # ``if TYPE_CHECKING:``; without this the constructor call below would
    # raise NameError at runtime.
    from app.models.config import ActionConfig  # noqa: PLC0415

    return ActionConfig(
        name=cfg.name,
        filename=cfg.filename,
        before=update.before if update.before is not None else cfg.before,
        after=update.after if update.after is not None else cfg.after,
        actionstart=update.actionstart if update.actionstart is not None else cfg.actionstart,
        actionstop=update.actionstop if update.actionstop is not None else cfg.actionstop,
        actioncheck=update.actioncheck if update.actioncheck is not None else cfg.actioncheck,
        actionban=update.actionban if update.actionban is not None else cfg.actionban,
        actionunban=update.actionunban if update.actionunban is not None else cfg.actionunban,
        actionflush=update.actionflush if update.actionflush is not None else cfg.actionflush,
        definition_vars=update.definition_vars
        if update.definition_vars is not None
        else cfg.definition_vars,
        init_vars=update.init_vars if update.init_vars is not None else cfg.init_vars,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Convenience helpers for reading/writing files
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def read_and_parse_filter(path: Path) -> FilterConfig:
    """Load the filter file at *path* into a structured config.

    Args:
        path: Absolute path to the filter file.

    Returns:
        Parsed filter config.
    """
    return parse_filter_file(
        path.read_text(encoding="utf-8"), name=path.stem, filename=path.name
    )
|
||||
|
||||
|
||||
def read_and_parse_action(path: Path) -> ActionConfig:
    """Load the action file at *path* into a structured config.

    Args:
        path: Absolute path to the action file.

    Returns:
        Parsed action config.
    """
    return parse_action_file(
        path.read_text(encoding="utf-8"), name=path.stem, filename=path.name
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Jail file parser / serializer (Task 6.1)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Keys handled by named fields in JailSectionConfig.
|
||||
# Anything not listed here lands in a jail section's ``extra`` dict when a
# jail file is parsed.
_JAIL_NAMED_KEYS: frozenset[str] = frozenset(
    {
        "enabled",
        "port",
        "filter",
        "logpath",
        "maxretry",
        "findtime",
        "bantime",
        "action",
        "backend",
    }
)
|
||||
|
||||
|
||||
def _parse_bool(value: str) -> bool | None:
|
||||
"""Parse a fail2ban boolean string.
|
||||
|
||||
Args:
|
||||
value: Raw string value from config (e.g. "true", "false", "yes", "no", "1", "0").
|
||||
|
||||
Returns:
|
||||
Boolean, or ``None`` if the value is not a recognised boolean token.
|
||||
"""
|
||||
lower = value.strip().lower()
|
||||
if lower in {"true", "yes", "1"}:
|
||||
return True
|
||||
if lower in {"false", "no", "0"}:
|
||||
return False
|
||||
return None
|
||||
|
||||
|
||||
def _parse_int(value: str) -> int | None:
|
||||
"""Parse an integer string, returning ``None`` on failure.
|
||||
|
||||
Args:
|
||||
value: Raw string value from config.
|
||||
|
||||
Returns:
|
||||
Integer, or ``None``.
|
||||
"""
|
||||
with contextlib.suppress(ValueError):
|
||||
return int(value.strip())
|
||||
return None
|
||||
|
||||
|
||||
def _parse_multiline_list(raw: str) -> list[str]:
    """Split a multi-line configparser value into a list of non-blank lines.

    Thin alias for :func:`_split_multiline_patterns`: the two names exist so
    jail-file call sites read naturally, but the splitting rules (strip each
    line, drop blanks and ``#`` comments) live in one place.

    Args:
        raw: Raw multi-line string from configparser.

    Returns:
        List of stripped, non-empty, non-comment strings.
    """
    # Delegate instead of duplicating the identical loop that used to live
    # here, so any future rule change applies to both parsers.
    return _split_multiline_patterns(raw)
|
||||
|
||||
|
||||
def parse_jail_file(content: str, filename: str = "") -> JailFileConfig:
    """Parse a ``jail.d/*.conf`` file into a :class:`~app.models.config.JailFileConfig`.

    Each INI section in the file maps to a jail. The ``[DEFAULT]`` section (if
    present) is silently ignored — fail2ban merges it with jail sections, but
    the structured model represents per-jail settings only.

    Parse errors are logged and swallowed: whatever sections were read before
    the failure are still returned (best-effort).

    Args:
        content: Raw file content (UTF-8 string).
        filename: Filename (e.g. ``"sshd.conf"``).

    Returns:
        Populated :class:`~app.models.config.JailFileConfig`.
    """
    parser = _make_parser()
    try:
        parser.read_string(content)
    except configparser.Error as exc:
        # Best-effort: keep any sections parsed before the error occurred.
        log.warning("jail_file_parse_error", filename=filename, error=str(exc))

    jails: dict[str, JailSectionConfig] = {}
    for section in parser.sections():
        # Skip meta-sections used by fail2ban include system.
        # NOTE(review): stock configparser excludes DEFAULT from sections() and
        # folds its values into items(section) — whether DEFAULT keys leak into
        # each jail here depends on _make_parser's configuration; confirm.
        if section in {"INCLUDES", "DEFAULT"}:
            continue

        items = dict(parser.items(section))

        # "enabled" is tri-state: True/False when present and parseable, else None.
        enabled_raw = items.get("enabled")
        enabled = _parse_bool(enabled_raw) if enabled_raw is not None else None

        port = items.get("port")
        filter_name = items.get("filter")
        backend = items.get("backend")

        # logpath/action may span multiple lines; keep one entry per line.
        logpath_raw = items.get("logpath", "")
        logpath = _parse_multiline_list(logpath_raw)

        action_raw = items.get("action", "")
        action = _parse_multiline_list(action_raw)

        # Numeric fields: only attempt parsing when the key is present at all,
        # so "missing" (None) is distinguishable from "present but invalid".
        maxretry = _parse_int(items.get("maxretry", "")) if "maxretry" in items else None
        findtime = _parse_int(items.get("findtime", "")) if "findtime" in items else None
        bantime = _parse_int(items.get("bantime", "")) if "bantime" in items else None

        # Everything not covered by a named field is preserved verbatim.
        extra: dict[str, str] = {
            k: v for k, v in items.items() if k not in _JAIL_NAMED_KEYS
        }

        jails[section] = JailSectionConfig(
            enabled=enabled,
            port=port,
            filter=filter_name,
            logpath=logpath,
            maxretry=maxretry,
            findtime=findtime,
            bantime=bantime,
            action=action,
            backend=backend,
            extra=extra,
        )

    log.debug("jail_file_parsed", filename=filename, jail_count=len(jails))
    return JailFileConfig(filename=filename, jails=jails)
|
||||
|
||||
|
||||
def serialize_jail_file_config(cfg: JailFileConfig) -> str:
    """Render *cfg* back into fail2ban INI text.

    Args:
        cfg: Structured jail file configuration.

    Returns:
        UTF-8 file content suitable for writing to a ``jail.d/*.conf`` file.
    """
    parts: list[str] = [f"# Generated by BanGUI — {cfg.filename}\n"]

    for jail_name, jail in cfg.jails.items():
        parts.append(f"\n[{jail_name}]\n\n")

        # Scalar named fields, emitted only when set, in a fixed order.
        if jail.enabled is not None:
            parts.append(f"enabled = {'true' if jail.enabled else 'false'}\n")
        for key, value in (
            ("port", jail.port),
            ("filter", jail.filter),
            ("backend", jail.backend),
            ("maxretry", jail.maxretry),
            ("findtime", jail.findtime),
            ("bantime", jail.bantime),
        ):
            if value is not None:
                parts.append(f"{key} = {value}\n")

        # Multi-line fields: first entry on the key line, the rest indented.
        for key, entries in (("logpath", jail.logpath), ("action", jail.action)):
            if entries:
                parts.append(f"{key} = {entries[0]}\n")
                parts.extend(f"          {entry}\n" for entry in entries[1:])

        # Unrecognised keys are round-tripped verbatim.
        parts.extend(f"{key} = {value}\n" for key, value in jail.extra.items())

    return "".join(parts)
|
||||
|
||||
|
||||
def merge_jail_file_update(cfg: JailFileConfig, update: JailFileConfigUpdate) -> JailFileConfig:
    """Overlay the jails from *update* onto *cfg*.

    Jails named in ``update.jails`` replace their counterparts; all other
    jails are carried over untouched.

    Args:
        cfg: Current jail file configuration.
        update: Partial update to apply.

    Returns:
        Updated :class:`~app.models.config.JailFileConfig` (or *cfg* itself
        when the update carries no jails).
    """
    if update.jails is None:
        return cfg
    combined = {**cfg.jails, **update.jails}
    return JailFileConfig(filename=cfg.filename, jails=combined)
|
||||
|
||||
|
||||
def read_and_parse_jail_file(path: Path) -> JailFileConfig:
    """Load the jail config file at *path* and parse it.

    Args:
        path: Absolute path to the jail config file.

    Returns:
        Parsed jail file config.
    """
    return parse_jail_file(path.read_text(encoding="utf-8"), filename=path.name)
|
||||
3215
backend/app/services/config_file_service.py
Normal file
3215
backend/app/services/config_file_service.py
Normal file
File diff suppressed because it is too large
Load Diff
929
backend/app/services/config_service.py
Normal file
929
backend/app/services/config_service.py
Normal file
@@ -0,0 +1,929 @@
|
||||
"""Configuration inspection and editing service.
|
||||
|
||||
Provides methods to read and update fail2ban jail configuration and global
|
||||
server settings via the Unix domain socket. Regex validation is performed
|
||||
locally with Python's :mod:`re` module before any write is sent to the daemon
|
||||
so that invalid patterns are rejected early.
|
||||
|
||||
Architecture note: this module is a pure service — it contains **no**
|
||||
HTTP/FastAPI concerns. All results are returned as Pydantic models so
|
||||
routers can serialise them directly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
from app.models.config import (
|
||||
AddLogPathRequest,
|
||||
BantimeEscalation,
|
||||
Fail2BanLogResponse,
|
||||
GlobalConfigResponse,
|
||||
GlobalConfigUpdate,
|
||||
JailConfig,
|
||||
JailConfigListResponse,
|
||||
JailConfigResponse,
|
||||
JailConfigUpdate,
|
||||
LogPreviewLine,
|
||||
LogPreviewRequest,
|
||||
LogPreviewResponse,
|
||||
MapColorThresholdsResponse,
|
||||
MapColorThresholdsUpdate,
|
||||
RegexTestRequest,
|
||||
RegexTestResponse,
|
||||
ServiceStatusResponse,
|
||||
)
|
||||
from app.services import setup_service
|
||||
from app.utils.fail2ban_client import Fail2BanClient
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
_SOCKET_TIMEOUT: float = 10.0
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Custom exceptions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class JailNotFoundError(Exception):
    """Raised when a requested jail name does not exist in fail2ban."""

    def __init__(self, name: str) -> None:
        """Record the missing jail's name and build the error message.

        Args:
            name: The jail name that could not be located.
        """
        super().__init__(f"Jail not found: {name!r}")
        self.name: str = name
|
||||
|
||||
|
||||
class ConfigValidationError(Exception):
    """Raised when a configuration value fails validation before writing.

    Used for locally-detected problems (e.g. a regex that does not compile),
    so the rejected value is never sent to the daemon.
    """
|
||||
|
||||
|
||||
class ConfigOperationError(Exception):
    """Raised when a configuration write command fails.

    Wraps a ``set`` command that fail2ban itself rejected over the socket.
    """
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers (mirrored from jail_service for isolation)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _ok(response: Any) -> Any:
|
||||
"""Extract payload from a fail2ban ``(return_code, data)`` response.
|
||||
|
||||
Args:
|
||||
response: Raw value returned by :meth:`~Fail2BanClient.send`.
|
||||
|
||||
Returns:
|
||||
The payload ``data`` portion of the response.
|
||||
|
||||
Raises:
|
||||
ValueError: If the return code indicates an error.
|
||||
"""
|
||||
try:
|
||||
code, data = response
|
||||
except (TypeError, ValueError) as exc:
|
||||
raise ValueError(f"Unexpected fail2ban response shape: {response!r}") from exc
|
||||
if code != 0:
|
||||
raise ValueError(f"fail2ban returned error code {code}: {data!r}")
|
||||
return data
|
||||
|
||||
|
||||
def _to_dict(pairs: Any) -> dict[str, Any]:
|
||||
"""Convert a list of ``(key, value)`` pairs to a plain dict."""
|
||||
if not isinstance(pairs, (list, tuple)):
|
||||
return {}
|
||||
result: dict[str, Any] = {}
|
||||
for item in pairs:
|
||||
try:
|
||||
k, v = item
|
||||
result[str(k)] = v
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
return result
|
||||
|
||||
|
||||
def _ensure_list(value: Any) -> list[str]:
|
||||
"""Coerce a fail2ban ``get`` result to a list of strings."""
|
||||
if value is None:
|
||||
return []
|
||||
if isinstance(value, str):
|
||||
return [value] if value.strip() else []
|
||||
if isinstance(value, (list, tuple)):
|
||||
return [str(v) for v in value if v is not None]
|
||||
return [str(value)]
|
||||
|
||||
|
||||
async def _safe_get(
    client: Fail2BanClient,
    command: list[Any],
    default: Any = None,
) -> Any:
    """Send *command* and fall back to *default* on any failure.

    Args:
        client: Shared fail2ban client.
        command: Command list to send to the daemon.
        default: Value returned when the command errors out.

    Returns:
        The unwrapped payload, or *default*.
    """
    try:
        payload = _ok(await client.send(command))
    except Exception:  # best-effort read — caller supplied a fallback
        return default
    return payload
|
||||
|
||||
|
||||
def _is_not_found_error(exc: Exception) -> bool:
|
||||
"""Return ``True`` if *exc* signals an unknown jail."""
|
||||
msg = str(exc).lower()
|
||||
return any(
|
||||
phrase in msg
|
||||
for phrase in ("unknown jail", "no jail", "does not exist", "not found")
|
||||
)
|
||||
|
||||
|
||||
def _validate_regex(pattern: str) -> str | None:
|
||||
"""Try to compile *pattern* and return an error message if invalid.
|
||||
|
||||
Args:
|
||||
pattern: A regex pattern string to validate.
|
||||
|
||||
Returns:
|
||||
``None`` if valid, or an error message string if the pattern is broken.
|
||||
"""
|
||||
try:
|
||||
re.compile(pattern)
|
||||
return None
|
||||
except re.error as exc:
|
||||
return str(exc)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — read jail configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def get_jail_config(socket_path: str, name: str) -> JailConfigResponse:
    """Return the editable configuration for a single jail.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        name: Jail name.

    Returns:
        :class:`~app.models.config.JailConfigResponse`.

    Raises:
        JailNotFoundError: If *name* is not a known jail.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # Verify existence.
    try:
        _ok(await client.send(["status", name, "short"]))
    except ValueError as exc:
        if _is_not_found_error(exc):
            raise JailNotFoundError(name) from exc
        raise

    # NOTE: the tuple unpack below is positional — it must stay in exact
    # one-to-one correspondence with the gather() argument order. Each read
    # goes through _safe_get, so a failing individual "get" degrades to its
    # default instead of failing the whole call.
    (
        bantime_raw,
        findtime_raw,
        maxretry_raw,
        failregex_raw,
        ignoreregex_raw,
        logpath_raw,
        datepattern_raw,
        logencoding_raw,
        backend_raw,
        usedns_raw,
        prefregex_raw,
        actions_raw,
        bt_increment_raw,
        bt_factor_raw,
        bt_formula_raw,
        bt_multipliers_raw,
        bt_maxtime_raw,
        bt_rndtime_raw,
        bt_overalljails_raw,
    ) = await asyncio.gather(
        _safe_get(client, ["get", name, "bantime"], 600),
        _safe_get(client, ["get", name, "findtime"], 600),
        _safe_get(client, ["get", name, "maxretry"], 5),
        _safe_get(client, ["get", name, "failregex"], []),
        _safe_get(client, ["get", name, "ignoreregex"], []),
        _safe_get(client, ["get", name, "logpath"], []),
        _safe_get(client, ["get", name, "datepattern"], None),
        _safe_get(client, ["get", name, "logencoding"], "UTF-8"),
        _safe_get(client, ["get", name, "backend"], "polling"),
        _safe_get(client, ["get", name, "usedns"], "warn"),
        _safe_get(client, ["get", name, "prefregex"], ""),
        _safe_get(client, ["get", name, "actions"], []),
        _safe_get(client, ["get", name, "bantime.increment"], False),
        _safe_get(client, ["get", name, "bantime.factor"], None),
        _safe_get(client, ["get", name, "bantime.formula"], None),
        _safe_get(client, ["get", name, "bantime.multipliers"], None),
        _safe_get(client, ["get", name, "bantime.maxtime"], None),
        _safe_get(client, ["get", name, "bantime.rndtime"], None),
        _safe_get(client, ["get", name, "bantime.overalljails"], False),
    )

    # Coerce raw daemon values into the typed escalation sub-model; optional
    # fields stay None when the daemon did not report them.
    bantime_escalation = BantimeEscalation(
        increment=bool(bt_increment_raw),
        factor=float(bt_factor_raw) if bt_factor_raw is not None else None,
        formula=str(bt_formula_raw) if bt_formula_raw else None,
        multipliers=str(bt_multipliers_raw) if bt_multipliers_raw else None,
        max_time=int(bt_maxtime_raw) if bt_maxtime_raw is not None else None,
        rnd_time=int(bt_rndtime_raw) if bt_rndtime_raw is not None else None,
        overall_jails=bool(bt_overalljails_raw),
    )

    # Fallback values here mirror the _safe_get defaults above.
    jail_cfg = JailConfig(
        name=name,
        ban_time=int(bantime_raw or 600),
        find_time=int(findtime_raw or 600),
        max_retry=int(maxretry_raw or 5),
        fail_regex=_ensure_list(failregex_raw),
        ignore_regex=_ensure_list(ignoreregex_raw),
        log_paths=_ensure_list(logpath_raw),
        date_pattern=str(datepattern_raw) if datepattern_raw else None,
        log_encoding=str(logencoding_raw or "UTF-8"),
        backend=str(backend_raw or "polling"),
        use_dns=str(usedns_raw or "warn"),
        prefregex=str(prefregex_raw) if prefregex_raw else "",
        actions=_ensure_list(actions_raw),
        bantime_escalation=bantime_escalation,
    )

    log.info("jail_config_fetched", jail=name)
    return JailConfigResponse(jail=jail_cfg)
|
||||
|
||||
|
||||
async def list_jail_configs(socket_path: str) -> JailConfigListResponse:
    """Return configuration for all active jails.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.

    Returns:
        :class:`~app.models.config.JailConfigListResponse`.

    Raises:
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # "Jail list" arrives as a single comma-separated string in the global
    # status payload; split and trim it into individual jail names.
    global_status = _to_dict(_ok(await client.send(["status"])))
    jail_list_raw: str = str(global_status.get("Jail list", "") or "").strip()
    jail_names: list[str] = (
        [j.strip() for j in jail_list_raw.split(",") if j.strip()]
        if jail_list_raw
        else []
    )

    if not jail_names:
        return JailConfigListResponse(jails=[], total=0)

    # Fetch all jail configs concurrently. return_exceptions=False means a
    # single failing jail (e.g. JailNotFoundError racing a jail removal)
    # aborts the whole listing and propagates to the caller.
    responses: list[JailConfigResponse] = await asyncio.gather(
        *[get_jail_config(socket_path, name) for name in jail_names],
        return_exceptions=False,
    )

    jails = [r.jail for r in responses]
    log.info("jail_configs_listed", count=len(jails))
    return JailConfigListResponse(jails=jails, total=len(jails))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — write jail configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def update_jail_config(
    socket_path: str,
    name: str,
    update: JailConfigUpdate,
) -> None:
    """Apply *update* to the configuration of a running jail.

    Each non-None field in *update* is sent as a separate ``set`` command.
    Regex patterns are validated locally before any write is sent, so an
    invalid pattern leaves the daemon completely untouched. Note that the
    individual ``set`` commands are not transactional: a failure partway
    through leaves earlier writes applied.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        name: Jail name.
        update: Partial update payload.

    Raises:
        JailNotFoundError: If *name* is not a known jail.
        ConfigValidationError: If a regex pattern fails to compile.
        ConfigOperationError: If a ``set`` command is rejected by fail2ban.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    # Validate all regex patterns before touching the daemon.
    for pattern_list, field in [
        (update.fail_regex, "fail_regex"),
        (update.ignore_regex, "ignore_regex"),
    ]:
        if pattern_list is None:
            continue
        for pattern in pattern_list:
            err = _validate_regex(pattern)
            if err:
                raise ConfigValidationError(f"Invalid regex in {field!r}: {err!r} (pattern: {pattern!r})")
    if update.prefregex is not None and update.prefregex:
        err = _validate_regex(update.prefregex)
        if err:
            raise ConfigValidationError(f"Invalid regex in 'prefregex': {err!r} (pattern: {update.prefregex!r})")

    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # Verify existence.
    try:
        _ok(await client.send(["status", name, "short"]))
    except ValueError as exc:
        if _is_not_found_error(exc):
            raise JailNotFoundError(name) from exc
        raise

    # Send one "set" command, converting a daemon rejection into
    # ConfigOperationError so callers get a uniform error type.
    async def _set(key: str, value: Any) -> None:
        try:
            _ok(await client.send(["set", name, key, value]))
        except ValueError as exc:
            raise ConfigOperationError(f"Failed to set {key!r} = {value!r}: {exc}") from exc

    if update.ban_time is not None:
        await _set("bantime", update.ban_time)
    if update.find_time is not None:
        await _set("findtime", update.find_time)
    if update.max_retry is not None:
        await _set("maxretry", update.max_retry)
    if update.date_pattern is not None:
        await _set("datepattern", update.date_pattern)
    if update.dns_mode is not None:
        await _set("usedns", update.dns_mode)
    if update.backend is not None:
        await _set("backend", update.backend)
    if update.log_encoding is not None:
        await _set("logencoding", update.log_encoding)
    if update.prefregex is not None:
        await _set("prefregex", update.prefregex)
    if update.enabled is not None:
        # NOTE(review): "enabled" is mapped onto fail2ban's inverted "idle"
        # flag (enabled=True -> idle off) — confirm this matches the intended
        # enable/disable semantics for this deployment.
        await _set("idle", "off" if update.enabled else "on")

    # Ban-time escalation fields.
    if update.bantime_escalation is not None:
        esc = update.bantime_escalation
        if esc.increment is not None:
            await _set("bantime.increment", "true" if esc.increment else "false")
        if esc.factor is not None:
            await _set("bantime.factor", str(esc.factor))
        if esc.formula is not None:
            await _set("bantime.formula", esc.formula)
        if esc.multipliers is not None:
            await _set("bantime.multipliers", esc.multipliers)
        if esc.max_time is not None:
            await _set("bantime.maxtime", esc.max_time)
        if esc.rnd_time is not None:
            await _set("bantime.rndtime", esc.rnd_time)
        if esc.overall_jails is not None:
            await _set("bantime.overalljails", "true" if esc.overall_jails else "false")

    # Replacing regex lists requires deleting old entries then adding new ones.
    if update.fail_regex is not None:
        await _replace_regex_list(client, name, "failregex", update.fail_regex)
    if update.ignore_regex is not None:
        await _replace_regex_list(client, name, "ignoreregex", update.ignore_regex)

    log.info("jail_config_updated", jail=name)
|
||||
|
||||
|
||||
async def _replace_regex_list(
    client: Fail2BanClient,
    jail: str,
    field: str,
    new_patterns: list[str],
) -> None:
    """Replace the full regex list for *field* in *jail*.

    Deletes all existing entries (highest index first to preserve ordering)
    then inserts all *new_patterns* in order. Deletions are best-effort —
    a failed delete is ignored — while a failed add aborts with
    :class:`ConfigOperationError`, so a partial replacement is possible.

    Args:
        client: Shared :class:`~app.utils.fail2ban_client.Fail2BanClient`.
        jail: Jail name.
        field: Either ``"failregex"`` or ``"ignoreregex"``.
        new_patterns: Replacement list (may be empty to clear).

    Raises:
        ConfigValidationError: If a new pattern fails to compile.
        ConfigOperationError: If fail2ban rejects an add command.
    """
    # Determine current count.
    current_raw = await _safe_get(client, ["get", jail, field], [])
    current: list[str] = _ensure_list(current_raw)

    # fail2ban uses "del<field>"/"add<field>" sub-commands of "set".
    del_cmd = f"del{field}"
    add_cmd = f"add{field}"

    # Delete in reverse order so indices stay stable.
    for idx in range(len(current) - 1, -1, -1):
        with contextlib.suppress(ValueError):
            _ok(await client.send(["set", jail, del_cmd, idx]))

    # Add new patterns.
    for pattern in new_patterns:
        # Re-validate defensively even though callers validate up front.
        err = _validate_regex(pattern)
        if err:
            raise ConfigValidationError(f"Invalid regex: {err!r} (pattern: {pattern!r})")
        try:
            _ok(await client.send(["set", jail, add_cmd, pattern]))
        except ValueError as exc:
            raise ConfigOperationError(f"Failed to add {field} pattern: {exc}") from exc
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — global configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def get_global_config(socket_path: str) -> GlobalConfigResponse:
    """Return fail2ban global configuration settings.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.

    Returns:
        :class:`~app.models.config.GlobalConfigResponse`.

    Raises:
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # Positional unpack — keep in one-to-one order with the gather() args.
    # Each read is best-effort via _safe_get, so failures fall back to the
    # defaults shown here (mirrored again in the response construction).
    (
        log_level_raw,
        log_target_raw,
        db_purge_age_raw,
        db_max_matches_raw,
    ) = await asyncio.gather(
        _safe_get(client, ["get", "loglevel"], "INFO"),
        _safe_get(client, ["get", "logtarget"], "STDOUT"),
        _safe_get(client, ["get", "dbpurgeage"], 86400),
        _safe_get(client, ["get", "dbmaxmatches"], 10),
    )

    return GlobalConfigResponse(
        log_level=str(log_level_raw or "INFO").upper(),
        log_target=str(log_target_raw or "STDOUT"),
        db_purge_age=int(db_purge_age_raw or 86400),
        db_max_matches=int(db_max_matches_raw or 10),
    )
|
||||
|
||||
|
||||
async def update_global_config(socket_path: str, update: GlobalConfigUpdate) -> None:
    """Apply *update* to fail2ban global settings.

    Each non-None field is written with its own ``set`` command; the writes
    are not transactional, so a mid-sequence failure leaves earlier fields
    already applied.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        update: Partial update payload.

    Raises:
        ConfigOperationError: If a ``set`` command is rejected.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # Send one global "set", converting rejection into ConfigOperationError.
    async def _set_global(key: str, value: Any) -> None:
        try:
            _ok(await client.send(["set", key, value]))
        except ValueError as exc:
            raise ConfigOperationError(f"Failed to set global {key!r} = {value!r}: {exc}") from exc

    if update.log_level is not None:
        # Log level is normalised to upper case before sending.
        await _set_global("loglevel", update.log_level.upper())
    if update.log_target is not None:
        await _set_global("logtarget", update.log_target)
    if update.db_purge_age is not None:
        await _set_global("dbpurgeage", update.db_purge_age)
    if update.db_max_matches is not None:
        await _set_global("dbmaxmatches", update.db_max_matches)

    log.info("global_config_updated")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — regex tester (stateless, no socket)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_regex(request: RegexTestRequest) -> RegexTestResponse:
    """Test a regex pattern against a sample log line.

    This is a pure in-process operation — no socket communication occurs.
    A pattern that does not compile is reported via the ``error`` field
    rather than raising.

    Args:
        request: The :class:`~app.models.config.RegexTestRequest` payload.

    Returns:
        :class:`~app.models.config.RegexTestResponse` with match result;
        ``groups`` contains only the non-None captured groups, stringified.
    """
    try:
        compiled = re.compile(request.fail_regex)
    except re.error as exc:
        return RegexTestResponse(matched=False, groups=[], error=str(exc))

    # search() (not match()) — the pattern may hit anywhere in the line.
    match = compiled.search(request.log_line)
    if match is None:
        return RegexTestResponse(matched=False)

    groups: list[str] = list(match.groups() or [])
    return RegexTestResponse(matched=True, groups=[str(g) for g in groups if g is not None])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — log observation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def add_log_path(
    socket_path: str,
    jail: str,
    req: AddLogPathRequest,
) -> None:
    """Add a log path to an existing jail.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        jail: Jail name to which the log path should be added.
        req: :class:`~app.models.config.AddLogPathRequest` with the path to add.

    Raises:
        JailNotFoundError: If *jail* is not a known jail.
        ConfigOperationError: If the command is rejected by fail2ban.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # Verify the jail exists before attempting the write.
    try:
        _ok(await client.send(["status", jail, "short"]))
    except ValueError as exc:
        if _is_not_found_error(exc):
            raise JailNotFoundError(jail) from exc
        raise

    # "tail" starts reading at the end of the file, "head" from the start.
    tail_flag = "tail" if req.tail else "head"
    try:
        _ok(await client.send(["set", jail, "addlogpath", req.log_path, tail_flag]))
        log.info("log_path_added", jail=jail, path=req.log_path)
    except ValueError as exc:
        raise ConfigOperationError(f"Failed to add log path {req.log_path!r}: {exc}") from exc
|
||||
|
||||
|
||||
async def delete_log_path(
    socket_path: str,
    jail: str,
    log_path: str,
) -> None:
    """Remove a monitored log path from an existing jail.

    Uses ``set <jail> dellogpath <path>`` to remove the path at runtime
    without requiring a daemon restart.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        jail: Jail name from which the log path should be removed.
        log_path: Absolute path of the log file to stop monitoring.

    Raises:
        JailNotFoundError: If *jail* is not a known jail.
        ConfigOperationError: If the command is rejected by fail2ban.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    # Verify the jail exists before attempting the write.
    try:
        _ok(await client.send(["status", jail, "short"]))
    except ValueError as exc:
        if _is_not_found_error(exc):
            raise JailNotFoundError(jail) from exc
        raise

    try:
        _ok(await client.send(["set", jail, "dellogpath", log_path]))
        log.info("log_path_deleted", jail=jail, path=log_path)
    except ValueError as exc:
        raise ConfigOperationError(f"Failed to delete log path {log_path!r}: {exc}") from exc
|
||||
|
||||
|
||||
async def preview_log(req: LogPreviewRequest) -> LogPreviewResponse:
    """Read the last *num_lines* of a log file and test *fail_regex* against each.

    This operation reads from the local filesystem — no socket is used. The
    blocking file read runs in a worker thread so the event loop is never
    stalled. Errors (bad regex, missing file, unreadable file) are reported
    via ``regex_error`` instead of raising.

    Args:
        req: :class:`~app.models.config.LogPreviewRequest`.

    Returns:
        :class:`~app.models.config.LogPreviewResponse` with line-by-line results.
    """
    # Validate the regex first so no file I/O happens for a broken pattern.
    try:
        compiled = re.compile(req.fail_regex)
    except re.error as exc:
        return LogPreviewResponse(
            lines=[],
            total_lines=0,
            matched_count=0,
            regex_error=str(exc),
        )

    path = Path(req.log_path)
    if not path.is_file():
        return LogPreviewResponse(
            lines=[],
            total_lines=0,
            matched_count=0,
            regex_error=f"File not found: {req.log_path!r}",
        )

    # Read the last num_lines lines efficiently in a worker thread.
    # asyncio.to_thread is the modern replacement for the deprecated
    # get_event_loop().run_in_executor(None, ...) pattern.
    try:
        raw_lines = await asyncio.to_thread(_read_tail_lines, str(path), req.num_lines)
    except OSError as exc:
        return LogPreviewResponse(
            lines=[],
            total_lines=0,
            matched_count=0,
            regex_error=f"Cannot read file: {exc}",
        )

    result_lines: list[LogPreviewLine] = []
    matched_count = 0
    for line in raw_lines:
        m = compiled.search(line)
        # Report only the non-None captured groups, stringified.
        groups = [str(g) for g in (m.groups() or []) if g is not None] if m else []
        result_lines.append(LogPreviewLine(line=line, matched=(m is not None), groups=groups))
        if m:
            matched_count += 1

    return LogPreviewResponse(
        lines=result_lines,
        total_lines=len(result_lines),
        matched_count=matched_count,
    )
|
||||
|
||||
|
||||
def _read_tail_lines(file_path: str, num_lines: int) -> list[str]:
    """Read the last *num_lines* from *file_path* synchronously.

    Uses a memory-efficient approach that seeks from the end of the file and
    reads backwards in fixed-size chunks until enough complete lines have
    been accumulated. Blank lines are dropped, so fewer than *num_lines*
    entries may be returned even from a long file.

    Args:
        file_path: Absolute path to the log file.
        num_lines: Number of lines to return.

    Returns:
        A list of stripped line strings (decoded UTF-8, bad bytes replaced).
    """
    chunk_size = 8192
    raw_lines: list[bytes] = []
    with open(file_path, "rb") as fh:
        fh.seek(0, 2)  # seek to end
        end_pos = fh.tell()
        if end_pos == 0:
            return []
        buf = b""
        pos = end_pos
        # Walk backwards, prepending each chunk, until we have more splits
        # than requested lines (or we've consumed the whole file).
        while len(raw_lines) <= num_lines and pos > 0:
            read_size = min(chunk_size, pos)
            pos -= read_size
            fh.seek(pos)
            chunk = fh.read(read_size)
            buf = chunk + buf
            raw_lines = buf.split(b"\n")
            # Strip incomplete leading line unless we've read the whole file.
            # (Only affects the count check — buf is re-split next iteration.)
            if pos > 0 and len(raw_lines) > 1:
                raw_lines = raw_lines[1:]
        return [ln.decode("utf-8", errors="replace").rstrip() for ln in raw_lines[-num_lines:] if ln.strip()]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Map color thresholds
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def get_map_color_thresholds(db: aiosqlite.Connection) -> MapColorThresholdsResponse:
    """Fetch the configured map color thresholds.

    Args:
        db: Active aiosqlite connection to the application database.

    Returns:
        A :class:`MapColorThresholdsResponse` containing the three threshold values.
    """
    thresholds = await setup_service.get_map_color_thresholds(db)
    high, medium, low = thresholds
    return MapColorThresholdsResponse(
        threshold_high=high,
        threshold_medium=medium,
        threshold_low=low,
    )
|
||||
|
||||
|
||||
async def update_map_color_thresholds(
    db: aiosqlite.Connection,
    update: MapColorThresholdsUpdate,
) -> None:
    """Persist a new map color threshold configuration.

    Args:
        db: Active aiosqlite connection to the application database.
        update: The new threshold values.

    Raises:
        ValueError: If validation fails (thresholds must satisfy high > medium > low).
    """
    new_values = {
        "threshold_high": update.threshold_high,
        "threshold_medium": update.threshold_medium,
        "threshold_low": update.threshold_low,
    }
    await setup_service.set_map_color_thresholds(db, **new_values)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# fail2ban log file reader
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Log targets that are not file paths — log viewing is unavailable for these.
# Compared case-insensitively (callers upper-case the reported logtarget).
_NON_FILE_LOG_TARGETS: frozenset[str] = frozenset(
    {"STDOUT", "STDERR", "SYSLOG", "SYSTEMD-JOURNAL"}
)

# Only allow reading log files under these base directories (security).
_SAFE_LOG_PREFIXES: tuple[str, ...] = ("/var/log", "/config/log")
|
||||
|
||||
|
||||
def _count_file_lines(file_path: str) -> int:
|
||||
"""Count the total number of lines in *file_path* synchronously.
|
||||
|
||||
Uses a memory-efficient buffered read to avoid loading the whole file.
|
||||
|
||||
Args:
|
||||
file_path: Absolute path to the file.
|
||||
|
||||
Returns:
|
||||
Total number of lines in the file.
|
||||
"""
|
||||
count = 0
|
||||
with open(file_path, "rb") as fh:
|
||||
for chunk in iter(lambda: fh.read(65536), b""):
|
||||
count += chunk.count(b"\n")
|
||||
return count
|
||||
|
||||
|
||||
async def read_fail2ban_log(
    socket_path: str,
    lines: int,
    filter_text: str | None = None,
) -> Fail2BanLogResponse:
    """Read the tail of the fail2ban daemon log file.

    Queries the fail2ban socket for the current log target and log level,
    validates that the target is a readable file, then returns the last
    *lines* entries optionally filtered by *filter_text*.

    Security: the resolved log path is rejected unless it is one of (or
    lies under) the directories in :data:`_SAFE_LOG_PREFIXES`, preventing
    path traversal.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        lines: Number of lines to return from the tail of the file (1–2000).
        filter_text: Optional plain-text substring — only matching lines are
            returned. Applied server-side; does not affect *total_lines*.

    Returns:
        :class:`~app.models.config.Fail2BanLogResponse`.

    Raises:
        ConfigOperationError: When the log target is not a file, when the
            resolved path is outside the allowed directories, or when the
            file cannot be read.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    log_level_raw, log_target_raw = await asyncio.gather(
        _safe_get(client, ["get", "loglevel"], "INFO"),
        _safe_get(client, ["get", "logtarget"], "STDOUT"),
    )

    log_level = str(log_level_raw or "INFO").upper()
    log_target = str(log_target_raw or "STDOUT")

    # Reject non-file targets up front.
    if log_target.upper() in _NON_FILE_LOG_TARGETS:
        raise ConfigOperationError(
            f"fail2ban is logging to {log_target!r}. "
            "File-based log viewing is only available when fail2ban logs to a file path."
        )

    # Resolve and validate (security: no path traversal outside safe dirs).
    try:
        resolved = Path(log_target).resolve()
    except (ValueError, OSError) as exc:
        raise ConfigOperationError(
            f"Cannot resolve log target path {log_target!r}: {exc}"
        ) from exc

    resolved_str = str(resolved)
    # Fix: a bare startswith("/var/log") would also accept sibling paths
    # such as "/var/log-evil"; require the exact directory or a subpath.
    if not any(
        resolved_str == safe or resolved_str.startswith(safe + "/")
        for safe in _SAFE_LOG_PREFIXES
    ):
        raise ConfigOperationError(
            f"Log path {resolved_str!r} is outside the allowed directory. "
            "Only paths under /var/log or /config/log are permitted."
        )

    if not resolved.is_file():
        raise ConfigOperationError(f"Log file not found: {resolved_str!r}")

    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() is deprecated in this context since Python 3.10.
    loop = asyncio.get_running_loop()

    total_lines, raw_lines = await asyncio.gather(
        loop.run_in_executor(None, _count_file_lines, resolved_str),
        loop.run_in_executor(None, _read_tail_lines, resolved_str, lines),
    )

    filtered = (
        [ln for ln in raw_lines if filter_text in ln]
        if filter_text
        else raw_lines
    )

    log.info(
        "fail2ban_log_read",
        log_path=resolved_str,
        lines_requested=lines,
        lines_returned=len(filtered),
        filter_active=filter_text is not None,
    )

    return Fail2BanLogResponse(
        log_path=resolved_str,
        lines=filtered,
        total_lines=total_lines,
        log_level=log_level,
        log_target=log_target,
    )
|
||||
|
||||
|
||||
async def get_service_status(socket_path: str) -> ServiceStatusResponse:
    """Return fail2ban service health status with log configuration.

    Delegates to :func:`~app.services.health_service.probe` for the core
    health snapshot and augments it with the current log-level and
    log-target values read from the socket (when the server is online).

    Args:
        socket_path: Path to the fail2ban Unix domain socket.

    Returns:
        :class:`~app.models.config.ServiceStatusResponse`.
    """
    from app.services.health_service import probe  # lazy import avoids circular dep

    status = await probe(socket_path)

    # Log configuration is only queryable while the server is reachable.
    log_level = "UNKNOWN"
    log_target = "UNKNOWN"
    if status.online:
        client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)
        level_raw, target_raw = await asyncio.gather(
            _safe_get(client, ["get", "loglevel"], "INFO"),
            _safe_get(client, ["get", "logtarget"], "STDOUT"),
        )
        log_level = str(level_raw or "INFO").upper()
        log_target = str(target_raw or "STDOUT")

    log.info(
        "service_status_fetched",
        online=status.online,
        jail_count=status.active_jails,
    )

    return ServiceStatusResponse(
        online=status.online,
        version=status.version,
        jail_count=status.active_jails,
        total_bans=status.total_bans,
        total_failures=status.total_failures,
        log_level=log_level,
        log_target=log_target,
    )
|
||||
1011
backend/app/services/file_config_service.py
Normal file
1011
backend/app/services/file_config_service.py
Normal file
File diff suppressed because it is too large
Load Diff
816
backend/app/services/geo_service.py
Normal file
816
backend/app/services/geo_service.py
Normal file
@@ -0,0 +1,816 @@
|
||||
"""Geo service.
|
||||
|
||||
Resolves IP addresses to their country, ASN, and organisation using the
|
||||
`ip-api.com <http://ip-api.com>`_ JSON API. Results are cached in two tiers:
|
||||
|
||||
1. **In-memory dict** — fastest; survives for the life of the process.
|
||||
2. **Persistent SQLite table** (``geo_cache``) — survives restarts; loaded
|
||||
into the in-memory dict during application startup via
|
||||
:func:`load_cache_from_db`.
|
||||
|
||||
Only *successful* lookups (those returning a non-``None`` ``country_code``)
|
||||
are written to the persistent cache. Failed lookups are **not** cached so
|
||||
they will be retried on the next request.
|
||||
|
||||
For bulk operations the batch endpoint ``http://ip-api.com/batch`` is used
|
||||
(up to 100 IPs per HTTP call) which is far more efficient than one-at-a-time
|
||||
requests. Use :func:`lookup_batch` from the ban or blocklist services.
|
||||
|
||||
Usage::
|
||||
|
||||
import aiohttp
|
||||
import aiosqlite
|
||||
from app.services import geo_service
|
||||
|
||||
# warm the cache from the persistent store at startup
|
||||
async with aiosqlite.connect("bangui.db") as db:
|
||||
await geo_service.load_cache_from_db(db)
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
# single lookup
|
||||
info = await geo_service.lookup("1.2.3.4", session)
|
||||
if info:
|
||||
print(info.country_code) # "DE"
|
||||
|
||||
# bulk lookup (more efficient for large sets)
|
||||
geo_map = await geo_service.lookup_batch(["1.2.3.4", "5.6.7.8"], session)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import aiohttp
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
import geoip2.database
|
||||
import geoip2.errors
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()

# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

#: ip-api.com single-IP lookup endpoint (HTTP only on the free tier).
_API_URL: str = (
    "http://ip-api.com/json/{ip}?fields=status,message,country,countryCode,org,as"
)

#: ip-api.com batch endpoint — accepts up to 100 IPs per POST.
_BATCH_API_URL: str = (
    "http://ip-api.com/batch?fields=status,message,country,countryCode,org,as,query"
)

#: Maximum IPs per batch request (ip-api.com hard limit is 100).
_BATCH_SIZE: int = 100

#: Maximum number of entries kept in the in-process cache before it is
#: flushed completely. A simple eviction strategy — the cache is cheap to
#: rebuild from the persistent store.
_MAX_CACHE_SIZE: int = 50_000

#: Timeout for outgoing geo API requests in seconds (the batch endpoint
#: uses twice this value).
_REQUEST_TIMEOUT: float = 5.0

#: How many seconds a failed lookup result is suppressed before the IP is
#: eligible for a new API attempt. Default: 5 minutes.
_NEG_CACHE_TTL: float = 300.0

#: Minimum delay in seconds between consecutive batch HTTP requests to
#: ip-api.com. The free tier allows 45 requests/min; 1.5 s ≈ 40 req/min.
_BATCH_DELAY: float = 1.5

#: Maximum number of retries for a batch chunk that fails with a
#: transient error (e.g. connection reset due to rate limiting).
_BATCH_MAX_RETRIES: int = 2
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Domain model
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class GeoInfo:
    """Geographical and network metadata for a single IP address.

    All fields default to ``None`` when the information is unavailable or
    the lookup fails gracefully; an all-``None`` instance represents a
    failed lookup.
    """

    country_code: str | None
    """ISO 3166-1 alpha-2 country code, e.g. ``"DE"``."""

    country_name: str | None
    """Human-readable country name, e.g. ``"Germany"``."""

    asn: str | None
    """Autonomous System Number string, e.g. ``"AS3320"``."""

    org: str | None
    """Organisation name associated with the IP, e.g. ``"Deutsche Telekom"``."""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal cache
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: Module-level in-memory cache: ``ip → GeoInfo`` (positive results only).
_cache: dict[str, GeoInfo] = {}

#: Negative cache: ``ip → time.monotonic()`` timestamp of the last failed
#: lookup attempt (monotonic clock, not epoch time).
#: Entries within :data:`_NEG_CACHE_TTL` seconds are not re-queried.
_neg_cache: dict[str, float] = {}

#: IPs added to :data:`_cache` but not yet persisted to the database.
#: Consumed and cleared atomically by :func:`flush_dirty`.
_dirty: set[str] = set()

#: Optional MaxMind GeoLite2 reader initialised by :func:`init_geoip`.
_geoip_reader: geoip2.database.Reader | None = None
|
||||
|
||||
|
||||
def clear_cache() -> None:
    """Empty every in-memory geo cache structure.

    Clears the positive cache, the negative cache, and the dirty set, so
    entries that were cached but never persisted are discarded as well.
    Intended for tests and operator-triggered refreshes of stale data.
    """
    for store in (_cache, _neg_cache, _dirty):
        store.clear()
|
||||
|
||||
|
||||
def clear_neg_cache() -> None:
    """Drop only the negative (failed-lookup) cache entries.

    Makes previously failed IPs immediately eligible for a fresh API
    attempt — used when triggering a manual re-resolve.
    """
    _neg_cache.clear()
|
||||
|
||||
|
||||
def is_cached(ip: str) -> bool:
    """Check whether *ip* has a positive entry in the in-memory cache.

    A positive entry is one whose ``country_code`` is not ``None``; this
    lets callers skip already-resolved IPs when assembling input for
    :func:`lookup_batch`.

    Args:
        ip: IPv4 or IPv6 address string.

    Returns:
        ``True`` when *ip* is cached with a known country code.
    """
    entry = _cache.get(ip)
    return entry is not None and entry.country_code is not None
|
||||
|
||||
|
||||
async def cache_stats(db: aiosqlite.Connection) -> dict[str, int]:
    """Collect diagnostic counters for the geo cache subsystem.

    Combines the persistent store's unresolved-row count with the
    in-memory counters.

    Args:
        db: Open BanGUI application database connection.

    Returns:
        Dict with keys ``cache_size``, ``unresolved``, ``neg_cache_size``,
        and ``dirty_size``.
    """
    unresolved = 0
    async with db.execute(
        "SELECT COUNT(*) FROM geo_cache WHERE country_code IS NULL"
    ) as cur:
        row = await cur.fetchone()
        if row:
            unresolved = int(row[0])

    return {
        "cache_size": len(_cache),
        "unresolved": unresolved,
        "neg_cache_size": len(_neg_cache),
        "dirty_size": len(_dirty),
    }
|
||||
|
||||
|
||||
def init_geoip(mmdb_path: str | None) -> None:
    """Load the optional MaxMind GeoLite2-Country database reader.

    When *mmdb_path* is ``None``, empty, or not an existing file the local
    fallback stays disabled and ip-api.com remains the sole resolver.

    Args:
        mmdb_path: Absolute path to a ``GeoLite2-Country.mmdb`` file.
    """
    global _geoip_reader  # noqa: PLW0603
    if not mmdb_path:
        return
    from pathlib import Path  # noqa: PLC0415

    import geoip2.database  # noqa: PLC0415

    mmdb = Path(mmdb_path)
    if not mmdb.is_file():
        log.warning("geoip_mmdb_not_found", path=mmdb_path)
        return

    _geoip_reader = geoip2.database.Reader(mmdb_path)
    log.info("geoip_mmdb_loaded", path=mmdb_path)
|
||||
|
||||
|
||||
def _geoip_lookup(ip: str) -> GeoInfo | None:
    """Try to resolve *ip* via the local MaxMind GeoLite2 database.

    Returns ``None`` when the reader is not initialised, the IP is absent
    from the database, or any other error occurs.

    Args:
        ip: IPv4 or IPv6 address string.

    Returns:
        A :class:`GeoInfo` with at least ``country_code`` populated, or
        ``None`` when resolution is impossible.
    """
    reader = _geoip_reader
    if reader is None:
        return None
    import geoip2.errors  # noqa: PLC0415

    try:
        record = reader.country(ip)
        iso_code = record.country.iso_code or None
        name = record.country.name or None
    except geoip2.errors.AddressNotFoundError:
        return None
    except Exception as exc:  # noqa: BLE001
        log.warning("geoip_lookup_failed", ip=ip, error=str(exc))
        return None

    if iso_code is None:
        return None
    return GeoInfo(country_code=iso_code, country_name=name, asn=None, org=None)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Persistent cache I/O
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def load_cache_from_db(db: aiosqlite.Connection) -> None:
    """Warm the in-memory cache from the persistent ``geo_cache`` table.

    Call once during application startup so the service begins with a warm
    cache instead of making cold API calls on the first request.

    Args:
        db: Open :class:`aiosqlite.Connection` to the BanGUI application
            database (not the fail2ban database).
    """
    loaded = 0
    async with db.execute(
        "SELECT ip, country_code, country_name, asn, org FROM geo_cache"
    ) as cur:
        async for row in cur:
            code = row[1]
            if code is None:
                # All-NULL rows mark failed lookups; keep them out of the
                # positive cache so those IPs stay retryable.
                continue
            _cache[str(row[0])] = GeoInfo(
                country_code=code,
                country_name=row[2],
                asn=row[3],
                org=row[4],
            )
            loaded += 1
    log.info("geo_cache_loaded_from_db", entries=loaded)
|
||||
|
||||
|
||||
async def _persist_entry(
    db: aiosqlite.Connection,
    ip: str,
    info: GeoInfo,
) -> None:
    """Upsert a resolved :class:`GeoInfo` into the ``geo_cache`` table.

    Only called when ``info.country_code`` is not ``None`` so the persistent
    store never contains empty placeholder rows. The caller is responsible
    for committing the transaction.

    Args:
        db: BanGUI application database connection.
        ip: IP address string.
        info: Resolved geo data to persist.
    """
    # The upsert also replaces a pre-existing all-NULL (negative) row and
    # refreshes the cached_at timestamp.
    await db.execute(
        """
        INSERT INTO geo_cache (ip, country_code, country_name, asn, org)
        VALUES (?, ?, ?, ?, ?)
        ON CONFLICT(ip) DO UPDATE SET
            country_code = excluded.country_code,
            country_name = excluded.country_name,
            asn = excluded.asn,
            org = excluded.org,
            cached_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now')
        """,
        (ip, info.country_code, info.country_name, info.asn, info.org),
    )
|
||||
|
||||
|
||||
async def _persist_neg_entry(db: aiosqlite.Connection, ip: str) -> None:
    """Record a failed lookup attempt in ``geo_cache`` with all-NULL fields.

    Uses ``INSERT OR IGNORE`` so that an existing *positive* entry (one that
    has a ``country_code``) is never overwritten by a later failure. The
    caller is responsible for committing the transaction.

    Args:
        db: BanGUI application database connection.
        ip: IP address string whose resolution failed.
    """
    await db.execute(
        "INSERT OR IGNORE INTO geo_cache (ip) VALUES (?)",
        (ip,),
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — single lookup
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def lookup(
    ip: str,
    http_session: aiohttp.ClientSession,
    db: aiosqlite.Connection | None = None,
) -> GeoInfo | None:
    """Resolve an IP address to country, ASN, and organisation metadata.

    Results are cached in-process. If the cache exceeds ``_MAX_CACHE_SIZE``
    entries it is flushed before the new result is stored.

    Only successful resolutions (``country_code is not None``) are written to
    the persistent cache when *db* is provided. Failed lookups are recorded
    in the negative cache (and as an all-NULL placeholder row when *db* is
    provided) and retried after :data:`_NEG_CACHE_TTL` seconds.

    Args:
        ip: IPv4 or IPv6 address string.
        http_session: Shared :class:`aiohttp.ClientSession` (from
            ``app.state.http_session``).
        db: Optional BanGUI application database. When provided, successful
            lookups are persisted for cross-restart cache warming.

    Returns:
        A :class:`GeoInfo` instance; failures yield an all-``None``
        :class:`GeoInfo` rather than ``None``.
    """
    if ip in _cache:
        return _cache[ip]

    # Negative cache: skip IPs that recently failed to avoid hammering the API.
    neg_ts = _neg_cache.get(ip)
    if neg_ts is not None and (time.monotonic() - neg_ts) < _NEG_CACHE_TTL:
        return GeoInfo(country_code=None, country_name=None, asn=None, org=None)

    url: str = _API_URL.format(ip=ip)
    api_ok = False
    try:
        async with http_session.get(url, timeout=aiohttp.ClientTimeout(total=_REQUEST_TIMEOUT)) as resp:
            if resp.status != 200:
                log.warning("geo_lookup_non_200", ip=ip, status=resp.status)
            else:
                data: dict[str, object] = await resp.json(content_type=None)
                if data.get("status") == "success":
                    # A "success" payload is authoritative — the GeoIP
                    # fallback below is skipped even if persistence fails.
                    api_ok = True
                    result = _parse_single_response(data)
                    _store(ip, result)
                    if result.country_code is not None and db is not None:
                        try:
                            await _persist_entry(db, ip, result)
                            await db.commit()
                        except Exception as exc:  # noqa: BLE001
                            # Persistence is best-effort; the in-memory
                            # result is still returned.
                            log.warning("geo_persist_failed", ip=ip, error=str(exc))
                    log.debug("geo_lookup_success", ip=ip, country=result.country_code, asn=result.asn)
                    return result
                log.debug(
                    "geo_lookup_failed",
                    ip=ip,
                    message=data.get("message", "unknown"),
                )
    except Exception as exc:  # noqa: BLE001
        log.warning(
            "geo_lookup_request_failed",
            ip=ip,
            exc_type=type(exc).__name__,
            error=repr(exc),
        )

    if not api_ok:
        # Try local MaxMind database as fallback.
        fallback = _geoip_lookup(ip)
        if fallback is not None:
            _store(ip, fallback)
            if fallback.country_code is not None and db is not None:
                try:
                    await _persist_entry(db, ip, fallback)
                    await db.commit()
                except Exception as exc:  # noqa: BLE001
                    log.warning("geo_persist_failed", ip=ip, error=str(exc))
            log.debug("geo_geoip_fallback_success", ip=ip, country=fallback.country_code)
            return fallback

    # Both resolvers failed — record in negative cache to avoid hammering.
    _neg_cache[ip] = time.monotonic()
    if db is not None:
        try:
            await _persist_neg_entry(db, ip)
            await db.commit()
        except Exception as exc:  # noqa: BLE001
            log.warning("geo_persist_neg_failed", ip=ip, error=str(exc))

    return GeoInfo(country_code=None, country_name=None, asn=None, org=None)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — batch lookup
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def lookup_cached_only(
    ips: list[str],
) -> tuple[dict[str, GeoInfo], list[str]]:
    """Return cached geo data for *ips* without any external API calls.

    Lets callers answer quickly from memory while deferring resolution of
    uncached IPs to a background task.

    Args:
        ips: IP address strings to look up.

    Returns:
        A ``(geo_map, uncached)`` tuple: *geo_map* maps every IP found in
        the in-memory cache to its :class:`GeoInfo`; *uncached* lists the
        IPs not in the cache. IPs still inside the negative-cache
        cool-down window appear in **neither** — they are not re-queued.
    """
    cached: dict[str, GeoInfo] = {}
    pending: list[str] = []
    now = time.monotonic()

    for candidate in dict.fromkeys(ips):  # dedupe while keeping order
        hit = _cache.get(candidate)
        if hit is not None:
            cached[candidate] = hit
            continue
        failed_at = _neg_cache.get(candidate)
        if failed_at is not None and (now - failed_at) < _NEG_CACHE_TTL:
            # Recently failed — leave out of *pending* for now.
            continue
        pending.append(candidate)

    return cached, pending
|
||||
|
||||
|
||||
async def lookup_batch(
    ips: list[str],
    http_session: aiohttp.ClientSession,
    db: aiosqlite.Connection | None = None,
) -> dict[str, GeoInfo]:
    """Resolve multiple IP addresses in bulk using ip-api.com batch endpoint.

    IPs already present in the in-memory cache are returned immediately
    without making an HTTP request. Uncached IPs are sent to
    ``http://ip-api.com/batch`` in chunks of up to :data:`_BATCH_SIZE`,
    throttled by :data:`_BATCH_DELAY` between chunks.

    Only successful resolutions (``country_code is not None``) are written to
    the persistent cache when *db* is provided. Both positive and negative
    entries are written in bulk using ``executemany`` (one round-trip per
    chunk) rather than one ``execute`` per IP; a single commit happens after
    all chunks.

    Args:
        ips: List of IP address strings to resolve. Duplicates are ignored.
        http_session: Shared :class:`aiohttp.ClientSession`.
        db: Optional BanGUI application database for persistent cache writes.

    Returns:
        Dict mapping ``ip → GeoInfo`` for every input IP. IPs whose
        resolution failed will have a ``GeoInfo`` with all-``None`` fields.
    """
    geo_result: dict[str, GeoInfo] = {}
    uncached: list[str] = []
    _empty = GeoInfo(country_code=None, country_name=None, asn=None, org=None)

    unique_ips = list(dict.fromkeys(ips))  # deduplicate, preserve order
    now = time.monotonic()
    for ip in unique_ips:
        if ip in _cache:
            geo_result[ip] = _cache[ip]
        elif ip in _neg_cache and (now - _neg_cache[ip]) < _NEG_CACHE_TTL:
            # Recently failed — skip API call, return empty result.
            geo_result[ip] = _empty
        else:
            uncached.append(ip)

    if not uncached:
        return geo_result

    log.info("geo_batch_lookup_start", total=len(uncached))

    for batch_idx, chunk_start in enumerate(range(0, len(uncached), _BATCH_SIZE)):
        chunk = uncached[chunk_start : chunk_start + _BATCH_SIZE]

        # Throttle: pause between consecutive HTTP calls to stay within the
        # ip-api.com free-tier rate limit (45 req/min).
        if batch_idx > 0:
            await asyncio.sleep(_BATCH_DELAY)

        # Retry transient failures (e.g. connection-reset from rate limit).
        chunk_result: dict[str, GeoInfo] | None = None
        for attempt in range(_BATCH_MAX_RETRIES + 1):
            chunk_result = await _batch_api_call(chunk, http_session)
            # Heuristic: when every IP in the chunk came back with
            # country_code=None, the whole request was almost certainly
            # rejected (connection reset / HTTP 429) rather than each IP
            # failing individually — retry the chunk after a back-off.
            all_failed = all(
                info.country_code is None for info in chunk_result.values()
            )
            if not all_failed or attempt >= _BATCH_MAX_RETRIES:
                break
            backoff = _BATCH_DELAY * (2 ** (attempt + 1))
            log.warning(
                "geo_batch_retry",
                attempt=attempt + 1,
                chunk_size=len(chunk),
                backoff=backoff,
            )
            await asyncio.sleep(backoff)

        # The retry loop always runs at least once, so chunk_result is set.
        assert chunk_result is not None  # noqa: S101

        # Collect bulk-write rows instead of one execute per IP.
        pos_rows: list[tuple[str, str | None, str | None, str | None, str | None]] = []
        neg_ips: list[str] = []

        for ip, info in chunk_result.items():
            if info.country_code is not None:
                # Successful API resolution.
                _store(ip, info)
                geo_result[ip] = info
                if db is not None:
                    pos_rows.append(
                        (ip, info.country_code, info.country_name, info.asn, info.org)
                    )
            else:
                # API failed — try local GeoIP fallback.
                fallback = _geoip_lookup(ip)
                if fallback is not None:
                    _store(ip, fallback)
                    geo_result[ip] = fallback
                    if db is not None:
                        pos_rows.append(
                            (
                                ip,
                                fallback.country_code,
                                fallback.country_name,
                                fallback.asn,
                                fallback.org,
                            )
                        )
                else:
                    # Both resolvers failed — record in negative cache.
                    _neg_cache[ip] = time.monotonic()
                    geo_result[ip] = _empty
                    if db is not None:
                        neg_ips.append(ip)

        if db is not None:
            if pos_rows:
                try:
                    await db.executemany(
                        """
                        INSERT INTO geo_cache (ip, country_code, country_name, asn, org)
                        VALUES (?, ?, ?, ?, ?)
                        ON CONFLICT(ip) DO UPDATE SET
                            country_code = excluded.country_code,
                            country_name = excluded.country_name,
                            asn = excluded.asn,
                            org = excluded.org,
                            cached_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now')
                        """,
                        pos_rows,
                    )
                except Exception as exc:  # noqa: BLE001
                    log.warning(
                        "geo_batch_persist_failed",
                        count=len(pos_rows),
                        error=str(exc),
                    )
            if neg_ips:
                try:
                    await db.executemany(
                        "INSERT OR IGNORE INTO geo_cache (ip) VALUES (?)",
                        [(ip,) for ip in neg_ips],
                    )
                except Exception as exc:  # noqa: BLE001
                    log.warning(
                        "geo_batch_persist_neg_failed",
                        count=len(neg_ips),
                        error=str(exc),
                    )

    if db is not None:
        try:
            await db.commit()
        except Exception as exc:  # noqa: BLE001
            log.warning("geo_batch_commit_failed", error=str(exc))

    log.info(
        "geo_batch_lookup_complete",
        requested=len(uncached),
        resolved=sum(1 for g in geo_result.values() if g.country_code is not None),
    )
    return geo_result
|
||||
|
||||
|
||||
async def _batch_api_call(
    ips: list[str],
    http_session: aiohttp.ClientSession,
) -> dict[str, GeoInfo]:
    """Perform one POST against the ip-api.com batch endpoint.

    Args:
        ips: Up to :data:`_BATCH_SIZE` IP address strings.
        http_session: Shared HTTP session.

    Returns:
        Mapping of ``ip → GeoInfo`` covering every IP in *ips*. IPs whose
        entry is a failure record, missing from the response, or lost to a
        transport error map to an all-``None`` :class:`GeoInfo`.
    """
    no_info = GeoInfo(country_code=None, country_name=None, asn=None, org=None)
    defaults: dict[str, GeoInfo] = dict.fromkeys(ips, no_info)

    body = [{"query": ip} for ip in ips]
    try:
        async with http_session.post(
            _BATCH_API_URL,
            json=body,
            timeout=aiohttp.ClientTimeout(total=_REQUEST_TIMEOUT * 2),
        ) as resp:
            if resp.status != 200:
                log.warning("geo_batch_non_200", status=resp.status, count=len(ips))
                return defaults
            data: list[dict[str, object]] = await resp.json(content_type=None)
    except Exception as exc:  # noqa: BLE001
        log.warning(
            "geo_batch_request_failed",
            count=len(ips),
            exc_type=type(exc).__name__,
            error=repr(exc),
        )
        return defaults

    resolved: dict[str, GeoInfo] = {}
    for entry in data:
        ip_key = str(entry.get("query", ""))
        if not ip_key:
            continue
        if entry.get("status") == "success":
            resolved[ip_key] = _parse_single_response(entry)
        else:
            resolved[ip_key] = no_info
            log.debug(
                "geo_batch_entry_failed",
                ip=ip_key,
                message=entry.get("message", "unknown"),
            )

    # Guarantee every requested IP has an entry even if absent from the reply.
    for ip in ips:
        resolved.setdefault(ip, no_info)

    return resolved
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _parse_single_response(data: dict[str, object]) -> GeoInfo:
    """Build a :class:`GeoInfo` from a single ip-api.com response dict.

    Args:
        data: A ``status == "success"`` JSON response from ip-api.com.

    Returns:
        Populated :class:`GeoInfo`.
    """
    # ip-api returns "AS12345 Some Org" in both "as" and "org"; keep only
    # the leading AS number for the asn field.
    as_field: str | None = _str_or_none(data.get("as"))
    asn_number: str | None = None
    if as_field:
        asn_number = as_field.split()[0]

    return GeoInfo(
        country_code=_str_or_none(data.get("countryCode")),
        country_name=_str_or_none(data.get("country")),
        asn=asn_number,
        org=_str_or_none(data.get("org")),
    )
|
||||
|
||||
|
||||
def _str_or_none(value: object) -> str | None:
|
||||
"""Return *value* as a non-empty string, or ``None``.
|
||||
|
||||
Args:
|
||||
value: Raw JSON value which may be ``None``, empty, or a string.
|
||||
|
||||
Returns:
|
||||
Stripped string if non-empty, else ``None``.
|
||||
"""
|
||||
if value is None:
|
||||
return None
|
||||
s = str(value).strip()
|
||||
return s if s else None
|
||||
|
||||
|
||||
def _store(ip: str, info: GeoInfo) -> None:
    """Insert *info* into the module-level cache, flushing if over capacity.

    When the IP resolved successfully (``country_code is not None``) it is
    also added to the :data:`_dirty` set so :func:`flush_dirty` can persist
    it to the database on the next scheduled flush.

    Args:
        ip: The IP address key.
        info: The :class:`GeoInfo` to store.
    """
    if len(_cache) >= _MAX_CACHE_SIZE:
        # Wholesale eviction: dropping everything is cheaper than tracking
        # per-entry recency, and the cache simply refills on demand.
        _cache.clear()
        _dirty.clear()
        log.info("geo_cache_flushed", reason="capacity")

    _cache[ip] = info

    # Only successful resolutions are worth persisting to the database.
    if info.country_code is not None:
        _dirty.add(ip)
|
||||
|
||||
|
||||
async def flush_dirty(db: aiosqlite.Connection) -> int:
    """Persist all new in-memory geo entries to the ``geo_cache`` table.

    Takes an atomic snapshot of :data:`_dirty`, clears it, then batch-inserts
    all entries that are still present in :data:`_cache` using a single
    ``executemany`` call and one ``COMMIT``. This is the only place that
    writes to the persistent cache during normal operation after startup.

    If the database write fails the entries are re-added to :data:`_dirty`
    so they will be retried on the next flush cycle.

    Args:
        db: Open :class:`aiosqlite.Connection` to the BanGUI application
            database.

    Returns:
        The number of rows successfully upserted.
    """
    if not _dirty:
        return 0

    # Atomically snapshot and clear in a single-threaded async context.
    # No ``await`` between copy and clear ensures no interleaving.
    to_flush = _dirty.copy()
    _dirty.clear()

    # Entries may have been evicted from _cache (capacity flush in _store)
    # after being marked dirty; only rows still cached can be persisted.
    rows = [
        (ip, _cache[ip].country_code, _cache[ip].country_name, _cache[ip].asn, _cache[ip].org)
        for ip in to_flush
        if ip in _cache
    ]
    if not rows:
        return 0

    try:
        # Single upsert batch + one commit keeps this to one transaction.
        await db.executemany(
            """
            INSERT INTO geo_cache (ip, country_code, country_name, asn, org)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(ip) DO UPDATE SET
                country_code = excluded.country_code,
                country_name = excluded.country_name,
                asn = excluded.asn,
                org = excluded.org,
                cached_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now')
            """,
            rows,
        )
        await db.commit()
    except Exception as exc:  # noqa: BLE001
        log.warning("geo_flush_dirty_failed", error=str(exc))
        # Re-add to dirty so they are retried on the next flush cycle.
        _dirty.update(to_flush)
        return 0

    log.info("geo_flush_dirty_complete", count=len(rows))
    return len(rows)
|
||||
171
backend/app/services/health_service.py
Normal file
171
backend/app/services/health_service.py
Normal file
@@ -0,0 +1,171 @@
|
||||
"""Health service.
|
||||
|
||||
Probes the fail2ban socket to determine whether the daemon is reachable and
|
||||
collects aggregated server statistics (version, jail count, ban counts).
|
||||
|
||||
The probe is intentionally lightweight — it is meant to be called every 30
|
||||
seconds by the background health-check task, not on every HTTP request.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.models.server import ServerStatus
|
||||
from app.utils.fail2ban_client import Fail2BanClient, Fail2BanConnectionError, Fail2BanProtocolError
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_SOCKET_TIMEOUT: float = 5.0
|
||||
|
||||
|
||||
def _ok(response: Any) -> Any:
|
||||
"""Extract the payload from a fail2ban ``(return_code, data)`` response.
|
||||
|
||||
fail2ban wraps every response in a ``(0, data)`` success tuple or
|
||||
a ``(1, exception)`` error tuple. This helper returns ``data`` for
|
||||
successful responses or raises :class:`ValueError` for error responses.
|
||||
|
||||
Args:
|
||||
response: Raw value returned by :meth:`~Fail2BanClient.send`.
|
||||
|
||||
Returns:
|
||||
The payload ``data`` portion of the response.
|
||||
|
||||
Raises:
|
||||
ValueError: If the response indicates an error (return code ≠ 0).
|
||||
"""
|
||||
try:
|
||||
code, data = response
|
||||
except (TypeError, ValueError) as exc:
|
||||
raise ValueError(f"Unexpected fail2ban response shape: {response!r}") from exc
|
||||
|
||||
if code != 0:
|
||||
raise ValueError(f"fail2ban returned error code {code}: {data!r}")
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def _to_dict(pairs: Any) -> dict[str, Any]:
|
||||
"""Convert a list of ``(key, value)`` pairs to a plain dict.
|
||||
|
||||
fail2ban returns structured data as lists of 2-tuples rather than dicts.
|
||||
This helper converts them safely, ignoring non-pair items.
|
||||
|
||||
Args:
|
||||
pairs: A list of ``(key, value)`` pairs (or any iterable thereof).
|
||||
|
||||
Returns:
|
||||
A :class:`dict` with the keys and values from *pairs*.
|
||||
"""
|
||||
if not isinstance(pairs, (list, tuple)):
|
||||
return {}
|
||||
result: dict[str, Any] = {}
|
||||
for item in pairs:
|
||||
try:
|
||||
k, v = item
|
||||
result[str(k)] = v
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
return result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public interface
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def probe(socket_path: str, timeout: float = _SOCKET_TIMEOUT) -> ServerStatus:
    """Probe the fail2ban daemon and return a :class:`~app.models.server.ServerStatus`.

    Sends ``ping``, ``version``, ``status``, and per-jail ``status <jail>``
    commands. Any socket or protocol error is caught and results in an
    ``online=False`` status so the dashboard can always return a safe default.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        timeout: Per-command socket timeout in seconds.

    Returns:
        A :class:`~app.models.server.ServerStatus` snapshot. ``online`` is
        ``True`` when the daemon is reachable, ``False`` otherwise.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=timeout)

    try:
        # ------------------------------------------------------------------ #
        # 1. Connectivity check                                              #
        # ------------------------------------------------------------------ #
        ping_data = _ok(await client.send(["ping"]))
        if ping_data != "pong":
            # Reachable socket but unexpected payload — treat as offline.
            log.warning("fail2ban_unexpected_ping_response", response=ping_data)
            return ServerStatus(online=False)

        # ------------------------------------------------------------------ #
        # 2. Version                                                         #
        # ------------------------------------------------------------------ #
        try:
            version: str | None = str(_ok(await client.send(["version"])))
        except (ValueError, TypeError):
            # Version is cosmetic; a parse failure must not fail the probe.
            version = None

        # ------------------------------------------------------------------ #
        # 3. Global status — jail count and names                            #
        # ------------------------------------------------------------------ #
        # Keys read here: "Number of jail" (int-ish) and "Jail list"
        # (comma-separated names); both default safely when missing/falsy.
        status_data = _to_dict(_ok(await client.send(["status"])))
        active_jails: int = int(status_data.get("Number of jail", 0) or 0)
        jail_list_raw: str = str(status_data.get("Jail list", "") or "").strip()
        jail_names: list[str] = (
            [j.strip() for j in jail_list_raw.split(",") if j.strip()]
            if jail_list_raw
            else []
        )

        # ------------------------------------------------------------------ #
        # 4. Per-jail aggregation                                            #
        # ------------------------------------------------------------------ #
        total_bans: int = 0
        total_failures: int = 0

        for jail_name in jail_names:
            try:
                jail_resp = _to_dict(_ok(await client.send(["status", jail_name])))
                filter_stats = _to_dict(jail_resp.get("Filter") or [])
                action_stats = _to_dict(jail_resp.get("Actions") or [])
                total_failures += int(filter_stats.get("Currently failed", 0) or 0)
                total_bans += int(action_stats.get("Currently banned", 0) or 0)
            except (ValueError, TypeError, KeyError) as exc:
                # A single malformed jail response must not sink the probe;
                # its counts are simply omitted from the totals.
                log.warning(
                    "fail2ban_jail_status_parse_error",
                    jail=jail_name,
                    error=str(exc),
                )

        log.debug(
            "fail2ban_probe_ok",
            version=version,
            active_jails=active_jails,
            total_bans=total_bans,
            total_failures=total_failures,
        )

        return ServerStatus(
            online=True,
            version=version,
            active_jails=active_jails,
            total_bans=total_bans,
            total_failures=total_failures,
        )

    except (Fail2BanConnectionError, Fail2BanProtocolError) as exc:
        # Transport-level failure: daemon unreachable or protocol broken.
        log.warning("fail2ban_probe_failed", error=str(exc))
        return ServerStatus(online=False)
    except ValueError as exc:
        # _ok() raised — daemon answered but with an error/garbled payload.
        log.error("fail2ban_probe_parse_error", error=str(exc))
        return ServerStatus(online=False)
|
||||
269
backend/app/services/history_service.py
Normal file
269
backend/app/services/history_service.py
Normal file
@@ -0,0 +1,269 @@
|
||||
"""History service.
|
||||
|
||||
Queries the fail2ban SQLite database for all historical ban records.
|
||||
Supports filtering by jail, IP, and time range. For per-IP forensics the
|
||||
service provides a full ban timeline with matched log lines and failure counts.
|
||||
|
||||
All database I/O uses aiosqlite in **read-only** mode so BanGUI never
|
||||
modifies or locks the fail2ban database.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any
|
||||
|
||||
import aiosqlite
|
||||
import structlog
|
||||
|
||||
from app.models.ban import TIME_RANGE_SECONDS, TimeRange
|
||||
from app.models.history import (
|
||||
HistoryBanItem,
|
||||
HistoryListResponse,
|
||||
IpDetailResponse,
|
||||
IpTimelineEvent,
|
||||
)
|
||||
from app.services.ban_service import _get_fail2ban_db_path, _parse_data_json, _ts_to_iso
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_DEFAULT_PAGE_SIZE: int = 100
|
||||
_MAX_PAGE_SIZE: int = 500
|
||||
|
||||
|
||||
def _since_unix(range_: TimeRange) -> int:
    """Return the Unix timestamp for the start of the given time window.

    Args:
        range_: One of the supported time-range presets.

    Returns:
        Unix timestamp (seconds since epoch) equal to *now − range_*.
    """
    window_seconds: int = TIME_RANGE_SECONDS[range_]
    now_ts = int(datetime.now(tz=UTC).timestamp())
    return now_ts - window_seconds
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def list_history(
    socket_path: str,
    *,
    range_: TimeRange | None = None,
    jail: str | None = None,
    ip_filter: str | None = None,
    page: int = 1,
    page_size: int = _DEFAULT_PAGE_SIZE,
    geo_enricher: Any | None = None,
) -> HistoryListResponse:
    """Return a paginated list of historical ban records with optional filters.

    Queries the fail2ban ``bans`` table applying the requested filters and
    returns a paginated list ordered newest-first. When *geo_enricher* is
    supplied, each record is enriched with country and ASN data.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        range_: Time-range preset. ``None`` means all-time (no time filter).
        jail: If given, restrict results to bans from this jail.
        ip_filter: If given, restrict results to bans for this exact IP
            (or a prefix — the query uses ``LIKE ip_filter%``).
        page: 1-based page number (default: ``1``).
        page_size: Maximum items per page, capped at ``_MAX_PAGE_SIZE``.
        geo_enricher: Optional async callable ``(ip: str) -> GeoInfo | None``.

    Returns:
        :class:`~app.models.history.HistoryListResponse` with paginated items
        and the total matching count.
    """
    effective_page_size: int = min(page_size, _MAX_PAGE_SIZE)
    offset: int = (page - 1) * effective_page_size

    # Build WHERE clauses dynamically. The clause text is fixed; only the
    # values travel as bound parameters, so the f-string SQL below is
    # injection-safe (hence the noqa: S608 markers).
    wheres: list[str] = []
    params: list[Any] = []

    if range_ is not None:
        since: int = _since_unix(range_)
        wheres.append("timeofban >= ?")
        params.append(since)

    if jail is not None:
        wheres.append("jail = ?")
        params.append(jail)

    if ip_filter is not None:
        # Prefix match: "1.2." matches every IP in that /16-style prefix.
        wheres.append("ip LIKE ?")
        params.append(f"{ip_filter}%")

    where_sql: str = ("WHERE " + " AND ".join(wheres)) if wheres else ""

    db_path: str = await _get_fail2ban_db_path(socket_path)
    log.info(
        "history_service_list",
        db_path=db_path,
        range=range_,
        jail=jail,
        ip_filter=ip_filter,
        page=page,
    )

    # Read-only URI connection — BanGUI never writes to or locks the
    # fail2ban database.
    async with aiosqlite.connect(f"file:{db_path}?mode=ro", uri=True) as f2b_db:
        f2b_db.row_factory = aiosqlite.Row

        # Total count uses the same filters so pagination stays consistent.
        async with f2b_db.execute(
            f"SELECT COUNT(*) FROM bans {where_sql}",  # noqa: S608
            params,
        ) as cur:
            count_row = await cur.fetchone()
            total: int = int(count_row[0]) if count_row else 0

        async with f2b_db.execute(
            f"SELECT jail, ip, timeofban, bancount, data "  # noqa: S608
            f"FROM bans {where_sql} "
            "ORDER BY timeofban DESC "
            "LIMIT ? OFFSET ?",
            [*params, effective_page_size, offset],
        ) as cur:
            rows = await cur.fetchall()

        items: list[HistoryBanItem] = []
        for row in rows:
            jail_name: str = str(row["jail"])
            ip: str = str(row["ip"])
            banned_at: str = _ts_to_iso(int(row["timeofban"]))
            ban_count: int = int(row["bancount"])
            matches, failures = _parse_data_json(row["data"])

            country_code: str | None = None
            country_name: str | None = None
            asn: str | None = None
            org: str | None = None

            if geo_enricher is not None:
                try:
                    geo = await geo_enricher(ip)
                    if geo is not None:
                        country_code = geo.country_code
                        country_name = geo.country_name
                        asn = geo.asn
                        org = geo.org
                except Exception:  # noqa: BLE001
                    # Geo data is best-effort decoration; never fail the list.
                    log.warning("history_service_geo_lookup_failed", ip=ip)

            items.append(
                HistoryBanItem(
                    ip=ip,
                    jail=jail_name,
                    banned_at=banned_at,
                    ban_count=ban_count,
                    failures=failures,
                    matches=matches,
                    country_code=country_code,
                    country_name=country_name,
                    asn=asn,
                    org=org,
                )
            )

        return HistoryListResponse(
            items=items,
            total=total,
            page=page,
            page_size=effective_page_size,
        )
|
||||
|
||||
|
||||
async def get_ip_detail(
    socket_path: str,
    ip: str,
    *,
    geo_enricher: Any | None = None,
) -> IpDetailResponse | None:
    """Return the full historical record for a single IP address.

    Fetches all ban events for *ip* from the fail2ban database, ordered
    newest-first. Aggregates total bans, total failures, and the timestamp of
    the most recent ban. Optionally enriches with geolocation data.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        ip: The IP address to look up.
        geo_enricher: Optional async callable ``(ip: str) -> GeoInfo | None``.

    Returns:
        :class:`~app.models.history.IpDetailResponse` if any records exist
        for *ip*, or ``None`` if the IP has no history in the database.
    """
    db_path: str = await _get_fail2ban_db_path(socket_path)
    log.info("history_service_ip_detail", db_path=db_path, ip=ip)

    # Read-only URI connection — never writes to or locks the fail2ban DB.
    async with aiosqlite.connect(f"file:{db_path}?mode=ro", uri=True) as f2b_db:
        f2b_db.row_factory = aiosqlite.Row
        async with f2b_db.execute(
            "SELECT jail, ip, timeofban, bancount, data "
            "FROM bans "
            "WHERE ip = ? "
            "ORDER BY timeofban DESC",
            (ip,),
        ) as cur:
            rows = await cur.fetchall()

        if not rows:
            # No history at all → the caller maps this to a 404-style result.
            return None

        timeline: list[IpTimelineEvent] = []
        total_failures: int = 0

        for row in rows:
            jail_name: str = str(row["jail"])
            banned_at: str = _ts_to_iso(int(row["timeofban"]))
            ban_count: int = int(row["bancount"])
            matches, failures = _parse_data_json(row["data"])
            total_failures += failures
            timeline.append(
                IpTimelineEvent(
                    jail=jail_name,
                    banned_at=banned_at,
                    ban_count=ban_count,
                    failures=failures,
                    matches=matches,
                )
            )

        # Rows are ordered newest-first, so index 0 is the latest ban.
        last_ban_at: str | None = timeline[0].banned_at if timeline else None

        country_code: str | None = None
        country_name: str | None = None
        asn: str | None = None
        org: str | None = None

        if geo_enricher is not None:
            try:
                geo = await geo_enricher(ip)
                if geo is not None:
                    country_code = geo.country_code
                    country_name = geo.country_name
                    asn = geo.asn
                    org = geo.org
            except Exception:  # noqa: BLE001
                # Geo enrichment is best-effort; never fail the detail view.
                log.warning("history_service_geo_lookup_failed_detail", ip=ip)

        return IpDetailResponse(
            ip=ip,
            total_bans=len(timeline),
            total_failures=total_failures,
            last_ban_at=last_ban_at,
            country_code=country_code,
            country_name=country_name,
            asn=asn,
            org=org,
            timeline=timeline,
        )
|
||||
1356
backend/app/services/jail_service.py
Normal file
1356
backend/app/services/jail_service.py
Normal file
File diff suppressed because it is too large
Load Diff
189
backend/app/services/server_service.py
Normal file
189
backend/app/services/server_service.py
Normal file
@@ -0,0 +1,189 @@
|
||||
"""Server-level settings service.
|
||||
|
||||
Provides methods to read and update fail2ban server-level settings
|
||||
(log level, log target, database configuration) via the Unix domain socket.
|
||||
Also exposes the ``flushlogs`` command for use after log rotation.
|
||||
|
||||
Architecture note: this module is a pure service — it contains **no**
|
||||
HTTP/FastAPI concerns.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.models.server import ServerSettings, ServerSettingsResponse, ServerSettingsUpdate
|
||||
from app.utils.fail2ban_client import Fail2BanClient
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
_SOCKET_TIMEOUT: float = 10.0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Custom exceptions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ServerOperationError(Exception):
    """Raised when a server-level set command fails.

    Wraps the :class:`ValueError` produced by the fail2ban response parser
    so callers can distinguish rejected commands from transport errors.
    """
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _ok(response: Any) -> Any:
|
||||
"""Extract payload from a fail2ban ``(code, data)`` response.
|
||||
|
||||
Args:
|
||||
response: Raw value returned by :meth:`~Fail2BanClient.send`.
|
||||
|
||||
Returns:
|
||||
The payload ``data`` portion of the response.
|
||||
|
||||
Raises:
|
||||
ValueError: If the return code indicates an error.
|
||||
"""
|
||||
try:
|
||||
code, data = response
|
||||
except (TypeError, ValueError) as exc:
|
||||
raise ValueError(f"Unexpected response shape: {response!r}") from exc
|
||||
if code != 0:
|
||||
raise ValueError(f"fail2ban error {code}: {data!r}")
|
||||
return data
|
||||
|
||||
|
||||
async def _safe_get(
    client: Fail2BanClient,
    command: list[Any],
    default: Any = None,
) -> Any:
    """Send a command and silently return *default* on any error.

    Args:
        client: The :class:`~app.utils.fail2ban_client.Fail2BanClient` to use.
        command: Command list to send.
        default: Fallback value.

    Returns:
        The successful response, or *default*.
    """
    try:
        response = await client.send(command)
        payload = _ok(response)
    except Exception:
        # Deliberately broad: any transport or protocol failure degrades
        # to the caller-supplied default.
        return default
    return payload
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def get_settings(socket_path: str) -> ServerSettingsResponse:
    """Return current fail2ban server-level settings.

    Fetches log level, log target, syslog socket, database file path, purge
    age, and max matches in a single round-trip batch.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.

    Returns:
        :class:`~app.models.server.ServerSettingsResponse`.

    Raises:
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    import asyncio

    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    default_db = "/var/lib/fail2ban/fail2ban.sqlite3"

    # Fire all six "get" commands concurrently; each falls back to its
    # default on any individual failure.
    results = await asyncio.gather(
        _safe_get(client, ["get", "loglevel"], "INFO"),
        _safe_get(client, ["get", "logtarget"], "STDOUT"),
        _safe_get(client, ["get", "syslogsocket"], None),
        _safe_get(client, ["get", "dbfile"], default_db),
        _safe_get(client, ["get", "dbpurgeage"], 86400),
        _safe_get(client, ["get", "dbmaxmatches"], 10),
    )
    loglevel, logtarget, syslogsocket, dbfile, purge_age, max_matches = results

    # Normalise each raw value; falsy responses collapse to the defaults.
    settings = ServerSettings(
        log_level=str(loglevel or "INFO").upper(),
        log_target=str(logtarget or "STDOUT"),
        syslog_socket=str(syslogsocket) if syslogsocket else None,
        db_path=str(dbfile or "/var/lib/fail2ban/fail2ban.sqlite3"),
        db_purge_age=int(purge_age or 86400),
        db_max_matches=int(max_matches or 10),
    )

    log.info("server_settings_fetched")
    return ServerSettingsResponse(settings=settings)
|
||||
|
||||
|
||||
async def update_settings(socket_path: str, update: ServerSettingsUpdate) -> None:
    """Apply *update* to fail2ban server-level settings.

    Only non-None fields in *update* are sent.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        update: Partial update payload.

    Raises:
        ServerOperationError: If any ``set`` command is rejected.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)

    async def _set(key: str, value: Any) -> None:
        try:
            _ok(await client.send(["set", key, value]))
        except ValueError as exc:
            raise ServerOperationError(f"Failed to set {key!r} = {value!r}: {exc}") from exc

    # Collect only the fields the caller actually supplied, then apply them
    # sequentially in a fixed order.
    pending: list[tuple[str, Any]] = []
    if update.log_level is not None:
        pending.append(("loglevel", update.log_level.upper()))
    if update.log_target is not None:
        pending.append(("logtarget", update.log_target))
    if update.db_purge_age is not None:
        pending.append(("dbpurgeage", update.db_purge_age))
    if update.db_max_matches is not None:
        pending.append(("dbmaxmatches", update.db_max_matches))

    for key, value in pending:
        await _set(key, value)

    log.info("server_settings_updated")
|
||||
|
||||
|
||||
async def flush_logs(socket_path: str) -> str:
    """Flush and re-open fail2ban log files.

    Useful after log rotation so the daemon starts writing to the newly
    created file rather than the old rotated one.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.

    Returns:
        The response message from fail2ban (e.g. ``"OK"``) as a string.

    Raises:
        ServerOperationError: If the command is rejected.
        ~app.utils.fail2ban_client.Fail2BanConnectionError: Socket unreachable.
    """
    client = Fail2BanClient(socket_path=socket_path, timeout=_SOCKET_TIMEOUT)
    try:
        payload = _ok(await client.send(["flushlogs"]))
    except ValueError as exc:
        # Rejected command → domain-specific error for the API layer.
        raise ServerOperationError(f"flushlogs failed: {exc}") from exc
    log.info("logs_flushed", result=payload)
    return str(payload)
|
||||
201
backend/app/services/setup_service.py
Normal file
201
backend/app/services/setup_service.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""Setup service.
|
||||
|
||||
Implements the one-time first-run configuration wizard. Responsible for
|
||||
hashing the master password, persisting all initial settings, and
|
||||
enforcing the rule that setup can only run once.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import bcrypt
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import aiosqlite
|
||||
|
||||
from app.repositories import settings_repo
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# Keys used in the settings table.
|
||||
_KEY_PASSWORD_HASH = "master_password_hash"
|
||||
_KEY_SETUP_DONE = "setup_completed"
|
||||
_KEY_DATABASE_PATH = "database_path"
|
||||
_KEY_FAIL2BAN_SOCKET = "fail2ban_socket"
|
||||
_KEY_TIMEZONE = "timezone"
|
||||
_KEY_SESSION_DURATION = "session_duration_minutes"
|
||||
_KEY_MAP_COLOR_THRESHOLD_HIGH = "map_color_threshold_high"
|
||||
_KEY_MAP_COLOR_THRESHOLD_MEDIUM = "map_color_threshold_medium"
|
||||
_KEY_MAP_COLOR_THRESHOLD_LOW = "map_color_threshold_low"
|
||||
|
||||
|
||||
async def is_setup_complete(db: aiosqlite.Connection) -> bool:
    """Return ``True`` if initial setup has already been performed.

    Args:
        db: Active aiosqlite connection.

    Returns:
        ``True`` when the ``setup_completed`` key exists in settings.
    """
    flag = await settings_repo.get_setting(db, _KEY_SETUP_DONE)
    return flag == "1"
|
||||
|
||||
|
||||
async def run_setup(
    db: aiosqlite.Connection,
    *,
    master_password: str,
    database_path: str,
    fail2ban_socket: str,
    timezone: str,
    session_duration_minutes: int,
) -> None:
    """Persist the initial configuration and mark setup as complete.

    Hashes *master_password* with bcrypt before storing. Raises
    :class:`RuntimeError` if setup has already been completed.

    Args:
        db: Active aiosqlite connection.
        master_password: Plain-text master password chosen by the user.
        database_path: Filesystem path to the BanGUI SQLite database.
        fail2ban_socket: Unix socket path for the fail2ban daemon.
        timezone: IANA timezone identifier (e.g. ``"UTC"``).
        session_duration_minutes: Session validity period in minutes.

    Raises:
        RuntimeError: If setup has already been completed.
    """
    if await is_setup_complete(db):
        raise RuntimeError("Setup has already been completed.")

    log.info("bangui_setup_started")

    # bcrypt is CPU-bound and blocking, so hash in a thread executor to keep
    # the asyncio event loop responsive; bcrypt generates its own salt.
    raw_password = master_password.encode()
    loop = asyncio.get_running_loop()
    hashed: str = await loop.run_in_executor(
        None, lambda: bcrypt.hashpw(raw_password, bcrypt.gensalt()).decode()
    )

    initial_settings: list[tuple[str, str]] = [
        (_KEY_PASSWORD_HASH, hashed),
        (_KEY_DATABASE_PATH, database_path),
        (_KEY_FAIL2BAN_SOCKET, fail2ban_socket),
        (_KEY_TIMEZONE, timezone),
        (_KEY_SESSION_DURATION, str(session_duration_minutes)),
        # Map color thresholds start at their built-in defaults.
        (_KEY_MAP_COLOR_THRESHOLD_HIGH, "100"),
        (_KEY_MAP_COLOR_THRESHOLD_MEDIUM, "50"),
        (_KEY_MAP_COLOR_THRESHOLD_LOW, "20"),
    ]
    for key, value in initial_settings:
        await settings_repo.set_setting(db, key, value)

    # Written last so a partial failure above leaves setup_completed unset
    # and does not lock out the user.
    await settings_repo.set_setting(db, _KEY_SETUP_DONE, "1")

    log.info("bangui_setup_completed")
|
||||
|
||||
|
||||
async def get_password_hash(db: aiosqlite.Connection) -> str | None:
    """Return the stored bcrypt password hash, or ``None`` if not set.

    Args:
        db: Active aiosqlite connection.

    Returns:
        The bcrypt hash string, or ``None``.
    """
    stored = await settings_repo.get_setting(db, _KEY_PASSWORD_HASH)
    return stored
|
||||
|
||||
|
||||
async def get_timezone(db: aiosqlite.Connection) -> str:
    """Return the configured IANA timezone string.

    Falls back to ``"UTC"`` when no timezone has been stored (e.g. before
    setup completes or for legacy databases).

    Args:
        db: Active aiosqlite connection.

    Returns:
        An IANA timezone identifier such as ``"Europe/Berlin"`` or ``"UTC"``.
    """
    stored = await settings_repo.get_setting(db, _KEY_TIMEZONE)
    # Empty string and None both collapse to the UTC default.
    return stored or "UTC"
|
||||
|
||||
|
||||
async def get_map_color_thresholds(
    db: aiosqlite.Connection,
) -> tuple[int, int, int]:
    """Return the configured map color thresholds (high, medium, low).

    Falls back to the defaults (100, 50, 20) for any value not yet stored.

    Args:
        db: Active aiosqlite connection.

    Returns:
        A tuple of (threshold_high, threshold_medium, threshold_low).
    """
    # (settings key, fallback) pairs, in high → medium → low order.
    spec = (
        (_KEY_MAP_COLOR_THRESHOLD_HIGH, 100),
        (_KEY_MAP_COLOR_THRESHOLD_MEDIUM, 50),
        (_KEY_MAP_COLOR_THRESHOLD_LOW, 20),
    )
    resolved: list[int] = []
    for key, fallback in spec:
        raw = await settings_repo.get_setting(db, key)
        resolved.append(int(raw) if raw else fallback)
    return (resolved[0], resolved[1], resolved[2])
|
||||
|
||||
|
||||
async def set_map_color_thresholds(
|
||||
db: aiosqlite.Connection,
|
||||
*,
|
||||
threshold_high: int,
|
||||
threshold_medium: int,
|
||||
threshold_low: int,
|
||||
) -> None:
|
||||
"""Update the map color threshold configuration.
|
||||
|
||||
Args:
|
||||
db: Active aiosqlite connection.
|
||||
threshold_high: Ban count for red coloring.
|
||||
threshold_medium: Ban count for yellow coloring.
|
||||
threshold_low: Ban count for green coloring.
|
||||
|
||||
Raises:
|
||||
ValueError: If thresholds are not positive integers or if
|
||||
high <= medium <= low.
|
||||
"""
|
||||
if threshold_high <= 0 or threshold_medium <= 0 or threshold_low <= 0:
|
||||
raise ValueError("All thresholds must be positive integers.")
|
||||
if not (threshold_high > threshold_medium > threshold_low):
|
||||
raise ValueError("Thresholds must satisfy: high > medium > low.")
|
||||
|
||||
await settings_repo.set_setting(
|
||||
db, _KEY_MAP_COLOR_THRESHOLD_HIGH, str(threshold_high)
|
||||
)
|
||||
await settings_repo.set_setting(
|
||||
db, _KEY_MAP_COLOR_THRESHOLD_MEDIUM, str(threshold_medium)
|
||||
)
|
||||
await settings_repo.set_setting(
|
||||
db, _KEY_MAP_COLOR_THRESHOLD_LOW, str(threshold_low)
|
||||
)
|
||||
log.info(
|
||||
"map_color_thresholds_updated",
|
||||
high=threshold_high,
|
||||
medium=threshold_medium,
|
||||
low=threshold_low,
|
||||
)
|
||||
1
backend/app/tasks/__init__.py
Normal file
1
backend/app/tasks/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""APScheduler background tasks package."""
|
||||
153
backend/app/tasks/blocklist_import.py
Normal file
153
backend/app/tasks/blocklist_import.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""External blocklist import background task.
|
||||
|
||||
Registers an APScheduler job that downloads all enabled blocklist sources,
|
||||
validates their entries, and applies bans via fail2ban on a configurable
|
||||
schedule. The default schedule is daily at 03:00 UTC; it is stored in the
|
||||
application :class:`~app.models.blocklist.ScheduleConfig` settings and can
|
||||
be updated at runtime through the blocklist router.
|
||||
|
||||
The scheduler job ID is ``"blocklist_import"`` — using a stable id means
|
||||
re-registering the job (e.g. after a schedule update) safely replaces the
|
||||
existing entry without creating duplicates.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.models.blocklist import ScheduleFrequency
|
||||
from app.services import blocklist_service
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastapi import FastAPI
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
#: Stable APScheduler job id so the job can be replaced without duplicates.
|
||||
JOB_ID: str = "blocklist_import"
|
||||
|
||||
|
||||
async def _run_import(app: Any) -> None:
    """APScheduler callback that imports all enabled blocklist sources.

    Reads shared resources from ``app.state`` and delegates to
    :func:`~app.services.blocklist_service.import_all`.

    Args:
        app: The :class:`fastapi.FastAPI` application instance passed via
            APScheduler ``kwargs``.
    """
    state = app.state
    socket_path: str = state.settings.fail2ban_socket

    log.info("blocklist_import_starting")
    try:
        outcome = await blocklist_service.import_all(
            state.db, state.http_session, socket_path
        )
        log.info(
            "blocklist_import_finished",
            total_imported=outcome.total_imported,
            total_skipped=outcome.total_skipped,
            errors=outcome.errors_count,
        )
    except Exception:
        log.exception("blocklist_import_unexpected_error")
|
||||
|
||||
|
||||
def register(app: FastAPI) -> None:
    """Add (or replace) the blocklist import job in the application scheduler.

    Reads the persisted :class:`~app.models.blocklist.ScheduleConfig` from
    the database and translates it into the appropriate APScheduler trigger.

    Should be called inside the lifespan handler after the scheduler and
    database have been initialised.

    Args:
        app: The :class:`fastapi.FastAPI` application instance whose
            ``app.state.scheduler`` will receive the job.
    """
    import asyncio  # noqa: PLC0415

    async def _do_register() -> None:
        config = await blocklist_service.get_schedule(app.state.db)
        _apply_schedule(app, config)

    # Registration needs an async DB read. Prefer scheduling on the already
    # running loop (uvicorn lifespan); otherwise drive the coroutine to
    # completion on a private loop. ``asyncio.get_event_loop()`` is avoided
    # because it is deprecated when no loop is running.
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No running loop in this thread: run synchronously.
        asyncio.run(_do_register())
    else:
        # Keep a reference so the task is not garbage-collected before it
        # finishes (asyncio only holds weak references to tasks).
        app.state.blocklist_register_task = loop.create_task(_do_register())
|
||||
|
||||
|
||||
def reschedule(app: FastAPI) -> None:
    """Re-register the blocklist import job with the latest schedule config.

    Called by the blocklist router after a schedule update so changes take
    effect immediately without a server restart.

    Args:
        app: The :class:`fastapi.FastAPI` application instance.
    """
    import asyncio  # noqa: PLC0415

    async def _do_reschedule() -> None:
        config = await blocklist_service.get_schedule(app.state.db)
        _apply_schedule(app, config)

    # Keep a reference on app.state so the fire-and-forget task cannot be
    # garbage-collected before it completes (asyncio holds only weak
    # references to scheduled tasks).
    app.state.blocklist_reschedule_task = asyncio.ensure_future(_do_reschedule())
|
||||
|
||||
|
||||
def _apply_schedule(app: FastAPI, config: Any) -> None:
    """Add or replace the APScheduler cron/interval job for the given config.

    Args:
        app: FastAPI application instance.
        config: :class:`~app.models.blocklist.ScheduleConfig` to apply.
    """
    scheduler = app.state.scheduler
    job_kwargs: dict[str, Any] = {"app": app}

    if config.frequency == ScheduleFrequency.hourly:
        trigger = "interval"
        trigger_args: dict[str, Any] = {"hours": config.interval_hours}
    elif config.frequency == ScheduleFrequency.weekly:
        trigger = "cron"
        trigger_args = {
            "day_of_week": config.day_of_week,
            "hour": config.hour,
            "minute": config.minute,
        }
    else:
        # Daily is the default frequency.
        trigger = "cron"
        trigger_args = {"hour": config.hour, "minute": config.minute}

    # Drop any previous registration first so the stable job id is never
    # duplicated.
    if scheduler.get_job(JOB_ID):
        scheduler.remove_job(JOB_ID)

    scheduler.add_job(
        _run_import,
        trigger=trigger,
        id=JOB_ID,
        kwargs=job_kwargs,
        **trigger_args,
    )
    log.info(
        "blocklist_import_scheduled",
        frequency=config.frequency,
        trigger=trigger,
        trigger_kwargs=trigger_args,
    )
|
||||
66
backend/app/tasks/geo_cache_flush.py
Normal file
66
backend/app/tasks/geo_cache_flush.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""Geo cache flush background task.
|
||||
|
||||
Registers an APScheduler job that periodically persists newly resolved IP
|
||||
geo entries from the in-memory ``_dirty`` set to the ``geo_cache`` table.
|
||||
|
||||
After Task 2 removed geo cache writes from GET requests, newly resolved IPs
|
||||
are only held in the in-memory cache until this task flushes them. With the
|
||||
default 60-second interval, at most one minute of new resolution results is
|
||||
at risk on an unexpected process restart.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.services import geo_service
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastapi import FastAPI
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
#: How often the flush job fires (seconds). Configurable tuning constant.
|
||||
GEO_FLUSH_INTERVAL: int = 60
|
||||
|
||||
#: Stable APScheduler job ID — ensures re-registration replaces, not duplicates.
|
||||
JOB_ID: str = "geo_cache_flush"
|
||||
|
||||
|
||||
async def _run_flush(app: Any) -> None:
    """Flush the geo service dirty set to the application database.

    Reads shared resources from ``app.state`` and delegates to
    :func:`~app.services.geo_service.flush_dirty`.

    Args:
        app: The :class:`fastapi.FastAPI` application instance passed via
            APScheduler ``kwargs``.
    """
    flushed: int = await geo_service.flush_dirty(app.state.db)
    # Stay quiet when there was nothing to persist.
    if flushed > 0:
        log.debug("geo_cache_flush_ran", flushed=flushed)
|
||||
|
||||
|
||||
def register(app: FastAPI) -> None:
    """Add (or replace) the geo cache flush job in the application scheduler.

    Must be called after the scheduler has been started (i.e., inside the
    lifespan handler, after ``scheduler.start()``).

    Args:
        app: The :class:`fastapi.FastAPI` application instance whose
            ``app.state.scheduler`` will receive the job.
    """
    scheduler = app.state.scheduler
    scheduler.add_job(
        _run_flush,
        trigger="interval",
        seconds=GEO_FLUSH_INTERVAL,
        kwargs={"app": app},
        id=JOB_ID,
        replace_existing=True,
    )
    log.info("geo_cache_flush_scheduled", interval_seconds=GEO_FLUSH_INTERVAL)
|
||||
103
backend/app/tasks/geo_re_resolve.py
Normal file
103
backend/app/tasks/geo_re_resolve.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""Geo re-resolve background task.
|
||||
|
||||
Registers an APScheduler job that periodically retries IP addresses in the
|
||||
``geo_cache`` table whose ``country_code`` is ``NULL``. These are IPs that
|
||||
previously failed to resolve (e.g. due to ip-api.com rate limiting) and were
|
||||
recorded as negative entries.
|
||||
|
||||
The task runs every 10 minutes. On each invocation it:
|
||||
|
||||
1. Queries all ``NULL``-country rows from ``geo_cache``.
|
||||
2. Clears the in-memory negative cache so those IPs are eligible for a fresh
|
||||
API attempt.
|
||||
3. Delegates to :func:`~app.services.geo_service.lookup_batch` which already
|
||||
handles rate-limit throttling and retries.
|
||||
4. Logs how many IPs were retried and how many resolved successfully.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.services import geo_service
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastapi import FastAPI
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
#: How often the re-resolve job fires (seconds). 10 minutes.
|
||||
GEO_RE_RESOLVE_INTERVAL: int = 600
|
||||
|
||||
#: Stable APScheduler job ID — ensures re-registration replaces, not duplicates.
|
||||
JOB_ID: str = "geo_re_resolve"
|
||||
|
||||
|
||||
async def _run_re_resolve(app: Any) -> None:
    """Query NULL-country IPs from the database and re-resolve them.

    Reads shared resources from ``app.state`` and delegates to
    :func:`~app.services.geo_service.lookup_batch`.

    Args:
        app: The :class:`fastapi.FastAPI` application instance passed via
            APScheduler ``kwargs``.
    """
    db = app.state.db
    http_session = app.state.http_session

    # Fetch all IPs with NULL country_code from the persistent cache.
    async with db.execute(
        "SELECT ip FROM geo_cache WHERE country_code IS NULL"
    ) as cursor:
        unresolved_ips: list[str] = [str(row[0]) async for row in cursor]

    if not unresolved_ips:
        log.debug("geo_re_resolve_skip", reason="no_unresolved_ips")
        return

    log.info("geo_re_resolve_start", unresolved=len(unresolved_ips))

    # Clear the negative cache so these IPs are eligible for fresh API calls.
    geo_service.clear_neg_cache()

    # lookup_batch handles throttling, retries, and persistence when db is
    # passed. This is a background task so DB writes are allowed.
    results = await geo_service.lookup_batch(unresolved_ips, http_session, db=db)

    resolved_count: int = sum(
        1 for info in results.values() if info.country_code is not None
    )
    log.info(
        "geo_re_resolve_complete",
        retried=len(unresolved_ips),
        resolved=resolved_count,
    )
|
||||
|
||||
|
||||
def register(app: FastAPI) -> None:
    """Add (or replace) the geo re-resolve job in the application scheduler.

    Must be called after the scheduler has been started (i.e., inside the
    lifespan handler, after ``scheduler.start()``).

    The first invocation is deferred by one full interval so the initial
    blocklist prewarm has time to finish before re-resolve kicks in.

    Args:
        app: The :class:`fastapi.FastAPI` application instance whose
            ``app.state.scheduler`` will receive the job.
    """
    scheduler = app.state.scheduler
    scheduler.add_job(
        _run_re_resolve,
        trigger="interval",
        seconds=GEO_RE_RESOLVE_INTERVAL,
        kwargs={"app": app},
        id=JOB_ID,
        replace_existing=True,
    )
    log.info("geo_re_resolve_scheduled", interval_seconds=GEO_RE_RESOLVE_INTERVAL)
|
||||
156
backend/app/tasks/health_check.py
Normal file
156
backend/app/tasks/health_check.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""Health-check background task.
|
||||
|
||||
Registers an APScheduler job that probes the fail2ban socket every 30 seconds
|
||||
and stores the result on ``app.state.server_status``. The dashboard endpoint
|
||||
reads from this cache, keeping HTTP responses fast and the daemon connection
|
||||
decoupled from user-facing requests.
|
||||
|
||||
Crash detection (Task 3)
|
||||
------------------------
|
||||
When a jail activation is performed, the router stores a timestamp on
|
||||
``app.state.last_activation`` (a ``dict`` with ``jail_name`` and ``at``
|
||||
keys). If the health probe subsequently detects an online→offline transition
|
||||
within 60 seconds of that activation, a
|
||||
:class:`~app.models.config.PendingRecovery` record is written to
|
||||
``app.state.pending_recovery`` so the UI can offer a one-click rollback.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import structlog
|
||||
|
||||
from app.models.config import PendingRecovery
|
||||
from app.models.server import ServerStatus
|
||||
from app.services import health_service
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from fastapi import FastAPI
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
#: How often the probe fires (seconds).
|
||||
HEALTH_CHECK_INTERVAL: int = 30
|
||||
|
||||
#: Maximum seconds since an activation for a subsequent crash to be attributed
|
||||
#: to that activation.
|
||||
_ACTIVATION_CRASH_WINDOW: int = 60
|
||||
|
||||
|
||||
async def _run_probe(app: Any) -> None:
    """Probe fail2ban and cache the result on *app.state*.

    Detects online/offline state transitions. When fail2ban goes offline
    within :data:`_ACTIVATION_CRASH_WINDOW` seconds of the last jail
    activation, writes a :class:`~app.models.config.PendingRecovery` record to
    ``app.state.pending_recovery``.

    This is the APScheduler job callback. It reads ``fail2ban_socket`` from
    ``app.state.settings``, runs the health probe, and writes the result to
    ``app.state.server_status``.

    Args:
        app: The :class:`fastapi.FastAPI` application instance passed by the
            scheduler via the ``kwargs`` mechanism.
    """
    socket_path: str = app.state.settings.fail2ban_socket
    # Default to "offline" when no probe has run yet, so the very first
    # successful probe is reported as a came-online transition.
    prev_status: ServerStatus = getattr(
        app.state, "server_status", ServerStatus(online=False)
    )
    status: ServerStatus = await health_service.probe(socket_path)
    app.state.server_status = status

    # Timezone-aware "now"; compared against the activation timestamp below,
    # which is therefore presumably also UTC-aware — confirm at the writer.
    now = datetime.datetime.now(tz=datetime.UTC)

    # Log transitions between online and offline states.
    if status.online and not prev_status.online:
        log.info("fail2ban_came_online", version=status.version)
        # Clear any pending recovery once fail2ban is back online.
        existing: PendingRecovery | None = getattr(
            app.state, "pending_recovery", None
        )
        if existing is not None and not existing.recovered:
            # The record is replaced with a recovered=True copy rather than
            # mutated in place (NOTE(review): model is presumably immutable).
            app.state.pending_recovery = PendingRecovery(
                jail_name=existing.jail_name,
                activated_at=existing.activated_at,
                detected_at=existing.detected_at,
                recovered=True,
            )
            log.info(
                "pending_recovery_resolved",
                jail=existing.jail_name,
            )

    elif not status.online and prev_status.online:
        log.warning("fail2ban_went_offline")
        # Check whether this crash happened shortly after a jail activation.
        last_activation: dict[str, Any] | None = getattr(
            app.state, "last_activation", None
        )
        if last_activation is not None:
            activated_at: datetime.datetime = last_activation["at"]
            seconds_since = (now - activated_at).total_seconds()
            if seconds_since <= _ACTIVATION_CRASH_WINDOW:
                jail_name: str = last_activation["jail_name"]
                # Only create a new record when there is not already an
                # unresolved one for the same jail.
                current: PendingRecovery | None = getattr(
                    app.state, "pending_recovery", None
                )
                if current is None or current.recovered:
                    app.state.pending_recovery = PendingRecovery(
                        jail_name=jail_name,
                        activated_at=activated_at,
                        detected_at=now,
                    )
                    log.warning(
                        "activation_crash_detected",
                        jail=jail_name,
                        seconds_since_activation=seconds_since,
                    )

    # Debug-level summary emitted on every probe, transition or not.
    log.debug(
        "health_check_complete",
        online=status.online,
        version=status.version,
        active_jails=status.active_jails,
    )
|
||||
|
||||
|
||||
def register(app: FastAPI) -> None:
    """Add the health-check job to the application scheduler.

    Must be called after the scheduler has been started (i.e., inside the
    lifespan handler, after ``scheduler.start()``).

    Args:
        app: The :class:`fastapi.FastAPI` application instance whose
            ``app.state.scheduler`` will receive the job.
    """
    # Initialise the cache with an offline placeholder so the dashboard
    # endpoint is always able to return a valid response even before the
    # first probe fires.
    app.state.server_status = ServerStatus(online=False)

    # Initialise activation tracking state.
    app.state.last_activation = None
    app.state.pending_recovery = None

    app.state.scheduler.add_job(
        _run_probe,
        trigger="interval",
        seconds=HEALTH_CHECK_INTERVAL,
        kwargs={"app": app},
        id="health_check",
        replace_existing=True,
        # Fire immediately on startup too, so the UI isn't dark for 30 s.
        # Uses the module-level ``datetime`` import instead of the previous
        # inline ``__import__("datetime")`` hack.
        next_run_time=datetime.datetime.now(tz=datetime.UTC),
    )
    log.info(
        "health_check_scheduled",
        interval_seconds=HEALTH_CHECK_INTERVAL,
    )
|
||||
1
backend/app/utils/__init__.py
Normal file
1
backend/app/utils/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Shared utilities, helpers, and constants package."""
|
||||
358
backend/app/utils/config_parser.py
Normal file
358
backend/app/utils/config_parser.py
Normal file
@@ -0,0 +1,358 @@
|
||||
"""Fail2ban INI-style config parser with include and interpolation support.
|
||||
|
||||
Provides a :class:`Fail2BanConfigParser` class that wraps Python's
|
||||
:class:`configparser.RawConfigParser` with fail2ban-specific behaviour:
|
||||
|
||||
- **Merge order**: ``.conf`` file first, then ``.local`` overlay, then ``*.d/``
|
||||
directory overrides — each subsequent layer overwrites earlier values.
|
||||
- **Include directives**: ``[INCLUDES]`` sections can specify ``before`` and
|
||||
``after`` filenames. ``before`` is loaded at lower priority (loaded first),
|
||||
``after`` at higher priority (loaded last). Both are resolved relative to
|
||||
the directory of the including file. Circular includes and runaway recursion
|
||||
are detected and logged.
|
||||
- **Variable interpolation**: :meth:`interpolate` resolves ``%(variable)s``
|
||||
references using the ``[DEFAULT]`` section, the ``[Init]`` section, and any
|
||||
caller-supplied variables. Multiple passes handle nested references.
|
||||
- **Multi-line values**: Handled transparently by ``configparser``; the
|
||||
:meth:`split_multiline` helper further strips blank lines and ``#`` comments.
|
||||
- **Comments**: ``configparser`` strips full-line ``#``/``;`` comments; inline
|
||||
comments inside multi-line values are stripped by :meth:`split_multiline`.
|
||||
|
||||
All methods are synchronous. Call from async contexts via
|
||||
:func:`asyncio.get_event_loop().run_in_executor`.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import configparser
|
||||
import re
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# Compiled pattern that matches fail2ban-style %(variable_name)s references.
|
||||
_INTERPOLATE_RE: re.Pattern[str] = re.compile(r"%\((\w+)\)s")
|
||||
|
||||
# Guard against infinite interpolation loops.
|
||||
_MAX_INTERPOLATION_PASSES: int = 10
|
||||
|
||||
|
||||
class Fail2BanConfigParser:
|
||||
"""Parse fail2ban INI config files with include resolution and interpolation.
|
||||
|
||||
Typical usage for a ``filter.d/`` file::
|
||||
|
||||
parser = Fail2BanConfigParser(config_dir=Path("/etc/fail2ban"))
|
||||
parser.read_with_overrides(Path("/etc/fail2ban/filter.d/sshd.conf"))
|
||||
section = parser.section_dict("Definition")
|
||||
failregex = parser.split_multiline(section.get("failregex", ""))
|
||||
|
||||
Args:
|
||||
config_dir: Optional fail2ban configuration root directory. Used only
|
||||
by :meth:`ordered_conf_files`; pass ``None`` if not needed.
|
||||
max_include_depth: Maximum ``[INCLUDES]`` nesting depth before giving up.
|
||||
"""
|
||||
|
||||
def __init__(
    self,
    config_dir: Path | None = None,
    max_include_depth: int = 10,
) -> None:
    """Initialise an empty parser; no files are read until a ``read_*`` call.

    Args:
        config_dir: Optional fail2ban configuration root directory (only
            consulted by :meth:`ordered_conf_files`).
        max_include_depth: Maximum ``[INCLUDES]`` nesting depth before
            :meth:`read_file` stops recursing.
    """
    self._config_dir = config_dir
    self._max_include_depth = max_include_depth
    # Accumulating parser — every read merges into this one instance.
    self._parser: configparser.RawConfigParser = self._make_parser()
    # Tracks resolved absolute paths to detect include cycles.
    self._read_paths: set[Path] = set()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _make_parser() -> configparser.RawConfigParser:
|
||||
"""Return a case-sensitive :class:`configparser.RawConfigParser`."""
|
||||
parser = configparser.RawConfigParser(interpolation=None, strict=False)
|
||||
# Keep original key casing (fail2ban is case-sensitive in option names).
|
||||
parser.optionxform = str # type: ignore[assignment]
|
||||
return parser
|
||||
|
||||
def _get_include(
    self,
    include_dir: Path,
    tmp_parser: configparser.RawConfigParser,
    key: str,
) -> Path | None:
    """Return the resolved path for an include directive, or ``None``."""
    present = tmp_parser.has_section("INCLUDES") and tmp_parser.has_option(
        "INCLUDES", key
    )
    if not present:
        return None
    target = tmp_parser.get("INCLUDES", key).strip()
    # An empty value means "no include" — treat it the same as absence.
    return include_dir / target if target else None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public interface — reading files
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def read_file(self, path: Path, _depth: int = 0) -> None:
    """Read *path*, following ``[INCLUDES]`` ``before``/``after`` directives.

    ``before`` references are loaded before the current file (lower
    priority); ``after`` references are loaded after (higher priority).
    Circular includes are detected by tracking resolved absolute paths.

    All failure modes (depth exceeded, cycle, unreadable file, parse error)
    are logged and swallowed — the method never raises; the accumulated
    parser simply keeps whatever was merged so far.

    Args:
        path: Config file to read.
        _depth: Current include nesting depth. Internal parameter.
    """
    if _depth > self._max_include_depth:
        log.warning(
            "include_depth_exceeded",
            path=str(path),
            max_depth=self._max_include_depth,
        )
        return

    resolved = path.resolve()
    if resolved in self._read_paths:
        log.debug("include_cycle_detected", path=str(path))
        return

    try:
        content = path.read_text(encoding="utf-8")
    except OSError as exc:
        log.warning("config_read_error", path=str(path), error=str(exc))
        return

    # Pre-scan for includes without yet committing to the main parser.
    # A throwaway parser is used so a parse failure leaves the main
    # parser untouched.
    tmp = self._make_parser()
    try:
        tmp.read_string(content)
    except configparser.Error as exc:
        log.warning("config_parse_error", path=str(path), error=str(exc))
        return

    # Include targets are resolved relative to the including file's dir.
    include_dir = path.parent
    before_path = self._get_include(include_dir, tmp, "before")
    after_path = self._get_include(include_dir, tmp, "after")

    # Load ``before`` first (lower priority than current file).
    if before_path is not None:
        self.read_file(before_path, _depth=_depth + 1)

    # Mark this path visited *before* merging to guard against cycles
    # introduced by the ``after`` include referencing the same file.
    self._read_paths.add(resolved)

    # Merge current file into the accumulating parser.
    try:
        self._parser.read_string(content, source=str(path))
    except configparser.Error as exc:
        log.warning(
            "config_parse_string_error", path=str(path), error=str(exc)
        )

    # Load ``after`` last (highest priority).
    if after_path is not None:
        self.read_file(after_path, _depth=_depth + 1)
|
||||
|
||||
def read_with_overrides(self, conf_path: Path) -> None:
    """Read *conf_path*, then its ``.local`` override when one exists.

    Reading the ``.local`` file last lets its values take precedence.
    Include directives inside each file are still honoured.

    Args:
        conf_path: Path to the ``.conf`` file. The override path is the
            same name with the suffix replaced by ``.local``.
    """
    override = conf_path.with_suffix(".local")
    self.read_file(conf_path)
    if override.is_file():
        self.read_file(override)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public interface — querying parsed data
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def sections(self) -> list[str]:
|
||||
"""Return all section names (excludes the ``[DEFAULT]`` pseudo-section).
|
||||
|
||||
Returns:
|
||||
Sorted list of section names present in the parsed files.
|
||||
"""
|
||||
return list(self._parser.sections())
|
||||
|
||||
def has_section(self, section: str) -> bool:
    """Return whether *section* exists in the parsed configuration.

    Args:
        section: Section name to check.

    Returns:
        ``True`` if the section was seen in any parsed file, else ``False``.
    """
    return self._parser.has_section(section)
|
||||
|
||||
def get(self, section: str, key: str) -> str | None:
|
||||
"""Return the raw value for *key* in *section*, or ``None``.
|
||||
|
||||
Args:
|
||||
section: Section name.
|
||||
key: Option name.
|
||||
|
||||
Returns:
|
||||
Raw option value string, or ``None`` if not present.
|
||||
"""
|
||||
if self._parser.has_section(section) and self._parser.has_option(
|
||||
section, key
|
||||
):
|
||||
return self._parser.get(section, key)
|
||||
return None
|
||||
|
||||
def section_dict(
|
||||
self,
|
||||
section: str,
|
||||
*,
|
||||
skip: frozenset[str] | None = None,
|
||||
) -> dict[str, str]:
|
||||
"""Return all key-value pairs from *section* as a plain :class:`dict`.
|
||||
|
||||
Keys whose names start with ``__`` (configparser internals from
|
||||
``DEFAULT`` inheritance) are always excluded.
|
||||
|
||||
Args:
|
||||
section: Section name to read.
|
||||
skip: Additional key names to exclude.
|
||||
|
||||
Returns:
|
||||
Mapping of option name → raw value. Empty dict if section absent.
|
||||
"""
|
||||
if not self._parser.has_section(section):
|
||||
return {}
|
||||
drop: frozenset[str] = skip or frozenset()
|
||||
return {
|
||||
k: v
|
||||
for k, v in self._parser.items(section)
|
||||
if not k.startswith("__") and k not in drop
|
||||
}
|
||||
|
||||
def defaults(self) -> dict[str, str]:
|
||||
"""Return all ``[DEFAULT]`` section key-value pairs.
|
||||
|
||||
Returns:
|
||||
Dict of default keys and their values.
|
||||
"""
|
||||
return dict(self._parser.defaults())
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public interface — interpolation and helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def interpolate(
    self,
    value: str,
    extra_vars: dict[str, str] | None = None,
) -> str:
    """Resolve ``%(variable)s`` references in *value*.

    Resolution priority (low → high): ``[DEFAULT]`` values, ``[Init]``
    values, then *extra_vars*. Repeated substitution passes (bounded by
    :data:`_MAX_INTERPOLATION_PASSES`) handle nested references; names
    that cannot be resolved are left unchanged.

    Args:
        value: Raw string possibly containing ``%(name)s`` placeholders.
        extra_vars: Optional caller-supplied variables (highest priority).

    Returns:
        String with ``%(name)s`` references substituted where possible.
    """
    lookup: dict[str, str] = {
        **self.defaults(),
        **self.section_dict("Init"),
        **(extra_vars or {}),
    }

    def _replace(match: re.Match[str]) -> str:
        # Unknown names yield the original placeholder unchanged.
        return lookup.get(match.group(1), match.group(0))

    current = value
    for _ in range(_MAX_INTERPOLATION_PASSES):
        substituted = _INTERPOLATE_RE.sub(_replace, current)
        if substituted == current:
            break
        current = substituted
    return current
|
||||
|
||||
@staticmethod
|
||||
def split_multiline(raw: str) -> list[str]:
|
||||
"""Split a multi-line INI value into individual non-blank lines.
|
||||
|
||||
Each line is stripped of surrounding whitespace. Lines that are empty
|
||||
or that start with ``#`` (comments) are discarded.
|
||||
|
||||
Used for ``failregex``, ``ignoreregex``, ``action``, and ``logpath``
|
||||
values which fail2ban allows to span multiple lines.
|
||||
|
||||
Args:
|
||||
raw: Raw multi-line string from configparser.
|
||||
|
||||
Returns:
|
||||
List of stripped, non-empty, non-comment strings.
|
||||
"""
|
||||
result: list[str] = []
|
||||
for line in raw.splitlines():
|
||||
stripped = line.strip()
|
||||
if stripped and not stripped.startswith("#"):
|
||||
result.append(stripped)
|
||||
return result
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Class-level utility — file ordering
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@classmethod
def ordered_conf_files(cls, config_dir: Path, base_name: str) -> list[Path]:
    """List the config files for *base_name* in fail2ban merge order.

    Merge order (ascending priority — later entries override earlier):

    1. ``{config_dir}/{base_name}.conf``
    2. ``{config_dir}/{base_name}.local``
    3. ``{config_dir}/{base_name}.d/*.conf`` (sorted alphabetically)
    4. ``{config_dir}/{base_name}.d/*.local`` (sorted alphabetically)

    Args:
        config_dir: Fail2ban configuration root directory.
        base_name: Config base name without extension (e.g. ``"jail"``).

    Returns:
        List of existing :class:`~pathlib.Path` objects in ascending
        priority order (only files that actually exist are included).
    """
    # Top-level .conf then .local, keeping only files that exist.
    ordered: list[Path] = [
        candidate
        for candidate in (
            config_dir / f"{base_name}.conf",
            config_dir / f"{base_name}.local",
        )
        if candidate.is_file()
    ]

    # Drop-in directory: all .conf files, then all .local files.
    drop_in = config_dir / f"{base_name}.d"
    if drop_in.is_dir():
        for pattern in ("*.conf", "*.local"):
            ordered.extend(sorted(drop_in.glob(pattern)))

    return ordered
|
||||
303
backend/app/utils/config_writer.py
Normal file
303
backend/app/utils/config_writer.py
Normal file
@@ -0,0 +1,303 @@
|
||||
"""Atomic config file writer for fail2ban ``.local`` override files.
|
||||
|
||||
All write operations are atomic: content is first written to a temporary file
|
||||
in the same directory as the target, then :func:`os.replace` is used to rename
|
||||
it into place. This guarantees that a crash or power failure during the write
|
||||
never leaves a partially-written file behind.
|
||||
|
||||
A per-file :class:`threading.Lock` prevents concurrent writes from the same
|
||||
process from racing.
|
||||
|
||||
Security constraints
|
||||
--------------------
|
||||
- Every write function asserts that the target path **ends in ``.local``**.
|
||||
This prevents accidentally writing to ``.conf`` files (which belong to the
|
||||
fail2ban package and should never be modified by BanGUI).
|
||||
|
||||
Public functions
|
||||
----------------
|
||||
- :func:`write_local_override` — create or update keys inside a ``.local`` file.
|
||||
- :func:`remove_local_key` — remove a single key from a ``.local`` file.
|
||||
- :func:`delete_local_file` — delete an entire ``.local`` file.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import configparser
|
||||
import contextlib
|
||||
import io
|
||||
import os
|
||||
import tempfile
|
||||
import threading
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Per-file lock registry
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Maps resolved absolute path strings → threading.Lock instances.
|
||||
_locks: dict[str, threading.Lock] = {}
|
||||
# Guards the _locks dict itself.
|
||||
_registry_lock: threading.Lock = threading.Lock()
|
||||
|
||||
|
||||
def _get_file_lock(path: Path) -> threading.Lock:
    """Fetch (creating on first use) the per-file lock for *path*.

    Locks are keyed by the resolved absolute path, so different spellings
    of the same file share one lock.

    Args:
        path: Target file path (need not exist yet).

    Returns:
        :class:`threading.Lock` bound to the resolved absolute path of *path*.
    """
    resolved = str(path.resolve())
    with _registry_lock:
        lock = _locks.get(resolved)
        if lock is None:
            lock = threading.Lock()
            _locks[resolved] = lock
    return lock
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _assert_local_file(path: Path) -> None:
|
||||
"""Raise :class:`ValueError` if *path* does not end with ``.local``.
|
||||
|
||||
This is a safety guard against accidentally modifying ``.conf`` files.
|
||||
|
||||
Args:
|
||||
path: Path to validate.
|
||||
|
||||
Raises:
|
||||
ValueError: When *path* does not have a ``.local`` suffix.
|
||||
"""
|
||||
if path.suffix != ".local":
|
||||
raise ValueError(
|
||||
f"Refusing to write to non-.local file: {path!r}. "
|
||||
"Only .local override files may be modified by BanGUI."
|
||||
)
|
||||
|
||||
|
||||
def _make_parser() -> configparser.RawConfigParser:
|
||||
"""Return a case-sensitive :class:`configparser.RawConfigParser`."""
|
||||
parser = configparser.RawConfigParser(interpolation=None, strict=False)
|
||||
parser.optionxform = str # type: ignore[assignment]
|
||||
return parser
|
||||
|
||||
|
||||
def _read_or_new_parser(path: Path) -> configparser.RawConfigParser:
    """Load *path* into a parser, falling back to an empty one.

    A missing or unreadable file yields a fresh empty parser; parse and
    read errors are logged as warnings rather than propagated.

    Args:
        path: Path to the ``.local`` file to read.

    Returns:
        Populated (or empty) :class:`configparser.RawConfigParser`.
    """
    parser = _make_parser()
    if not path.is_file():
        return parser
    try:
        parser.read_string(path.read_text(encoding="utf-8"))
    except (OSError, configparser.Error) as exc:
        # Best-effort read: a corrupt file should not crash the caller.
        log.warning("local_file_read_error", path=str(path), error=str(exc))
    return parser
|
||||
|
||||
|
||||
def _write_parser_atomic(
|
||||
parser: configparser.RawConfigParser,
|
||||
path: Path,
|
||||
) -> None:
|
||||
"""Write *parser* contents to *path* atomically.
|
||||
|
||||
Writes to a temporary file in the same directory as *path*, then renames
|
||||
the temporary file over *path* using :func:`os.replace`. The temporary
|
||||
file is cleaned up on failure.
|
||||
|
||||
Args:
|
||||
parser: Populated parser whose contents should be written.
|
||||
path: Destination ``.local`` file path.
|
||||
|
||||
Raises:
|
||||
OSError: On filesystem errors (propagated to caller).
|
||||
"""
|
||||
buf = io.StringIO()
|
||||
parser.write(buf)
|
||||
content = buf.getvalue()
|
||||
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
fd, tmp_path_str = tempfile.mkstemp(
|
||||
dir=str(path.parent),
|
||||
prefix=f".{path.name}.tmp",
|
||||
suffix="",
|
||||
)
|
||||
try:
|
||||
with os.fdopen(fd, "w", encoding="utf-8") as f:
|
||||
f.write(content)
|
||||
os.replace(tmp_path_str, str(path))
|
||||
except Exception:
|
||||
with contextlib.suppress(OSError):
|
||||
os.unlink(tmp_path_str)
|
||||
raise
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def write_local_override(
    base_path: Path,
    section: str,
    key_values: dict[str, str],
) -> None:
    """Create or update keys in a ``.local`` override file.

    If the file already exists, only the specified *key_values* are written
    under *section*; all other sections and keys are preserved.

    If the file does not exist, it is created with the given *section* and
    *key_values*.

    The write is **atomic**: a temporary file is written and renamed into
    place. The ``local_override_written`` log event is emitted only after
    the write has actually succeeded.

    Args:
        base_path: Absolute path to the ``.local`` file (e.g.
            ``filter.d/sshd.local``). The parent directory is created if it
            does not already exist.
        section: INI section name (e.g. ``"Definition"``, ``"Init"``).
        key_values: Mapping of option name → value to write/update.

    Raises:
        ValueError: If *base_path* does not end with ``.local``.
    """
    _assert_local_file(base_path)

    lock = _get_file_lock(base_path)
    with lock:
        parser = _read_or_new_parser(base_path)

        if not parser.has_section(section):
            parser.add_section(section)

        for key, value in key_values.items():
            parser.set(section, key, value)

        _write_parser_atomic(parser, base_path)
        # Log AFTER the atomic write: _write_parser_atomic raises on
        # failure, so a failed write never produces a success event.
        log.info(
            "local_override_written",
            path=str(base_path),
            section=section,
            keys=sorted(key_values),
        )
|
||||
|
||||
|
||||
def remove_local_key(base_path: Path, section: str, key: str) -> None:
    """Delete one key from a ``.local`` override file, tidying up after.

    Cleanup cascade after removal:

    - A section left empty by the removal is itself removed.
    - A file left with no sections is deleted outright.

    Missing file, section, or key makes this a silent no-op.

    Args:
        base_path: Path to the ``.local`` file to update.
        section: INI section containing the key.
        key: Option name to remove.

    Raises:
        ValueError: If *base_path* does not end with ``.local``.
    """
    _assert_local_file(base_path)

    if not base_path.is_file():
        return

    with _get_file_lock(base_path):
        parser = _read_or_new_parser(base_path)

        present = parser.has_section(section) and parser.has_option(section, key)
        if not present:
            return  # Nothing to remove.

        parser.remove_option(section, key)

        # Drop a now-empty section.
        if not parser.options(section):
            parser.remove_section(section)

        # An entirely empty file serves no purpose — delete it.
        if not parser.sections():
            with contextlib.suppress(OSError):
                base_path.unlink()
            log.info("local_file_deleted_empty", path=str(base_path))
            return

        log.info(
            "local_key_removed",
            path=str(base_path),
            section=section,
            key=key,
        )
        _write_parser_atomic(parser, base_path)
|
||||
|
||||
|
||||
def delete_local_file(path: Path, *, allow_orphan: bool = False) -> None:
    """Remove a ``.local`` override file from disk.

    By default an *orphan* ``.local`` (one with no sibling ``.conf``) is
    protected from deletion, since it may be the only copy of a
    user-defined config. Pass ``allow_orphan=True`` to bypass the guard.

    Args:
        path: Path to the ``.local`` file to delete.
        allow_orphan: When ``True``, delete even if no corresponding ``.conf``
            exists alongside *path*.

    Raises:
        ValueError: If *path* does not end with ``.local``.
        FileNotFoundError: If *path* does not exist.
        OSError: If no corresponding ``.conf`` exists and *allow_orphan* is
            ``False``.
    """
    _assert_local_file(path)

    if not path.is_file():
        raise FileNotFoundError(f"Local file not found: {path!r}")

    if not allow_orphan and not path.with_suffix(".conf").is_file():
        raise OSError(
            f"No corresponding .conf file found for {path!r}. "
            "Pass allow_orphan=True to delete a local-only file."
        )

    with _get_file_lock(path):
        try:
            path.unlink()
        except OSError as exc:
            log.error(
                "local_file_delete_failed", path=str(path), error=str(exc)
            )
            raise
        log.info("local_file_deleted", path=str(path))
|
||||
78
backend/app/utils/constants.py
Normal file
78
backend/app/utils/constants.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""Application-wide constants.
|
||||
|
||||
All magic numbers, default paths, and limit values live here.
|
||||
Import from this module rather than hard-coding values in business logic.
|
||||
"""
|
||||
|
||||
from typing import Final
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# fail2ban integration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
DEFAULT_FAIL2BAN_SOCKET: Final[str] = "/var/run/fail2ban/fail2ban.sock"
|
||||
"""Default path to the fail2ban Unix domain socket."""
|
||||
|
||||
FAIL2BAN_SOCKET_TIMEOUT_SECONDS: Final[float] = 5.0
|
||||
"""Maximum seconds to wait for a response from the fail2ban socket."""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Database
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
DEFAULT_DATABASE_PATH: Final[str] = "bangui.db"
|
||||
"""Default filename for the BanGUI application SQLite database."""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Authentication
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
DEFAULT_SESSION_DURATION_MINUTES: Final[int] = 60
|
||||
"""Default session lifetime in minutes."""
|
||||
|
||||
SESSION_TOKEN_BYTES: Final[int] = 64
|
||||
"""Number of random bytes used when generating a session token."""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Time-range presets (used by dashboard and history endpoints)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
TIME_RANGE_24H: Final[str] = "24h"
|
||||
TIME_RANGE_7D: Final[str] = "7d"
|
||||
TIME_RANGE_30D: Final[str] = "30d"
|
||||
TIME_RANGE_365D: Final[str] = "365d"
|
||||
|
||||
VALID_TIME_RANGES: Final[frozenset[str]] = frozenset(
|
||||
{TIME_RANGE_24H, TIME_RANGE_7D, TIME_RANGE_30D, TIME_RANGE_365D}
|
||||
)
|
||||
|
||||
TIME_RANGE_HOURS: Final[dict[str, int]] = {
|
||||
TIME_RANGE_24H: 24,
|
||||
TIME_RANGE_7D: 7 * 24,
|
||||
TIME_RANGE_30D: 30 * 24,
|
||||
TIME_RANGE_365D: 365 * 24,
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pagination
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
DEFAULT_PAGE_SIZE: Final[int] = 50
|
||||
MAX_PAGE_SIZE: Final[int] = 500
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Blocklist import
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
BLOCKLIST_IMPORT_DEFAULT_HOUR: Final[int] = 3
|
||||
"""Default hour (UTC) for the nightly blocklist import job."""
|
||||
|
||||
BLOCKLIST_PREVIEW_MAX_LINES: Final[int] = 100
|
||||
"""Maximum number of IP lines returned by the blocklist preview endpoint."""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Health check
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
HEALTH_CHECK_INTERVAL_SECONDS: Final[int] = 30
|
||||
"""How often the background health-check task polls fail2ban."""
|
||||
318
backend/app/utils/fail2ban_client.py
Normal file
318
backend/app/utils/fail2ban_client.py
Normal file
@@ -0,0 +1,318 @@
|
||||
"""Async wrapper around the fail2ban Unix domain socket protocol.
|
||||
|
||||
fail2ban uses a proprietary binary protocol over a Unix domain socket:
|
||||
commands are transmitted as pickle-serialised Python lists and responses
|
||||
are returned the same way. The protocol constants (``END``, ``CLOSE``)
|
||||
come from ``fail2ban.protocol.CSPROTO``.
|
||||
|
||||
Because the underlying socket is blocking, all I/O is dispatched to a
|
||||
thread-pool executor so the FastAPI event loop is never blocked.
|
||||
|
||||
Usage::
|
||||
|
||||
async with Fail2BanClient(socket_path="/var/run/fail2ban/fail2ban.sock") as client:
|
||||
status = await client.send(["status"])
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import errno
|
||||
import socket
|
||||
import time
|
||||
from pickle import HIGHEST_PROTOCOL, dumps, loads
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from types import TracebackType
|
||||
|
||||
import structlog
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# fail2ban protocol constants — inline to avoid a hard import dependency
|
||||
# at module load time (the fail2ban-master path may not be on sys.path yet
|
||||
# in some test environments).
|
||||
_PROTO_END: bytes = b"<F2B_END_COMMAND>"
|
||||
_PROTO_CLOSE: bytes = b"<F2B_CLOSE_COMMAND>"
|
||||
_PROTO_EMPTY: bytes = b""
|
||||
|
||||
# Default receive buffer size (doubles on each iteration up to max).
|
||||
_RECV_BUFSIZE_START: int = 1024
|
||||
_RECV_BUFSIZE_MAX: int = 32768
|
||||
|
||||
# OSError errno values that indicate a transient socket condition and may be
|
||||
# safely retried. ENOENT (socket file missing) is intentionally excluded so
|
||||
# a missing socket raises immediately without delay.
|
||||
_RETRYABLE_ERRNOS: frozenset[int] = frozenset(
|
||||
{errno.EAGAIN, errno.ECONNREFUSED, errno.ENOBUFS}
|
||||
)
|
||||
|
||||
# Retry policy for _send_command_sync.
|
||||
_RETRY_MAX_ATTEMPTS: int = 3
|
||||
_RETRY_INITIAL_BACKOFF: float = 0.15 # seconds; doubles on each attempt
|
||||
|
||||
# Maximum number of concurrent in-flight socket commands. Operations that
|
||||
# exceed this cap wait until a slot is available.
|
||||
_COMMAND_SEMAPHORE_CONCURRENCY: int = 10
|
||||
# The semaphore is created lazily on the first send() call so it binds to the
|
||||
# event loop that is actually running (important for test isolation).
|
||||
_command_semaphore: asyncio.Semaphore | None = None
|
||||
|
||||
|
||||
class Fail2BanConnectionError(Exception):
    """Raised when the fail2ban socket is unreachable or returns an error."""

    def __init__(self, message: str, socket_path: str) -> None:
        """Build the error, embedding the targeted socket path in the text.

        Args:
            message: Description of the connection problem.
            socket_path: The fail2ban socket path that was targeted.
        """
        self.socket_path: str = socket_path
        full_message = f"{message} (socket: {socket_path})"
        super().__init__(full_message)
|
||||
|
||||
|
||||
class Fail2BanProtocolError(Exception):
    """Raised when the response from fail2ban cannot be parsed.

    Typically indicates that the bytes read from the socket could not be
    unpickled into a Python object (see ``_send_command_sync``).
    """
|
||||
|
||||
|
||||
def _send_command_sync(
    socket_path: str,
    command: list[Any],
    timeout: float,
) -> Any:
    """Send a command to fail2ban and return the parsed response.

    This is a **synchronous** function intended to be called from within
    :func:`asyncio.get_event_loop().run_in_executor` so that the event loop
    is not blocked.

    Transient ``OSError`` conditions (``EAGAIN``, ``ECONNREFUSED``,
    ``ENOBUFS``) are retried up to :data:`_RETRY_MAX_ATTEMPTS` times with
    exponential back-off starting at :data:`_RETRY_INITIAL_BACKOFF` seconds.
    All other ``OSError`` variants (including ``ENOENT`` — socket file
    missing) and :class:`Fail2BanProtocolError` are raised immediately.
    A structured log event ``fail2ban_socket_retry`` is emitted for each
    retry attempt.

    Args:
        socket_path: Path to the fail2ban Unix domain socket.
        command: List of command tokens, e.g. ``["status", "sshd"]``.
        timeout: Socket timeout in seconds.

    Returns:
        The deserialized Python object returned by fail2ban.

    Raises:
        Fail2BanConnectionError: If the socket cannot be reached after all
            retry attempts, or immediately for non-retryable errors.
        Fail2BanProtocolError: If the response cannot be unpickled.
    """
    last_oserror: OSError | None = None
    for attempt in range(1, _RETRY_MAX_ATTEMPTS + 1):
        # A fresh socket per attempt: the previous one is closed in `finally`.
        sock: socket.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.settimeout(timeout)
            sock.connect(socket_path)

            # Serialise and send the command.
            payload: bytes = dumps(
                list(map(_coerce_command_token, command)),
                HIGHEST_PROTOCOL,
            )
            sock.sendall(payload)
            sock.sendall(_PROTO_END)

            # Receive until we see the end marker.
            # rfind(..., -32): the END marker may straddle a chunk boundary,
            # so only the tail of the accumulated buffer needs scanning.
            raw: bytes = _PROTO_EMPTY
            bufsize: int = _RECV_BUFSIZE_START
            while raw.rfind(_PROTO_END, -32) == -1:
                chunk: bytes = sock.recv(bufsize)
                if not chunk:
                    raise Fail2BanConnectionError(
                        "Connection closed unexpectedly by fail2ban",
                        socket_path,
                    )
                if chunk == _PROTO_END:
                    break
                raw += chunk
                # Grow the receive buffer geometrically up to the cap.
                if bufsize < _RECV_BUFSIZE_MAX:
                    bufsize <<= 1

            try:
                # NOTE: pickle stops at its STOP opcode, so a trailing END
                # marker left in `raw` is harmless here.
                return loads(raw)
            except Exception as exc:
                raise Fail2BanProtocolError(
                    f"Failed to unpickle fail2ban response: {exc}"
                ) from exc
        except Fail2BanProtocolError:
            # Protocol errors are never transient — raise immediately.
            raise
        except Fail2BanConnectionError:
            # Mid-receive close or empty-chunk error — raise immediately.
            raise
        except OSError as exc:
            is_retryable = exc.errno in _RETRYABLE_ERRNOS
            if is_retryable and attempt < _RETRY_MAX_ATTEMPTS:
                log.warning(
                    "fail2ban_socket_retry",
                    attempt=attempt,
                    socket_errno=exc.errno,
                    socket_path=socket_path,
                )
                last_oserror = exc
                time.sleep(_RETRY_INITIAL_BACKOFF * (2 ** (attempt - 1)))
                continue
            raise Fail2BanConnectionError(str(exc), socket_path) from exc
        finally:
            # Best-effort polite close; errors here must never mask the
            # real outcome of the attempt.
            with contextlib.suppress(OSError):
                sock.sendall(_PROTO_CLOSE + _PROTO_END)
            with contextlib.suppress(OSError):
                sock.shutdown(socket.SHUT_RDWR)
            sock.close()

    # Exhausted all retry attempts — surface the last transient error.
    raise Fail2BanConnectionError(
        str(last_oserror), socket_path
    ) from last_oserror
|
||||
|
||||
|
||||
def _coerce_command_token(token: Any) -> Any:
|
||||
"""Coerce a command token to a type that fail2ban understands.
|
||||
|
||||
fail2ban's ``CSocket.convert`` accepts ``str``, ``bool``, ``int``,
|
||||
``float``, ``list``, ``dict``, and ``set``. Any other type is
|
||||
stringified.
|
||||
|
||||
Args:
|
||||
token: A single token from the command list.
|
||||
|
||||
Returns:
|
||||
The token in a type safe for pickle transmission to fail2ban.
|
||||
"""
|
||||
if isinstance(token, (str, bool, int, float, list, dict, set)):
|
||||
return token
|
||||
return str(token)
|
||||
|
||||
|
||||
class Fail2BanClient:
    """Async client for communicating with the fail2ban daemon via its socket.

    All blocking socket I/O is offloaded to the default thread-pool executor
    so the asyncio event loop remains unblocked.

    The client can be used as an async context manager::

        async with Fail2BanClient(socket_path) as client:
            result = await client.send(["status"])

    Or instantiated directly and closed manually::

        client = Fail2BanClient(socket_path)
        result = await client.send(["status"])
    """

    def __init__(
        self,
        socket_path: str,
        timeout: float = 5.0,
    ) -> None:
        """Initialise the client.

        Args:
            socket_path: Path to the fail2ban Unix domain socket.
            timeout: Socket I/O timeout in seconds.
        """
        self.socket_path: str = socket_path
        self.timeout: float = timeout

    async def send(self, command: list[Any]) -> Any:
        """Send a command to fail2ban and return the response.

        Acquires the module-level concurrency semaphore before dispatching
        so that no more than :data:`_COMMAND_SEMAPHORE_CONCURRENCY` commands
        are in-flight at the same time. Commands that exceed the cap are
        queued until a slot becomes available. A debug-level log event is
        emitted when a command must wait.

        The command is serialised as a pickle list, sent to the socket, and
        the response is deserialised before being returned.

        Args:
            command: A list of command tokens, e.g. ``["status", "sshd"]``.

        Returns:
            The Python object returned by fail2ban (typically a list or dict).

        Raises:
            Fail2BanConnectionError: If the socket cannot be reached or the
                connection is unexpectedly closed.
            Fail2BanProtocolError: If the response cannot be decoded.
        """
        global _command_semaphore
        if _command_semaphore is None:
            # Lazy creation binds the semaphore to the loop actually
            # running this coroutine (important for test isolation).
            _command_semaphore = asyncio.Semaphore(_COMMAND_SEMAPHORE_CONCURRENCY)

        if _command_semaphore.locked():
            log.debug(
                "fail2ban_command_waiting_semaphore",
                command=command,
                concurrency_limit=_COMMAND_SEMAPHORE_CONCURRENCY,
            )

        async with _command_semaphore:
            log.debug("fail2ban_sending_command", command=command)
            # get_running_loop() (not the deprecated get_event_loop()) is
            # the correct way to obtain the loop inside a coroutine; the
            # latter is deprecated here since Python 3.10.
            loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
            try:
                response: Any = await loop.run_in_executor(
                    None,
                    _send_command_sync,
                    self.socket_path,
                    command,
                    self.timeout,
                )
            except Fail2BanConnectionError:
                log.warning(
                    "fail2ban_connection_error",
                    socket_path=self.socket_path,
                    command=command,
                )
                raise
            except Fail2BanProtocolError:
                log.error(
                    "fail2ban_protocol_error",
                    socket_path=self.socket_path,
                    command=command,
                )
                raise
            log.debug("fail2ban_received_response", command=command)
            return response

    async def ping(self) -> bool:
        """Return ``True`` if the fail2ban daemon is reachable.

        Sends a ``ping`` command and checks for a ``pong`` response.

        Returns:
            ``True`` when the daemon responds correctly, ``False`` otherwise.
        """
        try:
            response: Any = await self.send(["ping"])
            return bool(response == 1)  # fail2ban returns 1 on successful ping
        except (Fail2BanConnectionError, Fail2BanProtocolError):
            return False

    async def __aenter__(self) -> Fail2BanClient:
        """Return self when used as an async context manager."""
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """No-op exit — each command opens and closes its own socket."""
|
||||
101
backend/app/utils/ip_utils.py
Normal file
101
backend/app/utils/ip_utils.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""IP address and CIDR range validation and normalisation utilities.
|
||||
|
||||
All IP handling in BanGUI goes through these helpers to enforce consistency
|
||||
and prevent malformed addresses from reaching fail2ban.
|
||||
"""
|
||||
|
||||
import ipaddress
|
||||
|
||||
|
||||
def is_valid_ip(address: str) -> bool:
    """Check whether *address* parses as an IPv4 or IPv6 address.

    Args:
        address: The string to validate.

    Returns:
        ``True`` if the string represents a valid IP address, ``False`` otherwise.
    """
    try:
        ipaddress.ip_address(address)
    except ValueError:
        return False
    return True
|
||||
|
||||
|
||||
def is_valid_network(cidr: str) -> bool:
    """Check whether *cidr* parses as an IPv4 or IPv6 network.

    Host bits are tolerated (``strict=False``), matching
    :func:`normalise_network`.

    Args:
        cidr: The string to validate, e.g. ``"192.168.0.0/24"``.

    Returns:
        ``True`` if the string is a valid CIDR network, ``False`` otherwise.
    """
    try:
        ipaddress.ip_network(cidr, strict=False)
    except ValueError:
        return False
    return True
|
||||
|
||||
|
||||
def is_valid_ip_or_network(value: str) -> bool:
    """Check whether *value* is a valid IP address or CIDR network.

    Args:
        value: The string to validate.

    Returns:
        ``True`` if the string is a valid IP address or CIDR range.
    """
    try:
        ipaddress.ip_address(value)
        return True
    except ValueError:
        pass
    try:
        ipaddress.ip_network(value, strict=False)
        return True
    except ValueError:
        return False
|
||||
|
||||
|
||||
def normalise_ip(address: str) -> str:
    """Return the canonical string form of an IP address.

    IPv6 addresses come back in their compressed canonical short form;
    IPv4 addresses are unchanged.

    Args:
        address: A valid IP address string.

    Returns:
        Normalised IP address string.

    Raises:
        ValueError: If *address* is not a valid IP address.
    """
    parsed = ipaddress.ip_address(address)
    return str(parsed)
|
||||
|
||||
|
||||
def normalise_network(cidr: str) -> str:
    """Return the canonical string form of a CIDR network.

    Host bits are masked away, so ``"192.168.1.5/24"`` becomes
    ``"192.168.1.0/24"``.

    Args:
        cidr: A valid CIDR network string, e.g. ``"192.168.1.5/24"``.

    Returns:
        Normalised network string, e.g. ``"192.168.1.0/24"``.

    Raises:
        ValueError: If *cidr* is not a valid network.
    """
    parsed = ipaddress.ip_network(cidr, strict=False)
    return str(parsed)
|
||||
|
||||
|
||||
def ip_version(address: str) -> int:
    """Return 4 or 6 depending on the IP version of *address*.

    Args:
        address: A valid IP address string.

    Returns:
        ``4`` for IPv4, ``6`` for IPv6.

    Raises:
        ValueError: If *address* is not a valid IP address.
    """
    parsed = ipaddress.ip_address(address)
    return parsed.version
|
||||
93
backend/app/utils/jail_config.py
Normal file
93
backend/app/utils/jail_config.py
Normal file
@@ -0,0 +1,93 @@
|
||||
"""Utilities for ensuring required fail2ban jail configuration files exist.
|
||||
|
||||
BanGUI requires two custom jails — ``manual-Jail`` and ``blocklist-import``
|
||||
— to be present in the fail2ban ``jail.d`` directory. This module provides
|
||||
:func:`ensure_jail_configs` which checks each of the four files
|
||||
(``*.conf`` template + ``*.local`` override) and creates any that are missing
|
||||
with the correct default content.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import structlog
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
log: structlog.stdlib.BoundLogger = structlog.get_logger()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Default file contents
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_MANUAL_JAIL_CONF = """\
|
||||
[manual-Jail]
|
||||
|
||||
enabled = false
|
||||
filter = manual-Jail
|
||||
logpath = /remotelogs/bangui/auth.log
|
||||
backend = polling
|
||||
maxretry = 3
|
||||
findtime = 120
|
||||
bantime = 60
|
||||
ignoreip = 127.0.0.0/8 ::1 172.16.0.0/12
|
||||
"""
|
||||
|
||||
_MANUAL_JAIL_LOCAL = """\
|
||||
[manual-Jail]
|
||||
enabled = true
|
||||
"""
|
||||
|
||||
_BLOCKLIST_IMPORT_CONF = """\
|
||||
[blocklist-import]
|
||||
|
||||
enabled = false
|
||||
filter =
|
||||
logpath = /dev/null
|
||||
backend = auto
|
||||
maxretry = 1
|
||||
findtime = 1d
|
||||
bantime = 1w
|
||||
ignoreip = 127.0.0.0/8 ::1 172.16.0.0/12
|
||||
"""
|
||||
|
||||
_BLOCKLIST_IMPORT_LOCAL = """\
|
||||
[blocklist-import]
|
||||
enabled = true
|
||||
"""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# File registry: (filename, default_content)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_JAIL_FILES: list[tuple[str, str]] = [
|
||||
("manual-Jail.conf", _MANUAL_JAIL_CONF),
|
||||
("manual-Jail.local", _MANUAL_JAIL_LOCAL),
|
||||
("blocklist-import.conf", _BLOCKLIST_IMPORT_CONF),
|
||||
("blocklist-import.local", _BLOCKLIST_IMPORT_LOCAL),
|
||||
]
|
||||
|
||||
|
||||
def ensure_jail_configs(jail_d_path: Path) -> None:
|
||||
"""Ensure the required fail2ban jail configuration files exist.
|
||||
|
||||
Checks for ``manual-Jail.conf``, ``manual-Jail.local``,
|
||||
``blocklist-import.conf``, and ``blocklist-import.local`` inside
|
||||
*jail_d_path*. Any file that is missing is created with its default
|
||||
content. Existing files are **never** overwritten.
|
||||
|
||||
Args:
|
||||
jail_d_path: Path to the fail2ban ``jail.d`` directory. Will be
|
||||
created (including all parents) if it does not already exist.
|
||||
"""
|
||||
jail_d_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
for filename, default_content in _JAIL_FILES:
|
||||
file_path = jail_d_path / filename
|
||||
if file_path.exists():
|
||||
log.debug("jail_config_already_exists", path=str(file_path))
|
||||
else:
|
||||
file_path.write_text(default_content, encoding="utf-8")
|
||||
log.info("jail_config_created", path=str(file_path))
|
||||
67
backend/app/utils/time_utils.py
Normal file
67
backend/app/utils/time_utils.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""Timezone-aware datetime helpers.
|
||||
|
||||
All datetimes in BanGUI are stored and transmitted in UTC.
|
||||
Conversion to the user's display timezone happens only at the presentation
|
||||
layer (frontend). These utilities provide a consistent, safe foundation
|
||||
for working with time throughout the backend.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
|
||||
def utc_now() -> datetime.datetime:
|
||||
"""Return the current UTC time as a timezone-aware :class:`datetime.datetime`.
|
||||
|
||||
Returns:
|
||||
Current UTC datetime with ``tzinfo=datetime.UTC``.
|
||||
"""
|
||||
return datetime.datetime.now(datetime.UTC)
|
||||
|
||||
|
||||
def utc_from_timestamp(ts: float) -> datetime.datetime:
|
||||
"""Convert a POSIX timestamp to a timezone-aware UTC datetime.
|
||||
|
||||
Args:
|
||||
ts: POSIX timestamp (seconds since Unix epoch).
|
||||
|
||||
Returns:
|
||||
Timezone-aware UTC :class:`datetime.datetime`.
|
||||
"""
|
||||
return datetime.datetime.fromtimestamp(ts, tz=datetime.UTC)
|
||||
|
||||
|
||||
def add_minutes(dt: datetime.datetime, minutes: int) -> datetime.datetime:
|
||||
"""Return a new datetime that is *minutes* ahead of *dt*.
|
||||
|
||||
Args:
|
||||
dt: The source datetime (must be timezone-aware).
|
||||
minutes: Number of minutes to add. May be negative.
|
||||
|
||||
Returns:
|
||||
A new timezone-aware :class:`datetime.datetime`.
|
||||
"""
|
||||
return dt + datetime.timedelta(minutes=minutes)
|
||||
|
||||
|
||||
def is_expired(expires_at: datetime.datetime) -> bool:
|
||||
"""Return ``True`` if *expires_at* is in the past relative to UTC now.
|
||||
|
||||
Args:
|
||||
expires_at: The expiry timestamp to check (must be timezone-aware).
|
||||
|
||||
Returns:
|
||||
``True`` when the timestamp is past, ``False`` otherwise.
|
||||
"""
|
||||
return utc_now() >= expires_at
|
||||
|
||||
|
||||
def hours_ago(hours: int) -> datetime.datetime:
|
||||
"""Return a timezone-aware UTC datetime *hours* before now.
|
||||
|
||||
Args:
|
||||
hours: Number of hours to subtract from the current time.
|
||||
|
||||
Returns:
|
||||
Timezone-aware UTC :class:`datetime.datetime`.
|
||||
"""
|
||||
return utc_now() - datetime.timedelta(hours=hours)
|
||||
63
backend/pyproject.toml
Normal file
63
backend/pyproject.toml
Normal file
@@ -0,0 +1,63 @@
|
||||
# Packaging and tooling configuration for the BanGUI backend.

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "bangui-backend"
version = "0.9.0"
description = "BanGUI backend — fail2ban web management interface"
requires-python = ">=3.12"
# Runtime dependencies; dev-only tools live under optional-dependencies below.
dependencies = [
    "fastapi>=0.115.0",
    "uvicorn[standard]>=0.32.0",
    "pydantic>=2.9.0",
    "pydantic-settings>=2.6.0",
    "aiosqlite>=0.20.0",
    "aiohttp>=3.11.0",
    "apscheduler>=3.10,<4.0",
    "structlog>=24.4.0",
    "bcrypt>=4.2.0",
    "geoip2>=4.8.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=8.3.0",
    "pytest-asyncio>=0.24.0",
    "httpx>=0.27.0",
    "ruff>=0.8.0",
    "mypy>=1.13.0",
    "pytest-cov>=6.0.0",
    "pytest-mock>=3.14.0",
]

[tool.hatch.build.targets.wheel]
packages = ["app"]

# ---------------------------------------------------------------------------
# Lint / format (ruff)
# ---------------------------------------------------------------------------

[tool.ruff]
line-length = 120
target-version = "py312"

[tool.ruff.lint]
select = ["E", "F", "W", "I", "N", "UP", "B", "C4", "SIM", "TCH"]
ignore = ["B008"] # FastAPI uses function calls in default arguments (Depends)

[tool.ruff.lint.per-file-ignores]
# sys.path manipulation before stdlib imports is intentional in test helpers
# pytest evaluates fixture type annotations at runtime, so TC001/TC002/TC003 are false-positives
"tests/**" = ["E402", "TC001", "TC002", "TC003"]
"app/routers/**" = ["TC001", "TC002"] # FastAPI evaluates Depends() type aliases at runtime via get_type_hints()

[tool.ruff.format]
quote-style = "double"

# ---------------------------------------------------------------------------
# Type checking (mypy) and test runner (pytest)
# ---------------------------------------------------------------------------

[tool.mypy]
python_version = "3.12"
strict = true
plugins = ["pydantic.mypy"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
# The bundled fail2ban sources must be importable during test collection.
pythonpath = [".", "../fail2ban-master"]
testpaths = ["tests"]
addopts = "--cov=app --cov-report=term-missing"
|
||||
1
backend/tests/__init__.py
Normal file
1
backend/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Tests package."""
|
||||
77
backend/tests/conftest.py
Normal file
77
backend/tests/conftest.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""Shared pytest fixtures for the BanGUI backend test suite.
|
||||
|
||||
All fixtures are async-compatible via pytest-asyncio. External dependencies
|
||||
(fail2ban socket, HTTP APIs) are always mocked so tests never touch real
|
||||
infrastructure.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Ensure the bundled fail2ban package is importable.
|
||||
_FAIL2BAN_MASTER: Path = Path(__file__).resolve().parents[2] / "fail2ban-master"
|
||||
if str(_FAIL2BAN_MASTER) not in sys.path:
|
||||
sys.path.insert(0, str(_FAIL2BAN_MASTER))
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
from httpx import ASGITransport, AsyncClient
|
||||
|
||||
from app.config import Settings
|
||||
from app.db import init_db
|
||||
from app.main import create_app
|
||||
|
||||
|
||||
@pytest.fixture
def test_settings(tmp_path: Path) -> Settings:
    """Build a ``Settings`` object suitable for isolated tests.

    The database lives in a per-test temporary directory, so no test can
    observe state left behind by another test or by local development runs.

    Args:
        tmp_path: Per-test temporary directory provided by pytest.

    Returns:
        A :class:`~app.config.Settings` instance with test-safe overrides.
    """
    db_file = tmp_path / "test_bangui.db"
    return Settings(
        database_path=str(db_file),
        fail2ban_socket="/tmp/fake_fail2ban.sock",
        session_secret="test-secret-key-do-not-use-in-production",
        session_duration_minutes=60,
        timezone="UTC",
        log_level="debug",
    )
|
||||
|
||||
|
||||
@pytest.fixture
async def client(test_settings: Settings) -> AsyncClient:  # type: ignore[misc]
    """Provide an ``AsyncClient`` wired to a test instance of the BanGUI app.

    The client sends requests directly to the ASGI application (no network).
    ``app.state.db`` is initialised manually because ``ASGITransport`` does
    not run the ASGI lifespan, so ``Depends(get_db)`` would otherwise find
    no database in router tests.

    Args:
        test_settings: Injected test settings fixture.

    Yields:
        An :class:`httpx.AsyncClient` with ``base_url="http://test"``.
    """
    app = create_app(settings=test_settings)

    # Bootstrap the database on app.state so Depends(get_db) works in tests.
    db: aiosqlite.Connection = await aiosqlite.connect(test_settings.database_path)
    db.row_factory = aiosqlite.Row
    await init_db(db)
    app.state.db = db

    # try/finally guarantees the connection is closed even when the test
    # body raises; without it, pytest throws into the generator at `yield`
    # and the close call would be skipped, leaking the connection.
    try:
        transport: ASGITransport = ASGITransport(app=app)
        async with AsyncClient(transport=transport, base_url="http://test") as ac:
            yield ac
    finally:
        await db.close()
|
||||
0
backend/tests/scripts/__init__.py
Normal file
0
backend/tests/scripts/__init__.py
Normal file
213
backend/tests/scripts/seed_10k_bans.py
Normal file
213
backend/tests/scripts/seed_10k_bans.py
Normal file
@@ -0,0 +1,213 @@
|
||||
"""Seed 10 000 synthetic bans into the fail2ban dev database.
|
||||
|
||||
Usage::
|
||||
|
||||
cd backend
|
||||
python tests/scripts/seed_10k_bans.py [--db-path /path/to/fail2ban.sqlite3]
|
||||
|
||||
This script inserts 10 000 synthetic ban rows spread over the last 365 days
|
||||
into the fail2ban SQLite database and pre-resolves all synthetic IPs into the
|
||||
BanGUI geo_cache. Run it once to get realistic dashboard and map load times
|
||||
in the browser without requiring a live fail2ban instance with active traffic.
|
||||
|
||||
.. warning::
|
||||
This script **writes** to the fail2ban database. Only use it against the
|
||||
development database (``Docker/fail2ban-dev-config/fail2ban.sqlite3`` or
|
||||
equivalent). Never run it against a production database.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import random
|
||||
import sqlite3
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
# Module logger; configured by logging.basicConfig() in main().
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Default paths
# ---------------------------------------------------------------------------

# Both defaults are derived from this file's location inside the repository;
# either can be overridden on the CLI via --f2b-db / --app-db.
_DEFAULT_F2B_DB: str = str(
    Path(__file__).resolve().parents[3] / "Docker" / "fail2ban-dev-config" / "fail2ban.sqlite3"
)
_DEFAULT_APP_DB: str = str(
    Path(__file__).resolve().parents[2] / "bangui.db"
)

# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

# Number of synthetic ban rows to insert.
_BAN_COUNT: int = 10_000
# Ban timestamps are spread uniformly over this window (one year, in seconds).
_YEAR_SECONDS: int = 365 * 24 * 3600
# Jail names chosen at random for each synthetic ban.
_JAIL_POOL: list[str] = ["sshd", "nginx", "blocklist-import", "postfix", "dovecot"]
# (country_code, country_name) pairs cycled through when seeding geo_cache.
_COUNTRY_POOL: list[tuple[str, str]] = [
    ("DE", "Germany"),
    ("US", "United States"),
    ("CN", "China"),
    ("RU", "Russia"),
    ("FR", "France"),
    ("BR", "Brazil"),
    ("IN", "India"),
    ("GB", "United Kingdom"),
    ("NL", "Netherlands"),
    ("CA", "Canada"),
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _random_ip() -> str:
|
||||
"""Return a random dotted-decimal IPv4 string in public ranges."""
|
||||
return ".".join(str(random.randint(1, 254)) for _ in range(4))
|
||||
|
||||
|
||||
def _seed_bans(f2b_db_path: str) -> list[str]:
    """Write 10 000 synthetic ban rows into the fail2ban SQLite database.

    The plain synchronous ``sqlite3`` module is used on purpose: fail2ban
    itself writes synchronously and the schema is straightforward.

    Args:
        f2b_db_path: Filesystem path to the fail2ban SQLite database.

    Returns:
        Every IP address that was inserted.
    """
    ts_now = int(time.time())
    seeded_ips: list[str] = [_random_ip() for _ in range(_BAN_COUNT)]

    ban_rows = []
    for addr in seeded_ips:
        ban_rows.append(
            (
                random.choice(_JAIL_POOL),
                addr,
                ts_now - random.randint(0, _YEAR_SECONDS),
                3600,
                random.randint(1, 10),
                None,
            )
        )

    with sqlite3.connect(f2b_db_path) as con:
        # Dev environments may not have a fail2ban-created schema yet, so
        # create the bans table on demand.
        con.execute(
            "CREATE TABLE IF NOT EXISTS bans ("
            "jail TEXT NOT NULL, "
            "ip TEXT, "
            "timeofban INTEGER NOT NULL, "
            "bantime INTEGER NOT NULL DEFAULT 3600, "
            "bancount INTEGER NOT NULL DEFAULT 1, "
            "data JSON"
            ")"
        )
        con.executemany(
            "INSERT INTO bans (jail, ip, timeofban, bantime, bancount, data) "
            "VALUES (?, ?, ?, ?, ?, ?)",
            ban_rows,
        )
        con.commit()

    log.info("Inserted %d ban rows into %s", _BAN_COUNT, f2b_db_path)
    return seeded_ips
|
||||
|
||||
|
||||
def _seed_geo_cache(app_db_path: str, ips: list[str]) -> None:
    """Fill the BanGUI geo_cache table with synthetic data for *ips*.

    Country data is assigned by cycling through :data:`_COUNTRY_POOL`, which
    gives the world map a realistic-looking distribution without making any
    real HTTP requests.

    Args:
        app_db_path: Filesystem path to the BanGUI application database.
        ips: IP addresses to pre-cache.
    """
    # Repeat the pool enough times to cover every IP, then pair them up.
    repeats = len(ips) // len(_COUNTRY_POOL) + 1
    country_cycle = _COUNTRY_POOL * repeats

    cache_rows = []
    for idx, (addr, (code, country)) in enumerate(zip(ips, country_cycle, strict=False)):
        cache_rows.append(
            (addr, code, country, f"AS{1000 + idx % 500}", f"Synthetic ISP {idx % 50}")
        )

    with sqlite3.connect(app_db_path) as con:
        con.execute(
            "CREATE TABLE IF NOT EXISTS geo_cache ("
            "ip TEXT PRIMARY KEY, "
            "country_code TEXT, "
            "country_name TEXT, "
            "asn TEXT, "
            "org TEXT, "
            "cached_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))"
            ")"
        )
        con.executemany(
            """
            INSERT INTO geo_cache (ip, country_code, country_name, asn, org)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(ip) DO UPDATE SET
                country_code = excluded.country_code,
                country_name = excluded.country_name,
                asn = excluded.asn,
                org = excluded.org
            """,
            cache_rows,
        )
        con.commit()

    log.info("Pre-cached geo data for %d IPs in %s", len(ips), app_db_path)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def main() -> None:
    """Parse CLI arguments and run the seed operation."""
    logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")

    parser = argparse.ArgumentParser(
        description="Seed 10 000 synthetic bans for performance testing."
    )
    parser.add_argument(
        "--f2b-db",
        default=_DEFAULT_F2B_DB,
        help=f"Path to the fail2ban SQLite database (default: {_DEFAULT_F2B_DB})",
    )
    parser.add_argument(
        "--app-db",
        default=_DEFAULT_APP_DB,
        help=f"Path to the BanGUI application database (default: {_DEFAULT_APP_DB})",
    )
    args = parser.parse_args()

    f2b_db = Path(args.f2b_db)
    app_db = Path(args.app_db)

    # Bail out when either parent directory is missing rather than silently
    # creating databases in unexpected locations.
    directory_checks = (
        ("fail2ban DB directory does not exist: %s", f2b_db.parent),
        ("App DB directory does not exist: %s", app_db.parent),
    )
    for message, directory in directory_checks:
        if not directory.exists():
            log.error(message, directory)
            sys.exit(1)

    log.info("Seeding %d bans into: %s", _BAN_COUNT, f2b_db)
    inserted = _seed_bans(str(f2b_db))

    log.info("Pre-caching geo data into: %s", app_db)
    _seed_geo_cache(str(app_db), inserted)

    log.info("Done. Restart the BanGUI backend to load the new geo cache entries.")
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
1
backend/tests/test_repositories/__init__.py
Normal file
1
backend/tests/test_repositories/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Repository test package."""
|
||||
210
backend/tests/test_repositories/test_blocklist.py
Normal file
210
backend/tests/test_repositories/test_blocklist.py
Normal file
@@ -0,0 +1,210 @@
|
||||
"""Tests for blocklist_repo and import_log_repo."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.db import init_db
|
||||
from app.repositories import blocklist_repo, import_log_repo
|
||||
|
||||
|
||||
@pytest.fixture
async def db(tmp_path: Path) -> aiosqlite.Connection:  # type: ignore[misc]
    """Yield an aiosqlite connection with the BanGUI schema applied."""
    connection: aiosqlite.Connection = await aiosqlite.connect(str(tmp_path / "bl_test.db"))
    connection.row_factory = aiosqlite.Row
    await init_db(connection)
    yield connection
    await connection.close()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# blocklist_repo tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBlocklistRepo:
    async def test_create_source_returns_int_id(self, db: aiosqlite.Connection) -> None:
        """create_source returns a positive integer id."""
        new_id = await blocklist_repo.create_source(db, "Test", "https://example.com/list.txt")
        assert isinstance(new_id, int)
        assert new_id > 0

    async def test_get_source_returns_row(self, db: aiosqlite.Connection) -> None:
        """get_source returns the correct row after creation."""
        new_id = await blocklist_repo.create_source(db, "Alpha", "https://alpha.test/ips.txt")
        fetched = await blocklist_repo.get_source(db, new_id)
        assert fetched is not None
        assert fetched["name"] == "Alpha"
        assert fetched["url"] == "https://alpha.test/ips.txt"
        assert fetched["enabled"] is True

    async def test_get_source_missing_returns_none(self, db: aiosqlite.Connection) -> None:
        """get_source returns None for a non-existent id."""
        assert await blocklist_repo.get_source(db, 9999) is None

    async def test_list_sources_empty(self, db: aiosqlite.Connection) -> None:
        """list_sources returns empty list when no sources exist."""
        assert await blocklist_repo.list_sources(db) == []

    async def test_list_sources_returns_all(self, db: aiosqlite.Connection) -> None:
        """list_sources returns all created sources."""
        await blocklist_repo.create_source(db, "A", "https://a.test/")
        await blocklist_repo.create_source(db, "B", "https://b.test/")
        listed = await blocklist_repo.list_sources(db)
        assert len(listed) == 2

    async def test_list_enabled_sources_filters(self, db: aiosqlite.Connection) -> None:
        """list_enabled_sources only returns rows with enabled=True."""
        await blocklist_repo.create_source(db, "Enabled", "https://on.test/", enabled=True)
        disabled_id = await blocklist_repo.create_source(
            db, "Disabled", "https://off.test/", enabled=False
        )
        await blocklist_repo.update_source(db, disabled_id, enabled=False)
        enabled_rows = await blocklist_repo.list_enabled_sources(db)
        assert len(enabled_rows) == 1
        assert enabled_rows[0]["name"] == "Enabled"

    async def test_update_source_name(self, db: aiosqlite.Connection) -> None:
        """update_source changes the name field."""
        new_id = await blocklist_repo.create_source(db, "Old", "https://old.test/")
        assert await blocklist_repo.update_source(db, new_id, name="New") is True
        fetched = await blocklist_repo.get_source(db, new_id)
        assert fetched is not None
        assert fetched["name"] == "New"

    async def test_update_source_enabled_false(self, db: aiosqlite.Connection) -> None:
        """update_source can disable a source."""
        new_id = await blocklist_repo.create_source(db, "On", "https://on.test/")
        await blocklist_repo.update_source(db, new_id, enabled=False)
        fetched = await blocklist_repo.get_source(db, new_id)
        assert fetched is not None
        assert fetched["enabled"] is False

    async def test_update_source_missing_returns_false(self, db: aiosqlite.Connection) -> None:
        """update_source returns False for a non-existent id."""
        assert await blocklist_repo.update_source(db, 9999, name="Ghost") is False

    async def test_delete_source_removes_row(self, db: aiosqlite.Connection) -> None:
        """delete_source removes the row and returns True."""
        new_id = await blocklist_repo.create_source(db, "Del", "https://del.test/")
        assert await blocklist_repo.delete_source(db, new_id) is True
        assert await blocklist_repo.get_source(db, new_id) is None

    async def test_delete_source_missing_returns_false(self, db: aiosqlite.Connection) -> None:
        """delete_source returns False for a non-existent id."""
        assert await blocklist_repo.delete_source(db, 9999) is False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# import_log_repo tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestImportLogRepo:
    @staticmethod
    async def _add(
        db: aiosqlite.Connection,
        url: str,
        imported: int,
        source_id: int | None = None,
        skipped: int = 0,
    ) -> int:
        """Insert one import-log row, defaulting the rarely-varied fields."""
        return await import_log_repo.add_log(
            db,
            source_id=source_id,
            source_url=url,
            ips_imported=imported,
            ips_skipped=skipped,
            errors=None,
        )

    async def test_add_log_returns_id(self, db: aiosqlite.Connection) -> None:
        """add_log returns a positive integer id."""
        log_id = await self._add(db, "https://example.com/list.txt", 10, skipped=2)
        assert isinstance(log_id, int)
        assert log_id > 0

    async def test_list_logs_returns_all(self, db: aiosqlite.Connection) -> None:
        """list_logs returns all logs when no source_id filter is applied."""
        for i in range(3):
            await self._add(db, f"https://s{i}.test/", i * 5)
        items, total = await import_log_repo.list_logs(db)
        assert total == 3
        assert len(items) == 3

    async def test_list_logs_pagination(self, db: aiosqlite.Connection) -> None:
        """list_logs respects page and page_size."""
        for i in range(5):
            await self._add(db, f"https://p{i}.test/", 1)
        items, total = await import_log_repo.list_logs(db, page=2, page_size=2)
        assert total == 5
        assert len(items) == 2

    async def test_list_logs_source_filter(self, db: aiosqlite.Connection) -> None:
        """list_logs filters by source_id."""
        src = await blocklist_repo.create_source(db, "Src", "https://s.test/")
        await self._add(db, "https://s.test/", 5, source_id=src)
        await self._add(db, "https://other.test/", 3)
        items, total = await import_log_repo.list_logs(db, source_id=src)
        assert total == 1
        assert items[0]["source_url"] == "https://s.test/"

    async def test_get_last_log_empty(self, db: aiosqlite.Connection) -> None:
        """get_last_log returns None when no logs exist."""
        assert await import_log_repo.get_last_log(db) is None

    async def test_get_last_log_returns_most_recent(self, db: aiosqlite.Connection) -> None:
        """get_last_log returns the most recently inserted entry."""
        await self._add(db, "https://first.test/", 1)
        await self._add(db, "https://last.test/", 2)
        newest = await import_log_repo.get_last_log(db)
        assert newest is not None
        assert newest["source_url"] == "https://last.test/"

    async def test_compute_total_pages(self) -> None:
        """compute_total_pages returns correct page count."""
        assert import_log_repo.compute_total_pages(0, 10) == 1
        assert import_log_repo.compute_total_pages(10, 10) == 1
        assert import_log_repo.compute_total_pages(11, 10) == 2
        assert import_log_repo.compute_total_pages(20, 5) == 4
||||
69
backend/tests/test_repositories/test_db_init.py
Normal file
69
backend/tests/test_repositories/test_db_init.py
Normal file
@@ -0,0 +1,69 @@
|
||||
"""Tests for app.db — database schema initialisation."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.db import init_db
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_init_db_creates_settings_table(tmp_path: Path) -> None:
|
||||
"""``init_db`` must create the ``settings`` table."""
|
||||
db_path = str(tmp_path / "test.db")
|
||||
async with aiosqlite.connect(db_path) as db:
|
||||
await init_db(db)
|
||||
async with db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='settings';"
|
||||
) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
assert row is not None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_init_db_creates_sessions_table(tmp_path: Path) -> None:
|
||||
"""``init_db`` must create the ``sessions`` table."""
|
||||
db_path = str(tmp_path / "test.db")
|
||||
async with aiosqlite.connect(db_path) as db:
|
||||
await init_db(db)
|
||||
async with db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='sessions';"
|
||||
) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
assert row is not None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_init_db_creates_blocklist_sources_table(tmp_path: Path) -> None:
|
||||
"""``init_db`` must create the ``blocklist_sources`` table."""
|
||||
db_path = str(tmp_path / "test.db")
|
||||
async with aiosqlite.connect(db_path) as db:
|
||||
await init_db(db)
|
||||
async with db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='blocklist_sources';"
|
||||
) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
assert row is not None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_init_db_creates_import_log_table(tmp_path: Path) -> None:
|
||||
"""``init_db`` must create the ``import_log`` table."""
|
||||
db_path = str(tmp_path / "test.db")
|
||||
async with aiosqlite.connect(db_path) as db:
|
||||
await init_db(db)
|
||||
async with db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='import_log';"
|
||||
) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
assert row is not None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_init_db_is_idempotent(tmp_path: Path) -> None:
|
||||
"""Calling ``init_db`` twice on the same database must not raise."""
|
||||
db_path = str(tmp_path / "test.db")
|
||||
async with aiosqlite.connect(db_path) as db:
|
||||
await init_db(db)
|
||||
await init_db(db) # Second call must be a no-op.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user