remove part 3

This commit is contained in:
Lukas 2025-10-30 20:20:52 +01:00
parent fd76be02fd
commit 55781a8448
15 changed files with 173 additions and 3379 deletions

View File

@ -17,7 +17,7 @@
"keep_days": 30
},
"other": {
"master_password_hash": "$pbkdf2-sha256$29000$h/BeKwXA2BvDOAdgjFHqvQ$2Yg8AHGbPwrNDbxJbES3N3GiCI/hE2TthGAQ61AFir0",
"master_password_hash": "$pbkdf2-sha256$29000$gDAmhLCWsrY25vz/f28NIQ$Oi7qK6MnW/N67bDCiut6wiU/5KVFkdnxkg34XjQEDmQ",
"anime_directory": "/home/lukas/Volume/serien/"
},
"version": "1.0.0"

View File

@ -0,0 +1,24 @@
{
"name": "Aniworld",
"data_dir": "data",
"scheduler": {
"enabled": true,
"interval_minutes": 60
},
"logging": {
"level": "INFO",
"file": null,
"max_bytes": null,
"backup_count": 3
},
"backup": {
"enabled": false,
"path": "data/backups",
"keep_days": 30
},
"other": {
"master_password_hash": "$pbkdf2-sha256$29000$ivEe41yLkZIyBqA0Rmht7Q$71tDA8ymSS.OrdyZ0KYGQ5GFxqCaAR4UxbNVMH8nJNs",
"anime_directory": "/home/lukas/Volume/serien/"
},
"version": "1.0.0"
}
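The `master_password_hash` values in these fixtures are in passlib's pbkdf2_sha256 PHC format (`$pbkdf2-sha256$rounds$salt$checksum`). A minimal verification sketch, assuming passlib is the hashing library in use:

```python
# Hypothetical check of a master password against a stored PHC-format hash,
# assuming passlib (which produces "$pbkdf2-sha256$..." strings) is in use.
from passlib.hash import pbkdf2_sha256

stored_hash = (
    "$pbkdf2-sha256$29000$ivEe41yLkZIyBqA0Rmht7Q"
    "$71tDA8ymSS.OrdyZ0KYGQ5GFxqCaAR4UxbNVMH8nJNs"
)

def verify_master_password(candidate: str) -> bool:
    # verify() re-derives the key using the salt and round count embedded
    # in the hash string, then compares the result in constant time.
    return pbkdf2_sha256.verify(candidate, stored_hash)
```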

View File

@ -0,0 +1,24 @@
{
"name": "Aniworld",
"data_dir": "data",
"scheduler": {
"enabled": true,
"interval_minutes": 60
},
"logging": {
"level": "INFO",
"file": null,
"max_bytes": null,
"backup_count": 3
},
"backup": {
"enabled": false,
"path": "data/backups",
"keep_days": 30
},
"other": {
"master_password_hash": "$pbkdf2-sha256$29000$3zuHECKEcK4VwjjnHMN4rw$IQMdj42ajSj4Gp16BPTKt.Hl6byaTQOZSnSfPaej6xY",
"anime_directory": "/home/lukas/Volume/serien/"
},
"version": "1.0.0"
}

View File

@ -1,7 +1,7 @@
{
"pending": [
{
"id": "e2f16d76-6a15-4958-a523-dfc58d25cf40",
"id": "5e5dc91b-310a-46c5-8b50-3c08bc637356",
"serie_id": "workflow-series",
"serie_name": "Workflow Test Series",
"episode": {
@ -11,7 +11,7 @@
},
"status": "pending",
"priority": "high",
"added_at": "2025-10-30T19:09:58.786271Z",
"added_at": "2025-10-30T19:17:17.067624Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -20,7 +20,7 @@
"source_url": null
},
{
"id": "a2391f20-1430-4533-8928-2d501c0ffb3a",
"id": "b7827d50-c935-47d7-bec3-0348200c4816",
"serie_id": "series-2",
"serie_name": "Series 2",
"episode": {
@ -30,7 +30,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.375433Z",
"added_at": "2025-10-30T19:17:16.647481Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -39,7 +39,7 @@
"source_url": null
},
{
"id": "2335d591-6b56-4b6e-b6fc-84f3b8f86907",
"id": "600fb372-2043-4570-aa6b-d5c01f8767a2",
"serie_id": "series-1",
"serie_name": "Series 1",
"episode": {
@ -49,7 +49,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.372955Z",
"added_at": "2025-10-30T19:17:16.644472Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -58,7 +58,7 @@
"source_url": null
},
{
"id": "65e99af4-9ebc-4048-b5d7-30206e3116a4",
"id": "103b493a-417a-4f49-80d5-32b9dd37bcfc",
"serie_id": "series-0",
"serie_name": "Series 0",
"episode": {
@ -68,7 +68,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.370802Z",
"added_at": "2025-10-30T19:17:16.639634Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -77,7 +77,7 @@
"source_url": null
},
{
"id": "b4d3cf14-316d-4c80-ba88-6f3379983fd6",
"id": "9b83f463-bfdf-4aa1-8f39-a35a84bceeb8",
"serie_id": "series-high",
"serie_name": "Series High",
"episode": {
@ -87,7 +87,7 @@
},
"status": "pending",
"priority": "high",
"added_at": "2025-10-30T19:09:58.067288Z",
"added_at": "2025-10-30T19:17:16.317304Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -96,7 +96,7 @@
"source_url": null
},
{
"id": "2d236bcd-816c-4d64-ba69-b46fd8913134",
"id": "662747e0-861c-4ac3-b866-9a770c3f1539",
"serie_id": "test-series-2",
"serie_name": "Another Series",
"episode": {
@ -106,7 +106,7 @@
},
"status": "pending",
"priority": "high",
"added_at": "2025-10-30T19:09:58.029347Z",
"added_at": "2025-10-30T19:17:16.281208Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -115,7 +115,7 @@
"source_url": null
},
{
"id": "8547af6d-5f7f-46da-83c8-f4ba06d4f59c",
"id": "0c08edf9-2ea9-4bc9-b24d-94a05523020d",
"serie_id": "test-series-1",
"serie_name": "Test Anime Series",
"episode": {
@ -125,7 +125,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:57.994538Z",
"added_at": "2025-10-30T19:17:16.246180Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -134,7 +134,7 @@
"source_url": null
},
{
"id": "42e8ac82-def0-49be-91cb-eb248a9018ab",
"id": "dae85200-e270-44ae-baf9-51ab20b463a5",
"serie_id": "test-series-1",
"serie_name": "Test Anime Series",
"episode": {
@ -144,7 +144,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:57.994647Z",
"added_at": "2025-10-30T19:17:16.246296Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -153,7 +153,7 @@
"source_url": null
},
{
"id": "0091bf48-5377-4a9a-9a55-a0abd60a3258",
"id": "ee132f7f-9eda-4e0c-b822-400a7901f4ee",
"serie_id": "series-normal",
"serie_name": "Series Normal",
"episode": {
@ -163,7 +163,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.069543Z",
"added_at": "2025-10-30T19:17:16.319396Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -172,7 +172,7 @@
"source_url": null
},
{
"id": "c6f5cf39-94c8-4ba6-b397-a62c8d183329",
"id": "b0b8f295-dbf1-4180-82c4-f11261b7246b",
"serie_id": "series-low",
"serie_name": "Series Low",
"episode": {
@ -182,7 +182,7 @@
},
"status": "pending",
"priority": "low",
"added_at": "2025-10-30T19:09:58.071479Z",
"added_at": "2025-10-30T19:17:16.321350Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -191,7 +191,7 @@
"source_url": null
},
{
"id": "c939d7ec-1918-4b1e-9ad4-d8bf9256ccf6",
"id": "19c0f081-7b82-42bd-8b49-e09f99b3df43",
"serie_id": "test-series",
"serie_name": "Test Series",
"episode": {
@ -201,7 +201,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.306482Z",
"added_at": "2025-10-30T19:17:16.572221Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -210,7 +210,7 @@
"source_url": null
},
{
"id": "b471abef-dc4e-4f0d-a26a-3342ed54f4de",
"id": "be242bf2-de52-46f1-b406-898a5bc55af9",
"serie_id": "test-series",
"serie_name": "Test Series",
"episode": {
@ -220,7 +220,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.410862Z",
"added_at": "2025-10-30T19:17:16.682838Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -229,7 +229,7 @@
"source_url": null
},
{
"id": "b52c97d5-6897-4762-a72a-4ed19701b187",
"id": "d6f4038d-0962-4ecf-999a-2db499e7cc2e",
"serie_id": "invalid-series",
"serie_name": "Invalid Series",
"episode": {
@ -239,7 +239,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.481423Z",
"added_at": "2025-10-30T19:17:16.748563Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -248,7 +248,7 @@
"source_url": null
},
{
"id": "7a6fda0d-0402-47fe-a833-597fc625463c",
"id": "5d65f9c8-a530-446e-babd-d689a26d21af",
"serie_id": "test-series",
"serie_name": "Test Series",
"episode": {
@ -258,7 +258,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.515500Z",
"added_at": "2025-10-30T19:17:16.780706Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -267,7 +267,7 @@
"source_url": null
},
{
"id": "9edd6b21-8b50-417c-a971-38ed5dfd2fec",
"id": "2271d5a2-f9fd-410c-acbd-2c0b03208397",
"serie_id": "series-3",
"serie_name": "Series 3",
"episode": {
@ -277,7 +277,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.601566Z",
"added_at": "2025-10-30T19:17:16.866313Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -286,45 +286,7 @@
"source_url": null
},
{
"id": "5a076539-ff06-4ec6-9ff4-4f83288bb0f7",
"serie_id": "series-4",
"serie_name": "Series 4",
"episode": {
"season": 1,
"episode": 1,
"title": null
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.602644Z",
"started_at": null,
"completed_at": null,
"progress": null,
"error": null,
"retry_count": 0,
"source_url": null
},
{
"id": "2d4da5dc-9a8a-4b7a-94ba-65d6992649d1",
"serie_id": "series-1",
"serie_name": "Series 1",
"episode": {
"season": 1,
"episode": 1,
"title": null
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.603367Z",
"started_at": null,
"completed_at": null,
"progress": null,
"error": null,
"retry_count": 0,
"source_url": null
},
{
"id": "f1cc1316-271f-497e-a89c-b40f4ae55af3",
"id": "ddd4f9af-1741-46d7-89b9-db3f3533fdc0",
"serie_id": "series-0",
"serie_name": "Series 0",
"episode": {
@ -334,7 +296,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.604139Z",
"added_at": "2025-10-30T19:17:16.867199Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -343,7 +305,7 @@
"source_url": null
},
{
"id": "5dcf3947-3b48-459a-a5b3-7a7b80bde64d",
"id": "9b800fb7-2cc3-4662-88e5-917da8aa164e",
"serie_id": "series-2",
"serie_name": "Series 2",
"episode": {
@ -353,7 +315,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.605026Z",
"added_at": "2025-10-30T19:17:16.868013Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -362,7 +324,45 @@
"source_url": null
},
{
"id": "981cee6f-9404-4973-975a-8f8988c9bd15",
"id": "8defdb03-66d8-44df-8d5d-4e29e9ff5975",
"serie_id": "series-1",
"serie_name": "Series 1",
"episode": {
"season": 1,
"episode": 1,
"title": null
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:17:16.868670Z",
"started_at": null,
"completed_at": null,
"progress": null,
"error": null,
"retry_count": 0,
"source_url": null
},
{
"id": "1bbec06b-e99f-4ab8-9836-e061cd46149b",
"serie_id": "series-4",
"serie_name": "Series 4",
"episode": {
"season": 1,
"episode": 1,
"title": null
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:17:16.869328Z",
"started_at": null,
"completed_at": null,
"progress": null,
"error": null,
"retry_count": 0,
"source_url": null
},
{
"id": "872bd8c8-b3fd-4432-9a6b-a2612d8961c9",
"serie_id": "persistent-series",
"serie_name": "Persistent Series",
"episode": {
@ -372,7 +372,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.686065Z",
"added_at": "2025-10-30T19:17:16.955411Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -381,7 +381,7 @@
"source_url": null
},
{
"id": "5a33815e-e68f-4708-a0de-d8a23243ef0b",
"id": "ceee105d-c1e6-4336-be13-886849d642f7",
"serie_id": "ws-series",
"serie_name": "WebSocket Series",
"episode": {
@ -391,7 +391,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.752568Z",
"added_at": "2025-10-30T19:17:17.032768Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -400,7 +400,7 @@
"source_url": null
},
{
"id": "58ecc4b3-483a-4c85-ab68-47bf663edad0",
"id": "722f65b0-a323-488f-9345-60ea27b8d5ae",
"serie_id": "pause-test",
"serie_name": "Pause Test Series",
"episode": {
@ -410,7 +410,7 @@
},
"status": "pending",
"priority": "normal",
"added_at": "2025-10-30T19:09:58.927908Z",
"added_at": "2025-10-30T19:17:17.213711Z",
"started_at": null,
"completed_at": null,
"progress": null,
@ -421,5 +421,5 @@
],
"active": [],
"failed": [],
"timestamp": "2025-10-30T19:09:58.928247+00:00"
"timestamp": "2025-10-30T19:17:17.213969+00:00"
}
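For orientation, every entry in this fixture shares one item shape. A sketch of that shape as a Pydantic model, inferred from the JSON above and not necessarily identical to the project's actual model:

```python
# Queue-item shape implied by the fixture; field names mirror the JSON,
# types for null-valued fields (progress, source_url) are assumptions.
from datetime import datetime
from typing import Optional
from pydantic import BaseModel

class EpisodeRef(BaseModel):
    season: int
    episode: int
    title: Optional[str] = None

class QueueItem(BaseModel):
    id: str
    serie_id: str
    serie_name: str
    episode: EpisodeRef
    status: str      # "pending" throughout this fixture
    priority: str    # "low" | "normal" | "high"
    added_at: datetime
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    progress: Optional[float] = None
    error: Optional[str] = None
    retry_count: int = 0
    source_url: Optional[str] = None
```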

View File

@ -1,24 +1,46 @@
# Aniworld Web Application Features
## Authentication & Security
- **Master Password Login**: Secure access to the application with a master password system
- **JWT Token Sessions**: Stateless authentication with JSON Web Tokens
- **Rate Limiting**: Built-in protection against brute force attacks
## Configuration Management
- **Setup Page**: Initial configuration interface for server setup and basic settings
- **Config Page**: View and modify application configuration settings
- **Scheduler Configuration**: Configure automated rescan schedules
- **Backup Management**: Create, restore, and manage configuration backups
## User Interface
- **Dark Mode**: Toggle between light and dark themes for better user experience
- **Responsive Design**: Mobile-friendly interface with touch support
- **Real-time Updates**: WebSocket-based live notifications and progress tracking
## Anime Management
- **Anime Library Page**: Display list of anime series with missing episodes
- **Series Selection**: Select individual anime series and add episodes to download queue
- **Anime Search**: Search for anime series using integrated providers
- **Library Scanning**: Automated scanning for missing episodes
## Download Management
- **Download Queue Page**: View and manage the current download queue
- **Download Status Display**: Real-time status updates and progress of current downloads
- **Queue Operations**: Add, remove, prioritize, and reorder items in the download queue
- **Queue Control**: Start, stop, pause, and resume download processing
- **Retry Failed Downloads**: Automatically retry failed downloads with configurable limits
- **Clear Completed**: Remove completed downloads from the queue
## Real-time Communication
- **WebSocket Support**: Real-time notifications for download progress and queue updates
- **Progress Tracking**: Live progress updates for downloads and scans
- **System Notifications**: Real-time system messages and alerts
## Core Functionality Overview
The web application provides a complete interface for managing anime downloads with user-friendly pages for configuration, library management, search capabilities, and download monitoring. All operations are tracked in real-time with comprehensive progress reporting and error handling.
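The master-password login and JWT session flow described above can be exercised from a small client. A sketch, where the request payload and response field names are assumptions (only the `/api/auth/login` and `/api/anime` paths appear elsewhere in this commit):

```python
# Hypothetical client-side login flow for the master-password + JWT scheme.
import requests

BASE = "http://localhost:8000"  # assumed host/port

resp = requests.post(f"{BASE}/api/auth/login",
                     json={"password": "my-master-password"})
resp.raise_for_status()
token = resp.json()["access_token"]  # assumed response field

# Subsequent requests carry the JWT as a Bearer token.
anime = requests.get(f"{BASE}/api/anime",
                     headers={"Authorization": f"Bearer {token}"})
print(anime.json())
```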

View File

@ -41,9 +41,8 @@ conda activate AniWorld
│ │ │ ├── config.py # Configuration endpoints
│ │ │ ├── anime.py # Anime management endpoints
│ │ │ ├── download.py # Download queue endpoints
│ │ │ ├── scheduler.py # Scheduler configuration endpoints
│ │ │ └── websocket.py # WebSocket real-time endpoints
│ │ ├── models/ # Pydantic models
│ │ │ ├── __init__.py
│ │ │ ├── auth.py
@ -57,7 +56,10 @@ conda activate AniWorld
│ │ │ ├── config_service.py
│ │ │ ├── anime_service.py
│ │ │ ├── download_service.py
│ │ │ ├── websocket_service.py # WebSocket connection management
│ │ │ ├── progress_service.py # Progress tracking
│ │ │ ├── notification_service.py # Notification system
│ │ │ └── cache_service.py # Caching layer
│ │ ├── database/ # Database layer
│ │ │ ├── __init__.py # Database package
│ │ │ ├── base.py # Base models and mixins
@ -214,21 +216,6 @@ conda activate AniWorld
- `POST /api/scheduler/config` - Update scheduler configuration
- `POST /api/scheduler/trigger-rescan` - Manually trigger rescan
### Logging
- `GET /api/logging/config` - Get logging configuration
- `POST /api/logging/config` - Update logging configuration
- `GET /api/logging/files` - List all log files
- `GET /api/logging/files/{filename}/download` - Download log file
- `GET /api/logging/files/{filename}/tail` - Get last N lines of log file
- `POST /api/logging/test` - Test logging by writing messages at all levels
- `POST /api/logging/cleanup` - Clean up old log files
### Diagnostics
- `GET /api/diagnostics/network` - Run network connectivity diagnostics
- `GET /api/diagnostics/system` - Get basic system information
### Anime Management
- `GET /api/anime` - List anime with missing episodes
@ -255,75 +242,12 @@ initialization.
- `DELETE /api/queue/completed` - Clear completed downloads
- `POST /api/queue/retry` - Retry failed downloads
### Provider Management (October 2025)
The provider system has been enhanced with comprehensive health monitoring,
automatic failover, performance tracking, and dynamic configuration.
**Provider Health Monitoring:**
- `GET /api/providers/health` - Get overall provider health summary
- `GET /api/providers/health/{provider_name}` - Get specific provider health
- `GET /api/providers/available` - List currently available providers
- `GET /api/providers/best` - Get best performing provider
- `POST /api/providers/health/{provider_name}/reset` - Reset provider metrics
**Provider Configuration:**
- `GET /api/providers/config` - Get all provider configurations
- `GET /api/providers/config/{provider_name}` - Get specific provider config
- `PUT /api/providers/config/{provider_name}` - Update provider settings
- `POST /api/providers/config/{provider_name}/enable` - Enable provider
- `POST /api/providers/config/{provider_name}/disable` - Disable provider
**Failover Management:**
- `GET /api/providers/failover` - Get failover statistics
- `POST /api/providers/failover/{provider_name}/add` - Add to failover chain
- `DELETE /api/providers/failover/{provider_name}` - Remove from failover
**Provider Enhancement Features:**
- **Health Monitoring**: Real-time tracking of provider availability, response
times, success rates, and bandwidth usage. Automatic marking of providers as
unavailable after consecutive failures.
- **Automatic Failover**: Seamless switching between providers when primary
fails. Configurable retry attempts and delays.
- **Performance Tracking**: Wrapped provider interface that automatically
records metrics for all operations (search, download, metadata retrieval).
- **Dynamic Configuration**: Runtime updates to provider settings without
application restart. Configurable timeouts, retries, bandwidth limits.
- **Best Provider Selection**: Intelligent selection based on success rate,
response time, and availability.
**Provider Metrics Tracked:**
- Total requests (successful/failed)
- Average response time (milliseconds)
- Success rate (percentage)
- Consecutive failures count
- Total bytes downloaded
- Uptime percentage (last 60 minutes)
- Last error message and timestamp
**Implementation:**
- `src/core/providers/health_monitor.py` - ProviderHealthMonitor class
- `src/core/providers/failover.py` - ProviderFailover system
- `src/core/providers/monitored_provider.py` - Performance tracking wrapper
- `src/core/providers/config_manager.py` - Dynamic configuration manager
- `src/server/api/providers.py` - Provider management API endpoints
**Testing:**
- 34 unit tests covering health monitoring, failover, and configuration
- Tests for provider availability tracking and failover scenarios
- Configuration persistence and validation tests
### Search
- `GET /api/search?q={query}` - Search for anime
- `POST /api/search/add` - Add anime to library
### WebSocket
- `WS /api/ws` - WebSocket connection for real-time updates
- Real-time download progress notifications
- Queue status updates
- System notifications
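A minimal listener for this endpoint might look as follows; a sketch using the third-party `websockets` package, with host/port and message shape as assumptions:

```python
# Connect to /api/ws and print incoming real-time events.
import asyncio
import json
import websockets

async def listen() -> None:
    async with websockets.connect("ws://localhost:8000/api/ws") as ws:
        async for raw in ws:
            event = json.loads(raw)  # e.g. download progress, queue updates
            print(event)

asyncio.run(listen())
```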
## Logging
@ -345,7 +269,7 @@ automatic failover, performance tracking, and dynamic configuration.
- Master password protection for application access
- Secure session management with JWT tokens
- Input validation and sanitization
- Built-in rate limiting in authentication middleware
- HTTPS enforcement in production
- Secure file path handling to prevent directory traversal

View File

@ -1,331 +0,0 @@
"""Rate limiting middleware for API endpoints.
This module provides comprehensive rate limiting with support for:
- Endpoint-specific rate limits
- IP-based limiting
- User-based rate limiting
- Bypass mechanisms for authenticated users
"""
import time
from collections import defaultdict
from typing import Callable, Dict, Optional, Tuple
from fastapi import Request, status
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
class RateLimitConfig:
"""Configuration for rate limiting rules."""
def __init__(
self,
requests_per_minute: int = 60,
requests_per_hour: int = 1000,
authenticated_multiplier: float = 2.0,
):
"""Initialize rate limit configuration.
Args:
requests_per_minute: Max requests per minute for
unauthenticated users
requests_per_hour: Max requests per hour for
unauthenticated users
authenticated_multiplier: Multiplier for authenticated users
"""
self.requests_per_minute = requests_per_minute
self.requests_per_hour = requests_per_hour
self.authenticated_multiplier = authenticated_multiplier
class RateLimitStore:
"""In-memory store for rate limit tracking."""
def __init__(self):
"""Initialize the rate limit store."""
# Store format: {identifier: [timestamp, ...]}
self._minute_store: Dict[str, list] = defaultdict(list)
self._hour_store: Dict[str, list] = defaultdict(list)
def check_limit(
self,
identifier: str,
max_per_minute: int,
max_per_hour: int,
) -> Tuple[bool, Optional[int]]:
"""Check if the identifier has exceeded rate limits.
Args:
identifier: Unique identifier (IP or user ID)
max_per_minute: Maximum requests allowed per minute
max_per_hour: Maximum requests allowed per hour
Returns:
Tuple of (allowed, retry_after_seconds)
"""
current_time = time.time()
# Clean up old entries
self._cleanup_old_entries(identifier, current_time)
# Check minute limit
minute_count = len(self._minute_store[identifier])
if minute_count >= max_per_minute:
# Calculate retry after time
oldest_entry = self._minute_store[identifier][0]
retry_after = int(60 - (current_time - oldest_entry))
return False, max(retry_after, 1)
# Check hour limit
hour_count = len(self._hour_store[identifier])
if hour_count >= max_per_hour:
# Calculate retry after time
oldest_entry = self._hour_store[identifier][0]
retry_after = int(3600 - (current_time - oldest_entry))
return False, max(retry_after, 1)
return True, None
def record_request(self, identifier: str) -> None:
"""Record a request for the identifier.
Args:
identifier: Unique identifier (IP or user ID)
"""
current_time = time.time()
self._minute_store[identifier].append(current_time)
self._hour_store[identifier].append(current_time)
def get_remaining_requests(
self, identifier: str, max_per_minute: int, max_per_hour: int
) -> Tuple[int, int]:
"""Get remaining requests for the identifier.
Args:
identifier: Unique identifier
max_per_minute: Maximum per minute
max_per_hour: Maximum per hour
Returns:
Tuple of (remaining_per_minute, remaining_per_hour)
"""
minute_used = len(self._minute_store.get(identifier, []))
hour_used = len(self._hour_store.get(identifier, []))
return (
max(0, max_per_minute - minute_used),
max(0, max_per_hour - hour_used)
)
def _cleanup_old_entries(
self, identifier: str, current_time: float
) -> None:
"""Remove entries older than the time windows.
Args:
identifier: Unique identifier
current_time: Current timestamp
"""
# Remove entries older than 1 minute
minute_cutoff = current_time - 60
self._minute_store[identifier] = [
ts for ts in self._minute_store[identifier] if ts > minute_cutoff
]
# Remove entries older than 1 hour
hour_cutoff = current_time - 3600
self._hour_store[identifier] = [
ts for ts in self._hour_store[identifier] if ts > hour_cutoff
]
# Clean up empty entries
if not self._minute_store[identifier]:
del self._minute_store[identifier]
if not self._hour_store[identifier]:
del self._hour_store[identifier]
class RateLimitMiddleware(BaseHTTPMiddleware):
"""Middleware for API rate limiting."""
# Endpoint-specific rate limits (overrides defaults)
ENDPOINT_LIMITS: Dict[str, RateLimitConfig] = {
"/api/auth/login": RateLimitConfig(
requests_per_minute=5,
requests_per_hour=20,
),
"/api/auth/register": RateLimitConfig(
requests_per_minute=3,
requests_per_hour=10,
),
"/api/download": RateLimitConfig(
requests_per_minute=10,
requests_per_hour=100,
authenticated_multiplier=3.0,
),
}
# Paths that bypass rate limiting
BYPASS_PATHS = {
"/health",
"/health/detailed",
"/docs",
"/redoc",
"/openapi.json",
"/static",
"/ws",
}
def __init__(
self,
app,
default_config: Optional[RateLimitConfig] = None,
):
"""Initialize rate limiting middleware.
Args:
app: FastAPI application
default_config: Default rate limit configuration
"""
super().__init__(app)
self.default_config = default_config or RateLimitConfig()
self.store = RateLimitStore()
async def dispatch(self, request: Request, call_next: Callable):
"""Process request and apply rate limiting.
Args:
request: Incoming HTTP request
call_next: Next middleware or endpoint handler
Returns:
HTTP response (either rate limit error or normal response)
"""
# Check if path should bypass rate limiting
if self._should_bypass(request.url.path):
return await call_next(request)
# Get identifier (user ID if authenticated, otherwise IP)
identifier = self._get_identifier(request)
# Get rate limit configuration for this endpoint
config = self._get_endpoint_config(request.url.path)
# Apply authenticated user multiplier if applicable
is_authenticated = self._is_authenticated(request)
max_per_minute = int(
config.requests_per_minute *
(config.authenticated_multiplier if is_authenticated else 1.0)
)
max_per_hour = int(
config.requests_per_hour *
(config.authenticated_multiplier if is_authenticated else 1.0)
)
# Check rate limit
allowed, retry_after = self.store.check_limit(
identifier,
max_per_minute,
max_per_hour,
)
if not allowed:
return JSONResponse(
status_code=status.HTTP_429_TOO_MANY_REQUESTS,
content={"detail": "Rate limit exceeded"},
headers={"Retry-After": str(retry_after)},
)
# Record the request
self.store.record_request(identifier)
# Add rate limit headers to response
response = await call_next(request)
response.headers["X-RateLimit-Limit-Minute"] = str(max_per_minute)
response.headers["X-RateLimit-Limit-Hour"] = str(max_per_hour)
minute_remaining, hour_remaining = self.store.get_remaining_requests(
identifier, max_per_minute, max_per_hour
)
response.headers["X-RateLimit-Remaining-Minute"] = str(
minute_remaining
)
response.headers["X-RateLimit-Remaining-Hour"] = str(
hour_remaining
)
return response
def _should_bypass(self, path: str) -> bool:
"""Check if path should bypass rate limiting.
Args:
path: Request path
Returns:
True if path should bypass rate limiting
"""
for bypass_path in self.BYPASS_PATHS:
if path.startswith(bypass_path):
return True
return False
def _get_identifier(self, request: Request) -> str:
"""Get unique identifier for rate limiting.
Args:
request: HTTP request
Returns:
Unique identifier (user ID or IP address)
"""
# Try to get user ID from request state (set by auth middleware)
user_id = getattr(request.state, "user_id", None)
if user_id:
return f"user:{user_id}"
# Fall back to IP address
# Check for X-Forwarded-For header (proxy/load balancer)
forwarded_for = request.headers.get("X-Forwarded-For")
if forwarded_for:
# Take the first IP in the chain
client_ip = forwarded_for.split(",")[0].strip()
else:
client_ip = request.client.host if request.client else "unknown"
return f"ip:{client_ip}"
def _get_endpoint_config(self, path: str) -> RateLimitConfig:
"""Get rate limit configuration for endpoint.
Args:
path: Request path
Returns:
Rate limit configuration
"""
# Check for exact match
if path in self.ENDPOINT_LIMITS:
return self.ENDPOINT_LIMITS[path]
# Check for prefix match
for endpoint_path, config in self.ENDPOINT_LIMITS.items():
if path.startswith(endpoint_path):
return config
return self.default_config
def _is_authenticated(self, request: Request) -> bool:
"""Check if request is from authenticated user.
Args:
request: HTTP request
Returns:
True if user is authenticated
"""
return (
hasattr(request.state, "user_id") and
request.state.user_id is not None
)
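For reference, this removed middleware would typically have been wired into the app roughly as follows; a sketch, not the project's actual startup code:

```python
# Register the rate limiter on a FastAPI app; Starlette passes the keyword
# options through to RateLimitMiddleware.__init__.
from fastapi import FastAPI

app = FastAPI()
app.add_middleware(
    RateLimitMiddleware,
    default_config=RateLimitConfig(requests_per_minute=60,
                                   requests_per_hour=1000),
)
```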

View File

@ -1,423 +0,0 @@
"""Analytics service for downloads, popularity, and performance metrics.
This module provides comprehensive analytics tracking including download
statistics, series popularity analysis, storage usage trends, and
performance reporting.
"""
import json
import logging
from dataclasses import asdict, dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional
import psutil
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.server.database.models import DownloadQueueItem, DownloadStatus
logger = logging.getLogger(__name__)
ANALYTICS_FILE = Path("data") / "analytics.json"
@dataclass
class DownloadStats:
"""Download statistics snapshot."""
total_downloads: int = 0
successful_downloads: int = 0
failed_downloads: int = 0
total_bytes_downloaded: int = 0
average_speed_mbps: float = 0.0
success_rate: float = 0.0
average_duration_seconds: float = 0.0
@dataclass
class SeriesPopularity:
"""Series popularity metrics."""
series_name: str
download_count: int
total_size_bytes: int
last_download: Optional[str] = None
success_rate: float = 0.0
@dataclass
class StorageAnalysis:
"""Storage usage analysis."""
total_storage_bytes: int = 0
used_storage_bytes: int = 0
free_storage_bytes: int = 0
storage_percent_used: float = 0.0
downloads_directory_size_bytes: int = 0
cache_directory_size_bytes: int = 0
logs_directory_size_bytes: int = 0
@dataclass
class PerformanceReport:
"""Performance metrics and trends."""
period_start: str
period_end: str
downloads_per_hour: float = 0.0
average_queue_size: float = 0.0
peak_memory_usage_mb: float = 0.0
average_cpu_percent: float = 0.0
uptime_seconds: float = 0.0
error_rate: float = 0.0
samples: List[Dict[str, Any]] = field(default_factory=list)
class AnalyticsService:
"""Service for tracking and reporting analytics data."""
def __init__(self):
"""Initialize the analytics service."""
self.analytics_file = ANALYTICS_FILE
self._ensure_analytics_file()
def _ensure_analytics_file(self) -> None:
"""Ensure analytics file exists with default data."""
if not self.analytics_file.exists():
default_data = {
"created_at": datetime.now().isoformat(),
"last_updated": datetime.now().isoformat(),
"download_stats": asdict(DownloadStats()),
"series_popularity": [],
"storage_history": [],
"performance_samples": [],
}
self.analytics_file.write_text(json.dumps(default_data, indent=2))
def _load_analytics(self) -> Dict[str, Any]:
"""Load analytics data from file."""
try:
return json.loads(self.analytics_file.read_text())
except (FileNotFoundError, json.JSONDecodeError):
self._ensure_analytics_file()
return json.loads(self.analytics_file.read_text())
def _save_analytics(self, data: Dict[str, Any]) -> None:
"""Save analytics data to file."""
data["last_updated"] = datetime.now().isoformat()
self.analytics_file.write_text(json.dumps(data, indent=2))
async def get_download_stats(
self, db: AsyncSession, days: int = 30
) -> DownloadStats:
"""Get download statistics for the specified period.
Args:
db: Database session
days: Number of days to analyze
Returns:
DownloadStats with aggregated download data
"""
cutoff_date = datetime.now() - timedelta(days=days)
# Query downloads within period
stmt = select(DownloadQueueItem).where(
DownloadQueueItem.created_at >= cutoff_date
)
result = await db.execute(stmt)
downloads = result.scalars().all()
if not downloads:
return DownloadStats()
successful = [d for d in downloads
if d.status == DownloadStatus.COMPLETED]
failed = [d for d in downloads
if d.status == DownloadStatus.FAILED]
total_bytes = sum(d.total_bytes or 0 for d in successful)
avg_speed_list = [
d.download_speed or 0.0 for d in successful if d.download_speed
]
avg_speed_mbps = (
sum(avg_speed_list) / len(avg_speed_list) / (1024 * 1024)
if avg_speed_list
else 0.0
)
success_rate = (
len(successful) / len(downloads) * 100 if downloads else 0.0
)
return DownloadStats(
total_downloads=len(downloads),
successful_downloads=len(successful),
failed_downloads=len(failed),
total_bytes_downloaded=total_bytes,
average_speed_mbps=avg_speed_mbps,
success_rate=success_rate,
average_duration_seconds=0.0, # Not available in model
)
async def get_series_popularity(
self, db: AsyncSession, limit: int = 10
) -> List[SeriesPopularity]:
"""Get most popular series by download count.
Args:
db: Database session
limit: Maximum number of series to return
Returns:
List of SeriesPopularity objects
"""
# Use raw SQL approach since we need to group and join
from sqlalchemy import text
query = text("""
SELECT
s.title as series_name,
COUNT(d.id) as download_count,
SUM(d.total_bytes) as total_size,
MAX(d.created_at) as last_download,
SUM(CASE WHEN d.status = 'COMPLETED'
THEN 1 ELSE 0 END) as successful
FROM download_queue d
JOIN anime_series s ON d.series_id = s.id
GROUP BY s.id, s.title
ORDER BY download_count DESC
LIMIT :limit
""")
result = await db.execute(query, {"limit": limit})
rows = result.all()
popularity = []
for row in rows:
success_rate = 0.0
download_count = row[1] or 0
if download_count > 0:
successful = row[4] or 0
success_rate = (successful / download_count * 100)
popularity.append(
SeriesPopularity(
series_name=row[0] or "Unknown",
download_count=download_count,
total_size_bytes=row[2] or 0,
last_download=row[3].isoformat()
if row[3]
else None,
success_rate=success_rate,
)
)
return popularity
def get_storage_analysis(self) -> StorageAnalysis:
"""Get current storage usage analysis.
Returns:
StorageAnalysis with storage breakdown
"""
try:
# Get disk usage for the root filesystem (which holds the data directory)
disk = psutil.disk_usage("/")
total = disk.total
used = disk.used
free = disk.free
analysis = StorageAnalysis(
total_storage_bytes=total,
used_storage_bytes=used,
free_storage_bytes=free,
storage_percent_used=disk.percent,
downloads_directory_size_bytes=self._get_dir_size(
Path("data")
),
cache_directory_size_bytes=self._get_dir_size(
Path("data") / "cache"
),
logs_directory_size_bytes=self._get_dir_size(
Path("logs")
),
)
return analysis
except Exception as e:
logger.error(f"Storage analysis failed: {e}")
return StorageAnalysis()
def _get_dir_size(self, path: Path) -> int:
"""Calculate total size of directory.
Args:
path: Directory path
Returns:
Total size in bytes
"""
if not path.exists():
return 0
total = 0
try:
for item in path.rglob("*"):
if item.is_file():
total += item.stat().st_size
except (OSError, PermissionError):
pass
return total
async def get_performance_report(
self, db: AsyncSession, hours: int = 24
) -> PerformanceReport:
"""Get performance metrics for the specified period.
Args:
db: Database session
hours: Number of hours to analyze
Returns:
PerformanceReport with performance metrics
"""
cutoff_time = datetime.now() - timedelta(hours=hours)
# Get download metrics
stmt = select(DownloadQueueItem).where(
DownloadQueueItem.created_at >= cutoff_time
)
result = await db.execute(stmt)
downloads = result.scalars().all()
downloads_per_hour = len(downloads) / max(hours, 1)
# Get queue size over time (estimated from analytics)
analytics = self._load_analytics()
performance_samples = analytics.get("performance_samples", [])
# Filter recent samples
recent_samples = [
s
for s in performance_samples
if datetime.fromisoformat(s.get("timestamp", "2000-01-01"))
>= cutoff_time
]
avg_queue = sum(
s.get("queue_size", 0) for s in recent_samples
) / len(recent_samples) if recent_samples else 0.0
# Get memory and CPU stats
process = psutil.Process()
memory_info = process.memory_info()
peak_memory_mb = memory_info.rss / (1024 * 1024)
cpu_percent = process.cpu_percent(interval=1)
# Calculate error rate
failed_count = sum(
1 for d in downloads
if d.status == DownloadStatus.FAILED
)
error_rate = (
failed_count / len(downloads) * 100 if downloads else 0.0
)
# Get uptime
boot_time = datetime.fromtimestamp(psutil.boot_time())
uptime_seconds = (datetime.now() - boot_time).total_seconds()
return PerformanceReport(
period_start=cutoff_time.isoformat(),
period_end=datetime.now().isoformat(),
downloads_per_hour=downloads_per_hour,
average_queue_size=avg_queue,
peak_memory_usage_mb=peak_memory_mb,
average_cpu_percent=cpu_percent,
uptime_seconds=uptime_seconds,
error_rate=error_rate,
samples=recent_samples[-100:], # Keep last 100 samples
)
def record_performance_sample(
self,
queue_size: int,
active_downloads: int,
cpu_percent: float,
memory_mb: float,
) -> None:
"""Record a performance metric sample.
Args:
queue_size: Current queue size
active_downloads: Number of active downloads
cpu_percent: Current CPU usage percentage
memory_mb: Current memory usage in MB
"""
analytics = self._load_analytics()
samples = analytics.get("performance_samples", [])
sample = {
"timestamp": datetime.now().isoformat(),
"queue_size": queue_size,
"active_downloads": active_downloads,
"cpu_percent": cpu_percent,
"memory_mb": memory_mb,
}
samples.append(sample)
# Keep only recent samples (7 days worth at 1 sample per minute)
max_samples = 7 * 24 * 60
if len(samples) > max_samples:
samples = samples[-max_samples:]
analytics["performance_samples"] = samples
self._save_analytics(analytics)
async def generate_summary_report(
self, db: AsyncSession
) -> Dict[str, Any]:
"""Generate comprehensive analytics summary.
Args:
db: Database session
Returns:
Summary report with all analytics
"""
download_stats = await self.get_download_stats(db)
series_popularity = await self.get_series_popularity(db, limit=5)
storage = self.get_storage_analysis()
performance = await self.get_performance_report(db)
return {
"timestamp": datetime.now().isoformat(),
"download_stats": asdict(download_stats),
"series_popularity": [
asdict(s) for s in series_popularity
],
"storage_analysis": asdict(storage),
"performance_report": asdict(performance),
}
_analytics_service_instance: Optional[AnalyticsService] = None
def get_analytics_service() -> AnalyticsService:
"""Get or create singleton analytics service instance.
Returns:
AnalyticsService instance
"""
global _analytics_service_instance
if _analytics_service_instance is None:
_analytics_service_instance = AnalyticsService()
return _analytics_service_instance
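A sketch of the removed analytics service in use; the `AsyncSession` would come from the app's database layer and is assumed here:

```python
# Record a sample from a periodic background task, then build a report.
service = get_analytics_service()
service.record_performance_sample(
    queue_size=5, active_downloads=2, cpu_percent=12.5, memory_mb=180.0
)

async def print_summary(db) -> None:  # db: AsyncSession from the app
    summary = await service.generate_summary_report(db)
    print(summary["download_stats"]["success_rate"])
```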

View File

@ -1,610 +0,0 @@
"""
Audit Service for AniWorld.
This module provides comprehensive audit logging for security-critical
operations including authentication, configuration changes, and downloads.
"""
import json
import logging
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
class AuditEventType(str, Enum):
"""Types of audit events."""
# Authentication events
AUTH_SETUP = "auth.setup"
AUTH_LOGIN_SUCCESS = "auth.login.success"
AUTH_LOGIN_FAILURE = "auth.login.failure"
AUTH_LOGOUT = "auth.logout"
AUTH_TOKEN_REFRESH = "auth.token.refresh"
AUTH_TOKEN_INVALID = "auth.token.invalid"
# Configuration events
CONFIG_READ = "config.read"
CONFIG_UPDATE = "config.update"
CONFIG_BACKUP = "config.backup"
CONFIG_RESTORE = "config.restore"
CONFIG_DELETE = "config.delete"
# Download events
DOWNLOAD_ADDED = "download.added"
DOWNLOAD_STARTED = "download.started"
DOWNLOAD_COMPLETED = "download.completed"
DOWNLOAD_FAILED = "download.failed"
DOWNLOAD_CANCELLED = "download.cancelled"
DOWNLOAD_REMOVED = "download.removed"
# Queue events
QUEUE_STARTED = "queue.started"
QUEUE_STOPPED = "queue.stopped"
QUEUE_PAUSED = "queue.paused"
QUEUE_RESUMED = "queue.resumed"
QUEUE_CLEARED = "queue.cleared"
# System events
SYSTEM_STARTUP = "system.startup"
SYSTEM_SHUTDOWN = "system.shutdown"
SYSTEM_ERROR = "system.error"
class AuditEventSeverity(str, Enum):
"""Severity levels for audit events."""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
CRITICAL = "critical"
class AuditEvent(BaseModel):
"""Audit event model."""
timestamp: datetime = Field(default_factory=datetime.utcnow)
event_type: AuditEventType
severity: AuditEventSeverity = AuditEventSeverity.INFO
user_id: Optional[str] = None
ip_address: Optional[str] = None
user_agent: Optional[str] = None
resource: Optional[str] = None
action: Optional[str] = None
status: str = "success"
message: str
details: Optional[Dict[str, Any]] = None
session_id: Optional[str] = None
class Config:
"""Pydantic config."""
json_encoders = {datetime: lambda v: v.isoformat()}
class AuditLogStorage:
"""Base class for audit log storage backends."""
async def write_event(self, event: AuditEvent) -> None:
"""
Write an audit event to storage.
Args:
event: Audit event to write
"""
raise NotImplementedError
async def read_events(
self,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
event_types: Optional[List[AuditEventType]] = None,
user_id: Optional[str] = None,
limit: int = 100,
) -> List[AuditEvent]:
"""
Read audit events from storage.
Args:
start_time: Start of time range
end_time: End of time range
event_types: Filter by event types
user_id: Filter by user ID
limit: Maximum number of events to return
Returns:
List of audit events
"""
raise NotImplementedError
async def cleanup_old_events(self, days: int = 90) -> int:
"""
Clean up audit events older than specified days.
Args:
days: Number of days to retain
Returns:
Number of events deleted
"""
raise NotImplementedError
class FileAuditLogStorage(AuditLogStorage):
"""File-based audit log storage."""
def __init__(self, log_directory: str = "logs/audit"):
"""
Initialize file-based audit log storage.
Args:
log_directory: Directory to store audit logs
"""
self.log_directory = Path(log_directory)
self.log_directory.mkdir(parents=True, exist_ok=True)
self._current_date: Optional[str] = None
self._current_file: Optional[Path] = None
def _get_log_file(self, date: datetime) -> Path:
"""
Get log file path for a specific date.
Args:
date: Date for log file
Returns:
Path to log file
"""
date_str = date.strftime("%Y-%m-%d")
return self.log_directory / f"audit_{date_str}.jsonl"
async def write_event(self, event: AuditEvent) -> None:
"""
Write an audit event to file.
Args:
event: Audit event to write
"""
log_file = self._get_log_file(event.timestamp)
try:
with open(log_file, "a", encoding="utf-8") as f:
f.write(event.model_dump_json() + "\n")
except Exception as e:
logger.error(f"Failed to write audit event to file: {e}")
async def read_events(
self,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
event_types: Optional[List[AuditEventType]] = None,
user_id: Optional[str] = None,
limit: int = 100,
) -> List[AuditEvent]:
"""
Read audit events from files.
Args:
start_time: Start of time range
end_time: End of time range
event_types: Filter by event types
user_id: Filter by user ID
limit: Maximum number of events to return
Returns:
List of audit events
"""
if start_time is None:
start_time = datetime.utcnow() - timedelta(days=7)
if end_time is None:
end_time = datetime.utcnow()
events: List[AuditEvent] = []
current_date = start_time.date()
end_date = end_time.date()
# Read from all log files in date range
while current_date <= end_date and len(events) < limit:
log_file = self._get_log_file(datetime.combine(current_date, datetime.min.time()))
if log_file.exists():
try:
with open(log_file, "r", encoding="utf-8") as f:
for line in f:
if len(events) >= limit:
break
try:
event_data = json.loads(line.strip())
event = AuditEvent(**event_data)
# Apply filters
if event.timestamp < start_time or event.timestamp > end_time:
continue
if event_types and event.event_type not in event_types:
continue
if user_id and event.user_id != user_id:
continue
events.append(event)
except (json.JSONDecodeError, ValueError) as e:
logger.warning(f"Failed to parse audit event: {e}")
except Exception as e:
logger.error(f"Failed to read audit log file {log_file}: {e}")
current_date += timedelta(days=1)
# Sort by timestamp descending
events.sort(key=lambda e: e.timestamp, reverse=True)
return events[:limit]
async def cleanup_old_events(self, days: int = 90) -> int:
"""
Clean up audit events older than specified days.
Args:
days: Number of days to retain
Returns:
Number of files deleted
"""
cutoff_date = datetime.utcnow() - timedelta(days=days)
deleted_count = 0
for log_file in self.log_directory.glob("audit_*.jsonl"):
try:
# Extract date from filename
date_str = log_file.stem.replace("audit_", "")
file_date = datetime.strptime(date_str, "%Y-%m-%d")
if file_date < cutoff_date:
log_file.unlink()
deleted_count += 1
logger.info(f"Deleted old audit log: {log_file}")
except (ValueError, OSError) as e:
logger.warning(f"Failed to process audit log file {log_file}: {e}")
return deleted_count
class AuditService:
"""Main audit service for logging security events."""
def __init__(self, storage: Optional[AuditLogStorage] = None):
"""
Initialize audit service.
Args:
storage: Storage backend for audit logs
"""
self.storage = storage or FileAuditLogStorage()
async def log_event(
self,
event_type: AuditEventType,
message: str,
severity: AuditEventSeverity = AuditEventSeverity.INFO,
user_id: Optional[str] = None,
ip_address: Optional[str] = None,
user_agent: Optional[str] = None,
resource: Optional[str] = None,
action: Optional[str] = None,
status: str = "success",
details: Optional[Dict[str, Any]] = None,
session_id: Optional[str] = None,
) -> None:
"""
Log an audit event.
Args:
event_type: Type of event
message: Human-readable message
severity: Event severity
user_id: User identifier
ip_address: Client IP address
user_agent: Client user agent
resource: Resource being accessed
action: Action performed
status: Operation status
details: Additional details
session_id: Session identifier
"""
event = AuditEvent(
event_type=event_type,
severity=severity,
user_id=user_id,
ip_address=ip_address,
user_agent=user_agent,
resource=resource,
action=action,
status=status,
message=message,
details=details,
session_id=session_id,
)
await self.storage.write_event(event)
# Also log to application logger for high severity events
if severity in [AuditEventSeverity.ERROR, AuditEventSeverity.CRITICAL]:
logger.error(f"Audit: {message}", extra={"audit_event": event.model_dump()})
elif severity == AuditEventSeverity.WARNING:
logger.warning(f"Audit: {message}", extra={"audit_event": event.model_dump()})
async def log_auth_setup(
self, user_id: str, ip_address: Optional[str] = None
) -> None:
"""Log initial authentication setup."""
await self.log_event(
event_type=AuditEventType.AUTH_SETUP,
message=f"Authentication configured by user {user_id}",
user_id=user_id,
ip_address=ip_address,
action="setup",
)
async def log_login_success(
self,
user_id: str,
ip_address: Optional[str] = None,
user_agent: Optional[str] = None,
session_id: Optional[str] = None,
) -> None:
"""Log successful login."""
await self.log_event(
event_type=AuditEventType.AUTH_LOGIN_SUCCESS,
message=f"User {user_id} logged in successfully",
user_id=user_id,
ip_address=ip_address,
user_agent=user_agent,
session_id=session_id,
action="login",
)
async def log_login_failure(
self,
user_id: Optional[str] = None,
ip_address: Optional[str] = None,
user_agent: Optional[str] = None,
reason: str = "Invalid credentials",
) -> None:
"""Log failed login attempt."""
await self.log_event(
event_type=AuditEventType.AUTH_LOGIN_FAILURE,
message=f"Login failed for user {user_id or 'unknown'}: {reason}",
severity=AuditEventSeverity.WARNING,
user_id=user_id,
ip_address=ip_address,
user_agent=user_agent,
status="failure",
action="login",
details={"reason": reason},
)
async def log_logout(
self,
user_id: str,
ip_address: Optional[str] = None,
session_id: Optional[str] = None,
) -> None:
"""Log user logout."""
await self.log_event(
event_type=AuditEventType.AUTH_LOGOUT,
message=f"User {user_id} logged out",
user_id=user_id,
ip_address=ip_address,
session_id=session_id,
action="logout",
)
async def log_config_update(
self,
user_id: str,
changes: Dict[str, Any],
ip_address: Optional[str] = None,
) -> None:
"""Log configuration update."""
await self.log_event(
event_type=AuditEventType.CONFIG_UPDATE,
message=f"Configuration updated by user {user_id}",
user_id=user_id,
ip_address=ip_address,
resource="config",
action="update",
details={"changes": changes},
)
async def log_config_backup(
self, user_id: str, backup_file: str, ip_address: Optional[str] = None
) -> None:
"""Log configuration backup."""
await self.log_event(
event_type=AuditEventType.CONFIG_BACKUP,
message=f"Configuration backed up by user {user_id}",
user_id=user_id,
ip_address=ip_address,
resource="config",
action="backup",
details={"backup_file": backup_file},
)
async def log_config_restore(
self, user_id: str, backup_file: str, ip_address: Optional[str] = None
) -> None:
"""Log configuration restore."""
await self.log_event(
event_type=AuditEventType.CONFIG_RESTORE,
message=f"Configuration restored by user {user_id}",
user_id=user_id,
ip_address=ip_address,
resource="config",
action="restore",
details={"backup_file": backup_file},
)
async def log_download_added(
self,
user_id: str,
series_name: str,
episodes: List[str],
ip_address: Optional[str] = None,
) -> None:
"""Log download added to queue."""
await self.log_event(
event_type=AuditEventType.DOWNLOAD_ADDED,
message=f"Download added by user {user_id}: {series_name}",
user_id=user_id,
ip_address=ip_address,
resource=series_name,
action="add",
details={"episodes": episodes},
)
async def log_download_completed(
self, series_name: str, episode: str, file_path: str
) -> None:
"""Log completed download."""
await self.log_event(
event_type=AuditEventType.DOWNLOAD_COMPLETED,
message=f"Download completed: {series_name} - {episode}",
resource=series_name,
action="download",
details={"episode": episode, "file_path": file_path},
)
async def log_download_failed(
self, series_name: str, episode: str, error: str
) -> None:
"""Log failed download."""
await self.log_event(
event_type=AuditEventType.DOWNLOAD_FAILED,
message=f"Download failed: {series_name} - {episode}",
severity=AuditEventSeverity.ERROR,
resource=series_name,
action="download",
status="failure",
details={"episode": episode, "error": error},
)
async def log_queue_operation(
self,
user_id: str,
operation: str,
ip_address: Optional[str] = None,
details: Optional[Dict[str, Any]] = None,
) -> None:
"""Log queue operation."""
event_type_map = {
"start": AuditEventType.QUEUE_STARTED,
"stop": AuditEventType.QUEUE_STOPPED,
"pause": AuditEventType.QUEUE_PAUSED,
"resume": AuditEventType.QUEUE_RESUMED,
"clear": AuditEventType.QUEUE_CLEARED,
}
event_type = event_type_map.get(operation, AuditEventType.SYSTEM_ERROR)
await self.log_event(
event_type=event_type,
message=f"Queue {operation} by user {user_id}",
user_id=user_id,
ip_address=ip_address,
resource="queue",
action=operation,
details=details,
)
async def log_system_error(
self, error: str, details: Optional[Dict[str, Any]] = None
) -> None:
"""Log system error."""
await self.log_event(
event_type=AuditEventType.SYSTEM_ERROR,
message=f"System error: {error}",
severity=AuditEventSeverity.ERROR,
status="error",
details=details,
)
async def get_events(
self,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
event_types: Optional[List[AuditEventType]] = None,
user_id: Optional[str] = None,
limit: int = 100,
) -> List[AuditEvent]:
"""
Get audit events with filters.
Args:
start_time: Start of time range
end_time: End of time range
event_types: Filter by event types
user_id: Filter by user ID
limit: Maximum number of events to return
Returns:
List of audit events
"""
return await self.storage.read_events(
start_time=start_time,
end_time=end_time,
event_types=event_types,
user_id=user_id,
limit=limit,
)
async def cleanup_old_events(self, days: int = 90) -> int:
"""
Clean up old audit events.
Args:
days: Number of days to retain
Returns:
Number of events deleted
"""
return await self.storage.cleanup_old_events(days)
# Global audit service instance
_audit_service: Optional[AuditService] = None
def get_audit_service() -> AuditService:
"""
Get the global audit service instance.
Returns:
AuditService instance
"""
global _audit_service
if _audit_service is None:
_audit_service = AuditService()
return _audit_service
def configure_audit_service(storage: Optional[AuditLogStorage] = None) -> AuditService:
"""
Configure the global audit service.
Args:
storage: Custom storage backend
Returns:
Configured AuditService instance
"""
global _audit_service
_audit_service = AuditService(storage=storage)
return _audit_service
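A sketch of the removed audit API in use, logging a failed login and querying recent auth events; the import path is hypothetical since file names are not shown in this diff:

```python
# from src.server.services.audit import AuditEventType, get_audit_service
import asyncio
from datetime import datetime, timedelta

async def demo() -> None:
    audit = get_audit_service()  # file-backed storage under logs/audit by default
    await audit.log_login_failure(ip_address="203.0.113.7",
                                  reason="Invalid credentials")
    recent = await audit.get_events(
        start_time=datetime.utcnow() - timedelta(hours=1),
        event_types=[AuditEventType.AUTH_LOGIN_FAILURE],
    )
    for event in recent:
        print(event.timestamp, event.message)

asyncio.run(demo())
```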

View File

@ -1,432 +0,0 @@
"""Backup and restore service for configuration and data management."""
import json
import logging
import os
import shutil
import tarfile
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
@dataclass
class BackupInfo:
"""Information about a backup."""
name: str
timestamp: datetime
size_bytes: int
backup_type: str # 'config', 'data', 'full'
description: Optional[str] = None
class BackupService:
"""Service for managing backups and restores."""
def __init__(
self,
backup_dir: str = "data/backups",
config_dir: str = "data",
database_path: str = "data/aniworld.db",
):
"""Initialize backup service.
Args:
backup_dir: Directory to store backups.
config_dir: Directory containing configuration files.
database_path: Path to the database file.
"""
self.backup_dir = Path(backup_dir)
self.config_dir = Path(config_dir)
self.database_path = Path(database_path)
# Create backup directory if it doesn't exist
self.backup_dir.mkdir(parents=True, exist_ok=True)
def backup_configuration(
self, description: str = ""
) -> Optional[BackupInfo]:
"""Create a configuration backup.
Args:
description: Optional description for the backup.
Returns:
BackupInfo: Information about the created backup.
"""
try:
timestamp = datetime.now()
backup_name = (
f"config_{timestamp.strftime('%Y%m%d_%H%M%S')}.tar.gz"
)
backup_path = self.backup_dir / backup_name
with tarfile.open(backup_path, "w:gz") as tar:
# Add configuration files
config_files = [
self.config_dir / "config.json",
]
for config_file in config_files:
if config_file.exists():
tar.add(config_file, arcname=config_file.name)
size_bytes = backup_path.stat().st_size
info = BackupInfo(
name=backup_name,
timestamp=timestamp,
size_bytes=size_bytes,
backup_type="config",
description=description,
)
logger.info(f"Configuration backup created: {backup_name}")
return info
except Exception as e:
logger.error(f"Failed to create configuration backup: {e}")
return None
def backup_database(
self, description: str = ""
) -> Optional[BackupInfo]:
"""Create a database backup.
Args:
description: Optional description for the backup.
Returns:
BackupInfo: Information about the created backup.
"""
try:
if not self.database_path.exists():
logger.warning(
f"Database file not found: {self.database_path}"
)
return None
timestamp = datetime.now()
backup_name = (
f"database_{timestamp.strftime('%Y%m%d_%H%M%S')}.tar.gz"
)
backup_path = self.backup_dir / backup_name
with tarfile.open(backup_path, "w:gz") as tar:
tar.add(self.database_path, arcname=self.database_path.name)
size_bytes = backup_path.stat().st_size
info = BackupInfo(
name=backup_name,
timestamp=timestamp,
size_bytes=size_bytes,
backup_type="data",
description=description,
)
logger.info(f"Database backup created: {backup_name}")
return info
except Exception as e:
logger.error(f"Failed to create database backup: {e}")
return None
def backup_full(
self, description: str = ""
) -> Optional[BackupInfo]:
"""Create a full system backup.
Args:
description: Optional description for the backup.
Returns:
BackupInfo: Information about the created backup.
"""
try:
timestamp = datetime.now()
backup_name = f"full_{timestamp.strftime('%Y%m%d_%H%M%S')}.tar.gz"
backup_path = self.backup_dir / backup_name
with tarfile.open(backup_path, "w:gz") as tar:
# Add configuration
config_file = self.config_dir / "config.json"
if config_file.exists():
tar.add(config_file, arcname=config_file.name)
# Add database
if self.database_path.exists():
tar.add(
self.database_path,
arcname=self.database_path.name,
)
# Add download queue
queue_file = self.config_dir / "download_queue.json"
if queue_file.exists():
tar.add(queue_file, arcname=queue_file.name)
size_bytes = backup_path.stat().st_size
info = BackupInfo(
name=backup_name,
timestamp=timestamp,
size_bytes=size_bytes,
backup_type="full",
description=description,
)
logger.info(f"Full backup created: {backup_name}")
return info
except Exception as e:
logger.error(f"Failed to create full backup: {e}")
return None
def restore_configuration(self, backup_name: str) -> bool:
"""Restore configuration from backup.
Args:
backup_name: Name of the backup to restore.
Returns:
bool: True if restore was successful.
"""
try:
backup_path = self.backup_dir / backup_name
if not backup_path.exists():
logger.error(f"Backup file not found: {backup_name}")
return False
# Extract to temporary directory
temp_dir = self.backup_dir / "temp_restore"
temp_dir.mkdir(exist_ok=True)
with tarfile.open(backup_path, "r:gz") as tar:
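            # NOTE: extractall() on an untrusted archive can escape
            # temp_dir via path traversal; Python 3.12+ supports
            # tar.extractall(temp_dir, filter="data") to guard against this.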
tar.extractall(temp_dir)
            # Copy the configuration file back; fail if the archive lacks it
            config_file = temp_dir / "config.json"
            restored = config_file.exists()
            if restored:
                shutil.copy(config_file, self.config_dir / "config.json")
            else:
                logger.error(
                    f"config.json missing from backup: {backup_name}"
                )
            # Cleanup
            shutil.rmtree(temp_dir)
            if restored:
                logger.info(f"Configuration restored from: {backup_name}")
            return restored
except Exception as e:
logger.error(f"Failed to restore configuration: {e}")
return False
def restore_database(self, backup_name: str) -> bool:
"""Restore database from backup.
Args:
backup_name: Name of the backup to restore.
Returns:
bool: True if restore was successful.
"""
try:
backup_path = self.backup_dir / backup_name
if not backup_path.exists():
logger.error(f"Backup file not found: {backup_name}")
return False
# Create backup of current database
if self.database_path.exists():
current_backup = (
self.database_path.parent
/ f"{self.database_path.name}.backup"
)
shutil.copy(self.database_path, current_backup)
logger.info(f"Current database backed up to: {current_backup}")
# Extract to temporary directory
temp_dir = self.backup_dir / "temp_restore"
temp_dir.mkdir(exist_ok=True)
with tarfile.open(backup_path, "r:gz") as tar:
tar.extractall(temp_dir)
            # Copy the database file back; fail if the archive lacks it
            db_file = temp_dir / self.database_path.name
            restored = db_file.exists()
            if restored:
                shutil.copy(db_file, self.database_path)
            else:
                logger.error(
                    f"{self.database_path.name} missing from backup: "
                    f"{backup_name}"
                )
            # Cleanup
            shutil.rmtree(temp_dir)
            if restored:
                logger.info(f"Database restored from: {backup_name}")
            return restored
except Exception as e:
logger.error(f"Failed to restore database: {e}")
return False
def list_backups(
self, backup_type: Optional[str] = None
) -> List[Dict[str, Any]]:
"""List available backups.
Args:
backup_type: Optional filter by backup type.
Returns:
list: List of backup information.
"""
try:
backups = []
for backup_file in sorted(self.backup_dir.glob("*.tar.gz")):
                # Extract type from filename; "database_*" archives carry
                # backup_type "data", so normalize for filtering
                filename = backup_file.name
                file_type = filename.split("_")[0]
                if file_type == "database":
                    file_type = "data"
if backup_type and file_type != backup_type:
continue
# Extract timestamp
timestamp_str = (
filename.split("_", 1)[1].replace(".tar.gz", "")
)
backups.append(
{
"name": filename,
"type": file_type,
"size_bytes": backup_file.stat().st_size,
"created": timestamp_str,
}
)
return sorted(backups, key=lambda x: x["created"], reverse=True)
except Exception as e:
logger.error(f"Failed to list backups: {e}")
return []
def delete_backup(self, backup_name: str) -> bool:
"""Delete a backup.
Args:
backup_name: Name of the backup to delete.
Returns:
bool: True if delete was successful.
"""
try:
backup_path = self.backup_dir / backup_name
if not backup_path.exists():
logger.warning(f"Backup not found: {backup_name}")
return False
backup_path.unlink()
logger.info(f"Backup deleted: {backup_name}")
return True
except Exception as e:
logger.error(f"Failed to delete backup: {e}")
return False
def cleanup_old_backups(
self, max_backups: int = 10, backup_type: Optional[str] = None
) -> int:
"""Remove old backups, keeping only the most recent ones.
Args:
max_backups: Maximum number of backups to keep.
backup_type: Optional filter by backup type.
Returns:
int: Number of backups deleted.
"""
try:
backups = self.list_backups(backup_type)
if len(backups) <= max_backups:
return 0
deleted_count = 0
for backup in backups[max_backups:]:
if self.delete_backup(backup["name"]):
deleted_count += 1
logger.info(f"Cleaned up {deleted_count} old backups")
return deleted_count
except Exception as e:
logger.error(f"Failed to cleanup old backups: {e}")
return 0
def export_anime_data(
self, output_file: str
) -> bool:
"""Export anime library data to JSON.
Args:
output_file: Path to export file.
Returns:
bool: True if export was successful.
"""
try:
# This would integrate with the anime service
# to export anime library data
export_data = {
"timestamp": datetime.now().isoformat(),
"anime_count": 0,
"data": [],
}
with open(output_file, "w") as f:
json.dump(export_data, f, indent=2)
logger.info(f"Anime data exported to: {output_file}")
return True
except Exception as e:
logger.error(f"Failed to export anime data: {e}")
return False
def import_anime_data(self, input_file: str) -> bool:
"""Import anime library data from JSON.
Args:
input_file: Path to import file.
Returns:
bool: True if import was successful.
"""
try:
if not os.path.exists(input_file):
logger.error(f"Import file not found: {input_file}")
return False
with open(input_file, "r") as f:
json.load(f) # Load and validate JSON
# This would integrate with the anime service
# to import anime library data
logger.info(f"Anime data imported from: {input_file}")
return True
except Exception as e:
logger.error(f"Failed to import anime data: {e}")
return False
# Global backup service instance
_backup_service: Optional[BackupService] = None
def get_backup_service() -> BackupService:
"""Get or create the global backup service instance.
Returns:
BackupService: The backup service instance.
"""
global _backup_service
if _backup_service is None:
_backup_service = BackupService()
return _backup_service
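
Taken as a whole, the module is meant to be driven through the singleton accessor. A minimal usage sketch (the description string and retention count are illustrative, not values from the codebase):

from src.server.services.backup_service import get_backup_service

service = get_backup_service()

# Create a full backup (config + database + download queue)
info = service.backup_full(description="pre-upgrade snapshot")
if info is not None:
    print(f"created {info.name} ({info.size_bytes} bytes)")

# List and prune, keeping the five most recent full backups
for backup in service.list_backups(backup_type="full"):
    print(backup["name"], backup["created"])
service.cleanup_old_backups(max_backups=5, backup_type="full")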


@@ -1,324 +0,0 @@
"""Monitoring service for system resource tracking and metrics collection."""
import logging
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
import psutil
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.server.database.models import DownloadQueueItem
logger = logging.getLogger(__name__)
@dataclass
class QueueMetrics:
"""Download queue statistics and metrics."""
total_items: int = 0
pending_items: int = 0
downloading_items: int = 0
completed_items: int = 0
failed_items: int = 0
total_size_bytes: int = 0
downloaded_bytes: int = 0
average_speed_mbps: float = 0.0
estimated_time_remaining: Optional[timedelta] = None
success_rate: float = 0.0
@dataclass
class SystemMetrics:
"""System resource metrics at a point in time."""
timestamp: datetime
cpu_percent: float
memory_percent: float
memory_available_mb: float
disk_percent: float
disk_free_mb: float
uptime_seconds: float
@dataclass
class ErrorMetrics:
"""Error tracking and statistics."""
total_errors: int = 0
errors_24h: int = 0
most_common_errors: Dict[str, int] = field(default_factory=dict)
last_error_time: Optional[datetime] = None
error_rate_per_hour: float = 0.0
class MonitoringService:
"""Service for monitoring system resources and application metrics."""
def __init__(self):
"""Initialize monitoring service."""
self._error_log: List[tuple[datetime, str]] = []
self._performance_samples: List[SystemMetrics] = []
self._max_samples = 1440 # Keep 24 hours of minute samples
def get_system_metrics(self) -> SystemMetrics:
"""Get current system resource metrics.
Returns:
SystemMetrics: Current system metrics.
"""
try:
import time
cpu_percent = psutil.cpu_percent(interval=1)
memory_info = psutil.virtual_memory()
disk_info = psutil.disk_usage("/")
boot_time = psutil.boot_time()
uptime_seconds = time.time() - boot_time
metrics = SystemMetrics(
timestamp=datetime.now(),
cpu_percent=cpu_percent,
memory_percent=memory_info.percent,
memory_available_mb=memory_info.available / (1024 * 1024),
disk_percent=disk_info.percent,
disk_free_mb=disk_info.free / (1024 * 1024),
uptime_seconds=uptime_seconds,
)
# Store sample
self._performance_samples.append(metrics)
if len(self._performance_samples) > self._max_samples:
self._performance_samples.pop(0)
return metrics
except Exception as e:
logger.error(f"Failed to get system metrics: {e}")
raise
async def get_queue_metrics(self, db: AsyncSession) -> QueueMetrics:
"""Get download queue metrics.
Args:
db: Database session.
Returns:
QueueMetrics: Queue statistics and progress.
"""
try:
# Get all queue items
result = await db.execute(select(DownloadQueueItem))
items = result.scalars().all()
if not items:
return QueueMetrics()
# Calculate metrics
total_items = len(items)
pending_items = sum(1 for i in items if i.status == "PENDING")
downloading_items = sum(
1 for i in items if i.status == "DOWNLOADING"
)
completed_items = sum(1 for i in items if i.status == "COMPLETED")
failed_items = sum(1 for i in items if i.status == "FAILED")
total_size_bytes = sum(
(i.total_bytes or 0) for i in items
)
downloaded_bytes = sum(
(i.downloaded_bytes or 0) for i in items
)
# Calculate average speed from active downloads
speeds = [
i.download_speed for i in items
if i.status == "DOWNLOADING" and i.download_speed
]
average_speed_mbps = (
sum(speeds) / len(speeds) / (1024 * 1024) if speeds else 0
)
# Calculate success rate
success_rate = (
(completed_items / total_items * 100) if total_items > 0 else 0
)
# Estimate time remaining
estimated_time_remaining = None
            if average_speed_mbps > 0 and total_size_bytes > downloaded_bytes:
                remaining_bytes = total_size_bytes - downloaded_bytes
                # average_speed_mbps is in MB/s; convert to bytes/s
                remaining_seconds = remaining_bytes / (
                    average_speed_mbps * 1024 * 1024
                )
                estimated_time_remaining = timedelta(seconds=remaining_seconds)
return QueueMetrics(
total_items=total_items,
pending_items=pending_items,
downloading_items=downloading_items,
completed_items=completed_items,
failed_items=failed_items,
total_size_bytes=total_size_bytes,
downloaded_bytes=downloaded_bytes,
average_speed_mbps=average_speed_mbps,
estimated_time_remaining=estimated_time_remaining,
success_rate=success_rate,
)
except Exception as e:
logger.error(f"Failed to get queue metrics: {e}")
raise
def log_error(self, error_message: str) -> None:
"""Log an error for tracking purposes.
Args:
error_message: The error message to log.
"""
self._error_log.append((datetime.now(), error_message))
logger.debug(f"Error logged: {error_message}")
def get_error_metrics(self) -> ErrorMetrics:
"""Get error tracking metrics.
Returns:
ErrorMetrics: Error statistics and trends.
"""
total_errors = len(self._error_log)
# Get errors from last 24 hours
cutoff_time = datetime.now() - timedelta(hours=24)
recent_errors = [
(time, msg) for time, msg in self._error_log
if time >= cutoff_time
]
errors_24h = len(recent_errors)
# Count error types
error_counts: Dict[str, int] = {}
for _, msg in recent_errors:
error_type = msg.split(":")[0]
error_counts[error_type] = error_counts.get(error_type, 0) + 1
# Sort by count
most_common_errors = dict(
sorted(error_counts.items(), key=lambda x: x[1], reverse=True)[:10]
)
# Get last error time
last_error_time = (
recent_errors[-1][0] if recent_errors else None
)
# Calculate error rate per hour
        error_rate_per_hour = errors_24h / 24
return ErrorMetrics(
total_errors=total_errors,
errors_24h=errors_24h,
most_common_errors=most_common_errors,
last_error_time=last_error_time,
error_rate_per_hour=error_rate_per_hour,
)
def get_performance_summary(self) -> Dict[str, Any]:
"""Get performance summary from collected samples.
Returns:
dict: Performance statistics.
"""
if not self._performance_samples:
return {}
cpu_values = [m.cpu_percent for m in self._performance_samples]
memory_values = [m.memory_percent for m in self._performance_samples]
disk_values = [m.disk_percent for m in self._performance_samples]
return {
"cpu": {
"current": cpu_values[-1],
"average": sum(cpu_values) / len(cpu_values),
"max": max(cpu_values),
"min": min(cpu_values),
},
"memory": {
"current": memory_values[-1],
"average": sum(memory_values) / len(memory_values),
"max": max(memory_values),
"min": min(memory_values),
},
"disk": {
"current": disk_values[-1],
"average": sum(disk_values) / len(disk_values),
"max": max(disk_values),
"min": min(disk_values),
},
"sample_count": len(self._performance_samples),
}
async def get_comprehensive_status(
self, db: AsyncSession
) -> Dict[str, Any]:
"""Get comprehensive system status summary.
Args:
db: Database session.
Returns:
dict: Complete system status.
"""
try:
system_metrics = self.get_system_metrics()
queue_metrics = await self.get_queue_metrics(db)
error_metrics = self.get_error_metrics()
performance = self.get_performance_summary()
return {
"timestamp": datetime.now().isoformat(),
"system": {
"cpu_percent": system_metrics.cpu_percent,
"memory_percent": system_metrics.memory_percent,
"disk_percent": system_metrics.disk_percent,
"uptime_seconds": system_metrics.uptime_seconds,
},
"queue": {
"total_items": queue_metrics.total_items,
"pending": queue_metrics.pending_items,
"downloading": queue_metrics.downloading_items,
"completed": queue_metrics.completed_items,
"failed": queue_metrics.failed_items,
"success_rate": round(queue_metrics.success_rate, 2),
"average_speed_mbps": round(
queue_metrics.average_speed_mbps, 2
),
},
"errors": {
"total": error_metrics.total_errors,
"last_24h": error_metrics.errors_24h,
"rate_per_hour": round(
error_metrics.error_rate_per_hour, 2
),
"most_common": error_metrics.most_common_errors,
},
"performance": performance,
}
except Exception as e:
logger.error(f"Failed to get comprehensive status: {e}")
raise
# Global monitoring service instance
_monitoring_service: Optional[MonitoringService] = None
def get_monitoring_service() -> MonitoringService:
"""Get or create the global monitoring service instance.
Returns:
MonitoringService: The monitoring service instance.
"""
global _monitoring_service
if _monitoring_service is None:
_monitoring_service = MonitoringService()
return _monitoring_service
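
As with the backup service, callers are expected to go through the accessor. A small sketch of the synchronous surface (queue metrics need an AsyncSession and are omitted here):

from src.server.services.monitoring_service import get_monitoring_service

monitor = get_monitoring_service()

# Each call samples psutil and appends to the rolling 24-hour window
metrics = monitor.get_system_metrics()
print(f"cpu={metrics.cpu_percent}% mem={metrics.memory_percent}%")

# Errors are bucketed by the text before the first colon
monitor.log_error("ConnectionError: provider unreachable")
print(monitor.get_error_metrics().most_common_errors)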


@@ -1,315 +0,0 @@
"""Unit tests for analytics service.
Tests analytics service functionality including download statistics,
series popularity tracking, storage analysis, and performance reporting.
"""
import json
from datetime import datetime
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from src.server.services.analytics_service import (
AnalyticsService,
DownloadStats,
PerformanceReport,
StorageAnalysis,
)
@pytest.fixture
def analytics_service(tmp_path):
"""Create analytics service with temp directory."""
with patch("src.server.services.analytics_service.ANALYTICS_FILE",
tmp_path / "analytics.json"):
service = AnalyticsService()
yield service
@pytest.fixture
async def mock_db():
"""Create mock database session."""
db = AsyncMock(spec=AsyncSession)
return db
@pytest.mark.asyncio
async def test_analytics_service_initialization(analytics_service):
"""Test analytics service initializes with default data."""
assert analytics_service.analytics_file.exists()
data = json.loads(analytics_service.analytics_file.read_text())
assert "created_at" in data
assert "download_stats" in data
assert "series_popularity" in data
assert data["download_stats"]["total_downloads"] == 0
@pytest.mark.asyncio
async def test_get_download_stats_no_data(
analytics_service, mock_db
):
"""Test download statistics with no download data."""
mock_db.execute = AsyncMock(return_value=MagicMock(
scalars=MagicMock(return_value=MagicMock(all=MagicMock(
return_value=[]
)))
))
stats = await analytics_service.get_download_stats(mock_db)
assert isinstance(stats, DownloadStats)
assert stats.total_downloads == 0
assert stats.successful_downloads == 0
assert stats.success_rate == 0.0
@pytest.mark.asyncio
async def test_get_download_stats_with_data(
analytics_service, mock_db
):
"""Test download statistics with download data."""
# Mock downloads - updated to use actual model fields
download1 = MagicMock()
download1.status = "completed"
download1.total_bytes = 1024 * 1024 * 100 # 100 MB
download1.download_speed = 1024 * 1024 * 10 # 10 MB/s
download2 = MagicMock()
download2.status = "failed"
download2.total_bytes = 0
download2.download_speed = None
mock_db.execute = AsyncMock(return_value=MagicMock(
scalars=MagicMock(return_value=MagicMock(all=MagicMock(
return_value=[download1, download2]
)))
))
stats = await analytics_service.get_download_stats(mock_db)
assert stats.total_downloads == 2
assert stats.successful_downloads == 1
assert stats.failed_downloads == 1
assert stats.success_rate == 50.0
assert stats.total_bytes_downloaded == 1024 * 1024 * 100
@pytest.mark.asyncio
async def test_get_series_popularity_empty(
analytics_service, mock_db
):
"""Test series popularity with no data."""
mock_db.execute = AsyncMock(return_value=MagicMock(
all=MagicMock(return_value=[])
))
popularity = await analytics_service.get_series_popularity(
mock_db, limit=10
)
assert isinstance(popularity, list)
assert len(popularity) == 0
@pytest.mark.asyncio
async def test_get_series_popularity_with_data(
analytics_service, mock_db
):
"""Test series popularity with data."""
# Mock returns tuples:
# (series_name, download_count, total_size, last_download, successful)
row = (
"Test Anime",
5,
1024 * 1024 * 500,
datetime.now(),
4
)
mock_db.execute = AsyncMock(return_value=MagicMock(
all=MagicMock(return_value=[row])
))
popularity = await analytics_service.get_series_popularity(
mock_db, limit=10
)
assert len(popularity) == 1
assert popularity[0].series_name == "Test Anime"
assert popularity[0].download_count == 5
assert popularity[0].success_rate == 80.0
@pytest.mark.asyncio
async def test_get_storage_analysis(analytics_service):
"""Test storage analysis retrieval."""
with patch("psutil.disk_usage") as mock_disk:
mock_disk.return_value = MagicMock(
total=1024 * 1024 * 1024 * 1024,
used=512 * 1024 * 1024 * 1024,
free=512 * 1024 * 1024 * 1024,
percent=50.0,
)
analysis = analytics_service.get_storage_analysis()
assert isinstance(analysis, StorageAnalysis)
assert analysis.total_storage_bytes > 0
assert analysis.storage_percent_used == 50.0
@pytest.mark.asyncio
async def test_get_performance_report_no_data(
analytics_service, mock_db
):
"""Test performance report with no data."""
mock_db.execute = AsyncMock(return_value=MagicMock(
scalars=MagicMock(return_value=MagicMock(all=MagicMock(
return_value=[]
)))
))
with patch("psutil.Process") as mock_process:
mock_process.return_value = MagicMock(
memory_info=MagicMock(
return_value=MagicMock(rss=100 * 1024 * 1024)
),
cpu_percent=MagicMock(return_value=10.0),
)
report = await analytics_service.get_performance_report(
mock_db, hours=24
)
assert isinstance(report, PerformanceReport)
assert report.downloads_per_hour == 0.0
@pytest.mark.asyncio
async def test_record_performance_sample(analytics_service):
"""Test recording performance samples."""
analytics_service.record_performance_sample(
queue_size=5,
active_downloads=2,
cpu_percent=25.0,
memory_mb=512.0,
)
data = json.loads(
analytics_service.analytics_file.read_text()
)
assert len(data["performance_samples"]) == 1
sample = data["performance_samples"][0]
assert sample["queue_size"] == 5
assert sample["active_downloads"] == 2
@pytest.mark.asyncio
async def test_record_multiple_performance_samples(
analytics_service
):
"""Test recording multiple performance samples."""
for i in range(5):
analytics_service.record_performance_sample(
queue_size=i,
active_downloads=i % 2,
cpu_percent=10.0 + i,
memory_mb=256.0 + i * 50,
)
data = json.loads(
analytics_service.analytics_file.read_text()
)
assert len(data["performance_samples"]) == 5
@pytest.mark.asyncio
async def test_generate_summary_report(
analytics_service, mock_db
):
"""Test generating comprehensive summary report."""
mock_db.execute = AsyncMock(return_value=MagicMock(
scalars=MagicMock(return_value=MagicMock(all=MagicMock(
return_value=[]
))),
all=MagicMock(return_value=[]),
))
with patch("psutil.disk_usage") as mock_disk:
mock_disk.return_value = MagicMock(
total=1024 * 1024 * 1024,
used=512 * 1024 * 1024,
free=512 * 1024 * 1024,
percent=50.0,
)
with patch("psutil.Process"):
report = await analytics_service.generate_summary_report(
mock_db
)
assert "timestamp" in report
assert "download_stats" in report
assert "series_popularity" in report
assert "storage_analysis" in report
assert "performance_report" in report
@pytest.mark.asyncio
async def test_get_dir_size(analytics_service, tmp_path):
"""Test directory size calculation."""
# Create test files
(tmp_path / "file1.txt").write_text("test content")
(tmp_path / "file2.txt").write_text("more test content")
subdir = tmp_path / "subdir"
subdir.mkdir()
(subdir / "file3.txt").write_text("nested content")
size = analytics_service._get_dir_size(tmp_path)
assert size > 0
@pytest.mark.asyncio
async def test_get_dir_size_nonexistent(analytics_service):
"""Test directory size for nonexistent directory."""
size = analytics_service._get_dir_size(
Path("/nonexistent/directory")
)
assert size == 0
@pytest.mark.asyncio
async def test_analytics_persistence(analytics_service):
"""Test analytics data persistence."""
analytics_service.record_performance_sample(
queue_size=10,
active_downloads=3,
cpu_percent=50.0,
memory_mb=1024.0,
)
# Create new service instance
analytics_service2 = AnalyticsService()
analytics_service2.analytics_file = analytics_service.analytics_file
data = json.loads(
analytics_service2.analytics_file.read_text()
)
assert len(data["performance_samples"]) == 1
@pytest.mark.asyncio
async def test_analytics_service_singleton(analytics_service):
"""Test analytics service singleton pattern."""
from src.server.services.analytics_service import get_analytics_service
service1 = get_analytics_service()
service2 = get_analytics_service()
assert service1 is service2
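
Most of the nested mock chains above emulate the same call shape, (await db.execute(stmt)).scalars().all(). A small helper along these lines could be shared across the suite (hypothetical, not part of the repository):

from unittest.mock import AsyncMock, MagicMock

def make_db_returning(rows):
    """Build an AsyncSession mock whose execute() yields the given rows."""
    db = AsyncMock()
    scalars = MagicMock(all=MagicMock(return_value=rows))
    result = MagicMock(scalars=MagicMock(return_value=scalars))
    db.execute = AsyncMock(return_value=result)
    return db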


@@ -1,259 +0,0 @@
"""Unit tests for backup service."""
import tempfile
from pathlib import Path
import pytest
from src.server.services.backup_service import BackupService, get_backup_service
@pytest.fixture
def temp_backup_env():
"""Create temporary directories for testing."""
with tempfile.TemporaryDirectory() as tmpdir:
backup_dir = Path(tmpdir) / "backups"
config_dir = Path(tmpdir) / "config"
config_dir.mkdir()
# Create mock config files
(config_dir / "config.json").write_text('{"test": "config"}')
(config_dir / "download_queue.json").write_text('{"queue": []}')
yield {
"backup_dir": str(backup_dir),
"config_dir": str(config_dir),
"tmpdir": tmpdir,
}
def test_backup_service_initialization(temp_backup_env):
"""Test backup service initialization."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
assert service is not None
assert service.backup_dir.exists()
def test_backup_configuration(temp_backup_env):
"""Test configuration backup creation."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
backup_info = service.backup_configuration("Test backup")
assert backup_info is not None
assert backup_info.backup_type == "config"
assert backup_info.size_bytes > 0
assert "config_" in backup_info.name
def test_backup_configuration_no_config(temp_backup_env):
"""Test configuration backup with missing config file."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
# Remove config file
(Path(temp_backup_env["config_dir"]) / "config.json").unlink()
# Should still create backup (empty tar)
backup_info = service.backup_configuration()
assert backup_info is not None
def test_backup_database(temp_backup_env):
"""Test database backup creation."""
# Create mock database file
db_path = Path(temp_backup_env["tmpdir"]) / "aniworld.db"
db_path.write_bytes(b"mock database content")
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
database_path=str(db_path),
)
backup_info = service.backup_database("DB backup")
assert backup_info is not None
assert backup_info.backup_type == "data"
assert backup_info.size_bytes > 0
assert "database_" in backup_info.name
def test_backup_database_not_found(temp_backup_env):
"""Test database backup with missing database."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
database_path="/nonexistent/database.db",
)
backup_info = service.backup_database()
assert backup_info is None
def test_backup_full(temp_backup_env):
"""Test full system backup."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
backup_info = service.backup_full("Full backup")
assert backup_info is not None
assert backup_info.backup_type == "full"
assert backup_info.size_bytes > 0
def test_list_backups(temp_backup_env):
"""Test listing backups."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
# Create several backups
service.backup_configuration()
service.backup_full()
backups = service.list_backups()
assert len(backups) >= 2
assert all("name" in b for b in backups)
assert all("type" in b for b in backups)
def test_list_backups_by_type(temp_backup_env):
"""Test listing backups filtered by type."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
# Create different types of backups
service.backup_configuration()
service.backup_full()
config_backups = service.list_backups("config")
assert all(b["type"] == "config" for b in config_backups)
def test_delete_backup(temp_backup_env):
"""Test backup deletion."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
backup_info = service.backup_configuration()
assert backup_info is not None
backups_before = service.list_backups()
assert len(backups_before) > 0
result = service.delete_backup(backup_info.name)
assert result is True
backups_after = service.list_backups()
assert len(backups_after) < len(backups_before)
def test_delete_backup_not_found(temp_backup_env):
"""Test deleting non-existent backup."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
result = service.delete_backup("nonexistent_backup.tar.gz")
assert result is False
def test_cleanup_old_backups(temp_backup_env):
"""Test cleanup of old backups."""
import time
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
# Create multiple backups with small delays to ensure unique timestamps
for i in range(5):
service.backup_configuration()
time.sleep(1) # Ensure different timestamps
backups_before = service.list_backups()
assert len(backups_before) == 5
# Keep only 2 backups
deleted = service.cleanup_old_backups(max_backups=2)
backups_after = service.list_backups()
assert len(backups_after) <= 2
assert deleted == 3
def test_export_anime_data(temp_backup_env):
"""Test anime data export."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
export_file = Path(temp_backup_env["tmpdir"]) / "anime_export.json"
result = service.export_anime_data(str(export_file))
assert result is True
assert export_file.exists()
assert "timestamp" in export_file.read_text()
def test_import_anime_data(temp_backup_env):
"""Test anime data import."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
# Create import file
import_file = Path(temp_backup_env["tmpdir"]) / "anime_import.json"
import_file.write_text('{"timestamp": "2025-01-01T00:00:00", "data": []}')
result = service.import_anime_data(str(import_file))
assert result is True
def test_import_anime_data_not_found(temp_backup_env):
"""Test anime data import with missing file."""
service = BackupService(
backup_dir=temp_backup_env["backup_dir"],
config_dir=temp_backup_env["config_dir"],
)
result = service.import_anime_data("/nonexistent/file.json")
assert result is False
def test_get_backup_service():
"""Test singleton backup service."""
service1 = get_backup_service()
service2 = get_backup_service()
assert service1 is service2
assert isinstance(service1, BackupService)
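
The tempfile-based fixture above could also be written with pytest's built-in tmp_path, which handles cleanup automatically; a sketch under that assumption:

import pytest

from src.server.services.backup_service import BackupService

@pytest.fixture
def backup_service(tmp_path):
    config_dir = tmp_path / "config"
    config_dir.mkdir()
    (config_dir / "config.json").write_text('{"test": "config"}')
    return BackupService(
        backup_dir=str(tmp_path / "backups"),
        config_dir=str(config_dir),
    )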


@@ -1,237 +0,0 @@
"""Unit tests for monitoring service."""
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock
import pytest
from src.server.services.monitoring_service import (
ErrorMetrics,
MonitoringService,
QueueMetrics,
SystemMetrics,
get_monitoring_service,
)
def test_monitoring_service_initialization():
"""Test monitoring service initialization."""
service = MonitoringService()
assert service is not None
assert service._error_log == []
assert service._performance_samples == []
def test_get_system_metrics():
"""Test system metrics collection."""
service = MonitoringService()
metrics = service.get_system_metrics()
assert isinstance(metrics, SystemMetrics)
assert metrics.cpu_percent >= 0
assert metrics.memory_percent >= 0
assert metrics.disk_percent >= 0
assert metrics.uptime_seconds > 0
assert metrics.memory_available_mb > 0
assert metrics.disk_free_mb > 0
def test_system_metrics_stored():
"""Test that system metrics are stored for performance tracking."""
service = MonitoringService()
metrics1 = service.get_system_metrics()
metrics2 = service.get_system_metrics()
assert len(service._performance_samples) == 2
assert service._performance_samples[0] == metrics1
assert service._performance_samples[1] == metrics2
@pytest.mark.asyncio
async def test_get_queue_metrics_empty():
"""Test queue metrics with no items."""
service = MonitoringService()
mock_db = AsyncMock()
# Mock empty result
mock_scalars = AsyncMock()
mock_scalars.all = MagicMock(return_value=[])
mock_result = AsyncMock()
mock_result.scalars = MagicMock(return_value=mock_scalars)
mock_db.execute = AsyncMock(return_value=mock_result)
metrics = await service.get_queue_metrics(mock_db)
assert isinstance(metrics, QueueMetrics)
assert metrics.total_items == 0
assert metrics.success_rate == 0.0
@pytest.mark.asyncio
async def test_get_queue_metrics_with_items():
"""Test queue metrics with download items."""
service = MonitoringService()
mock_db = AsyncMock()
# Create mock queue items
item1 = MagicMock()
item1.status = "COMPLETED"
item1.total_bytes = 1000000
item1.downloaded_bytes = 1000000
item1.download_speed = 1000000
item2 = MagicMock()
item2.status = "DOWNLOADING"
item2.total_bytes = 2000000
item2.downloaded_bytes = 1000000
item2.download_speed = 500000
item3 = MagicMock()
item3.status = "FAILED"
item3.total_bytes = 500000
item3.downloaded_bytes = 0
item3.download_speed = None
# Mock result
mock_scalars = AsyncMock()
mock_scalars.all = MagicMock(return_value=[item1, item2, item3])
mock_result = AsyncMock()
mock_result.scalars = MagicMock(return_value=mock_scalars)
mock_db.execute = AsyncMock(return_value=mock_result)
metrics = await service.get_queue_metrics(mock_db)
assert metrics.total_items == 3
assert metrics.completed_items == 1
assert metrics.downloading_items == 1
assert metrics.failed_items == 1
assert metrics.total_size_bytes == 3500000
assert metrics.downloaded_bytes == 2000000
assert metrics.success_rate > 0
def test_log_error():
"""Test error logging."""
service = MonitoringService()
service.log_error("Test error 1")
service.log_error("Test error 2")
assert len(service._error_log) == 2
assert service._error_log[0][1] == "Test error 1"
assert service._error_log[1][1] == "Test error 2"
def test_get_error_metrics_empty():
"""Test error metrics with no errors."""
service = MonitoringService()
metrics = service.get_error_metrics()
assert isinstance(metrics, ErrorMetrics)
assert metrics.total_errors == 0
assert metrics.errors_24h == 0
assert metrics.error_rate_per_hour == 0.0
def test_get_error_metrics_with_errors():
"""Test error metrics with multiple errors."""
service = MonitoringService()
service.log_error("ConnectionError: Failed to connect")
service.log_error("ConnectionError: Timeout")
service.log_error("TimeoutError: Download timeout")
metrics = service.get_error_metrics()
assert metrics.total_errors == 3
assert metrics.errors_24h == 3
assert metrics.last_error_time is not None
assert len(metrics.most_common_errors) > 0
def test_get_error_metrics_old_errors():
"""Test error metrics excludes old errors."""
service = MonitoringService()
# Add old error (simulate by directly adding to log)
old_time = datetime.now() - timedelta(hours=25)
service._error_log.append((old_time, "Old error"))
# Add recent error
service.log_error("Recent error")
metrics = service.get_error_metrics()
assert metrics.total_errors == 2
assert metrics.errors_24h == 1
def test_get_performance_summary():
"""Test performance summary generation."""
service = MonitoringService()
# Collect some samples
service.get_system_metrics()
service.get_system_metrics()
service.get_system_metrics()
summary = service.get_performance_summary()
assert "cpu" in summary
assert "memory" in summary
assert "disk" in summary
assert "sample_count" in summary
assert summary["sample_count"] == 3
assert "current" in summary["cpu"]
assert "average" in summary["cpu"]
assert "max" in summary["cpu"]
assert "min" in summary["cpu"]
def test_get_performance_summary_empty():
"""Test performance summary with no samples."""
service = MonitoringService()
summary = service.get_performance_summary()
assert summary == {}
@pytest.mark.asyncio
async def test_get_comprehensive_status():
"""Test comprehensive system status."""
service = MonitoringService()
mock_db = AsyncMock()
# Mock empty queue
mock_scalars = AsyncMock()
mock_scalars.all = MagicMock(return_value=[])
mock_result = AsyncMock()
mock_result.scalars = MagicMock(return_value=mock_scalars)
mock_db.execute = AsyncMock(return_value=mock_result)
status = await service.get_comprehensive_status(mock_db)
assert "timestamp" in status
assert "system" in status
assert "queue" in status
assert "errors" in status
assert "performance" in status
assert status["system"]["cpu_percent"] >= 0
assert status["queue"]["total_items"] == 0
def test_get_monitoring_service():
"""Test singleton monitoring service."""
service1 = get_monitoring_service()
service2 = get_monitoring_service()
assert service1 is service2
assert isinstance(service1, MonitoringService)
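
test_get_system_metrics exercises the real psutil, which is why its assertions stay loose (>= 0). A deterministic variant would patch psutil where the service imports it; a sketch, assuming the module path used above:

from unittest.mock import MagicMock, patch

def test_get_system_metrics_deterministic():
    """System metrics reflect patched psutil values exactly."""
    with patch(
        "src.server.services.monitoring_service.psutil"
    ) as mock_psutil:
        mock_psutil.cpu_percent.return_value = 12.5
        mock_psutil.virtual_memory.return_value = MagicMock(
            percent=40.0, available=2048 * 1024 * 1024
        )
        mock_psutil.disk_usage.return_value = MagicMock(
            percent=55.0, free=10 * 1024 * 1024 * 1024
        )
        mock_psutil.boot_time.return_value = 0.0
        metrics = MonitoringService().get_system_metrics()
        assert metrics.cpu_percent == 12.5
        assert metrics.memory_percent == 40.0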


@@ -1,269 +0,0 @@
"""Tests for rate limiting middleware."""
from typing import Optional
import httpx
from fastapi import FastAPI, Request
from fastapi.testclient import TestClient
from src.server.middleware.rate_limit import (
RateLimitConfig,
RateLimitMiddleware,
RateLimitStore,
)
# Shim for environments where httpx.Client.__init__ doesn't accept an
# 'app' kwarg (some httpx versions have a different signature). The
# TestClient in Starlette passes `app=` through; to keep tests portable
# we pop it before calling the real initializer.
_orig_httpx_init = httpx.Client.__init__
def _httpx_init_shim(self, *args, **kwargs):
kwargs.pop("app", None)
return _orig_httpx_init(self, *args, **kwargs)
httpx.Client.__init__ = _httpx_init_shim
class TestRateLimitStore:
"""Tests for RateLimitStore class."""
def test_check_limit_allows_within_limits(self):
"""Test that requests within limits are allowed."""
store = RateLimitStore()
# First request should be allowed
allowed, retry_after = store.check_limit("test_id", 10, 100)
assert allowed is True
assert retry_after is None
# Record the request
store.record_request("test_id")
# Next request should still be allowed
allowed, retry_after = store.check_limit("test_id", 10, 100)
assert allowed is True
assert retry_after is None
def test_check_limit_blocks_over_minute_limit(self):
"""Test that requests over minute limit are blocked."""
store = RateLimitStore()
# Fill up to the minute limit
for _ in range(5):
store.record_request("test_id")
# Next request should be blocked
allowed, retry_after = store.check_limit("test_id", 5, 100)
assert allowed is False
assert retry_after is not None
assert retry_after > 0
def test_check_limit_blocks_over_hour_limit(self):
"""Test that requests over hour limit are blocked."""
store = RateLimitStore()
# Fill up to hour limit
for _ in range(10):
store.record_request("test_id")
# Next request should be blocked
allowed, retry_after = store.check_limit("test_id", 100, 10)
assert allowed is False
assert retry_after is not None
assert retry_after > 0
def test_get_remaining_requests(self):
"""Test getting remaining requests."""
store = RateLimitStore()
# Initially, all requests are remaining
minute_rem, hour_rem = store.get_remaining_requests(
"test_id", 10, 100
)
assert minute_rem == 10
assert hour_rem == 100
# After one request
store.record_request("test_id")
minute_rem, hour_rem = store.get_remaining_requests(
"test_id", 10, 100
)
assert minute_rem == 9
assert hour_rem == 99
class TestRateLimitConfig:
"""Tests for RateLimitConfig class."""
def test_default_config(self):
"""Test default configuration values."""
config = RateLimitConfig()
assert config.requests_per_minute == 60
assert config.requests_per_hour == 1000
assert config.authenticated_multiplier == 2.0
def test_custom_config(self):
"""Test custom configuration values."""
config = RateLimitConfig(
requests_per_minute=10,
requests_per_hour=100,
authenticated_multiplier=3.0,
)
assert config.requests_per_minute == 10
assert config.requests_per_hour == 100
assert config.authenticated_multiplier == 3.0
class TestRateLimitMiddleware:
"""Tests for RateLimitMiddleware class."""
def create_app(
self, default_config: Optional[RateLimitConfig] = None
) -> FastAPI:
"""Create a test FastAPI app with rate limiting.
Args:
default_config: Optional default configuration
Returns:
Configured FastAPI app
"""
app = FastAPI()
# Add rate limiting middleware
app.add_middleware(
RateLimitMiddleware,
default_config=default_config,
)
@app.get("/api/test")
async def test_endpoint():
return {"message": "success"}
@app.get("/health")
async def health_endpoint():
return {"status": "ok"}
@app.get("/api/auth/login")
async def login_endpoint():
return {"message": "login"}
return app
def test_allows_requests_within_limit(self):
"""Test that requests within limit are allowed."""
app = self.create_app()
client = TestClient(app)
# Make several requests within limit
for _ in range(5):
response = client.get("/api/test")
assert response.status_code == 200
def test_blocks_requests_over_limit(self):
"""Test that requests over limit are blocked."""
config = RateLimitConfig(
requests_per_minute=3,
requests_per_hour=100,
)
app = self.create_app(config)
client = TestClient(app, raise_server_exceptions=False)
# Make requests up to limit
for _ in range(3):
response = client.get("/api/test")
assert response.status_code == 200
# Next request should be rate limited
response = client.get("/api/test")
assert response.status_code == 429
assert "Retry-After" in response.headers
def test_bypass_health_endpoint(self):
"""Test that health endpoint bypasses rate limiting."""
config = RateLimitConfig(
requests_per_minute=1,
requests_per_hour=1,
)
app = self.create_app(config)
client = TestClient(app)
# Make many requests to health endpoint
for _ in range(10):
response = client.get("/health")
assert response.status_code == 200
def test_endpoint_specific_limits(self):
"""Test that endpoint-specific limits are applied."""
app = self.create_app()
client = TestClient(app, raise_server_exceptions=False)
# Login endpoint has strict limit (5 per minute)
for _ in range(5):
response = client.get("/api/auth/login")
assert response.status_code == 200
# Next login request should be rate limited
response = client.get("/api/auth/login")
assert response.status_code == 429
def test_rate_limit_headers(self):
"""Test that rate limit headers are added to response."""
app = self.create_app()
client = TestClient(app)
response = client.get("/api/test")
assert response.status_code == 200
assert "X-RateLimit-Limit-Minute" in response.headers
assert "X-RateLimit-Limit-Hour" in response.headers
assert "X-RateLimit-Remaining-Minute" in response.headers
assert "X-RateLimit-Remaining-Hour" in response.headers
def test_authenticated_user_multiplier(self):
"""Test that authenticated users get higher limits."""
config = RateLimitConfig(
requests_per_minute=5,
requests_per_hour=100,
authenticated_multiplier=2.0,
)
app = self.create_app(config)
# Add middleware to simulate authentication
@app.middleware("http")
async def add_user_to_state(request: Request, call_next):
request.state.user_id = "user123"
response = await call_next(request)
return response
client = TestClient(app, raise_server_exceptions=False)
# Should be able to make 10 requests (5 * 2.0)
for _ in range(10):
response = client.get("/api/test")
assert response.status_code == 200
# Next request should be rate limited
response = client.get("/api/test")
assert response.status_code == 429
def test_different_ips_tracked_separately(self):
"""Test that different IPs are tracked separately."""
config = RateLimitConfig(
requests_per_minute=2,
requests_per_hour=100,
)
app = self.create_app(config)
client = TestClient(app, raise_server_exceptions=False)
# Make requests from "different" IPs
# Note: TestClient uses same IP, but we can test the logic
for _ in range(2):
response = client.get("/api/test")
assert response.status_code == 200
# Third request should be rate limited
response = client.get("/api/test")
assert response.status_code == 429
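
For reference, production wiring would mirror the create_app factory above; a sketch (the numeric limits are illustrative):

from fastapi import FastAPI

from src.server.middleware.rate_limit import (
    RateLimitConfig,
    RateLimitMiddleware,
)

app = FastAPI()
app.add_middleware(
    RateLimitMiddleware,
    default_config=RateLimitConfig(
        requests_per_minute=60,
        requests_per_hour=1000,
        authenticated_multiplier=2.0,
    ),
)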