remove part 3
This commit is contained in:
@@ -1,331 +0,0 @@
|
||||
"""Rate limiting middleware for API endpoints.
|
||||
|
||||
This module provides comprehensive rate limiting with support for:
|
||||
- Endpoint-specific rate limits
|
||||
- IP-based limiting
|
||||
- User-based rate limiting
|
||||
- Bypass mechanisms for authenticated users
|
||||
"""
|
||||
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from typing import Callable, Dict, Optional, Tuple
|
||||
|
||||
from fastapi import Request, status
|
||||
from starlette.middleware.base import BaseHTTPMiddleware
|
||||
from starlette.responses import JSONResponse
|
||||
|
||||
|
||||
class RateLimitConfig:
    """Holds the request-rate ceilings applied to a single endpoint.

    A config carries per-minute and per-hour caps for unauthenticated
    callers plus a multiplier that raises both caps for authenticated
    users.
    """

    def __init__(
        self,
        requests_per_minute: int = 60,
        requests_per_hour: int = 1000,
        authenticated_multiplier: float = 2.0,
    ):
        """Create a rate-limit configuration.

        Args:
            requests_per_minute: Per-minute cap for unauthenticated users.
            requests_per_hour: Per-hour cap for unauthenticated users.
            authenticated_multiplier: Factor applied to both caps for
                authenticated users.
        """
        self.requests_per_minute = requests_per_minute
        self.requests_per_hour = requests_per_hour
        self.authenticated_multiplier = authenticated_multiplier
|
||||
|
||||
|
||||
class RateLimitStore:
    """In-memory store for rate limit tracking.

    Tracks request timestamps per identifier in two sliding windows
    (one minute and one hour). State is per-process only; it is not
    shared across workers.
    """

    def __init__(self):
        """Initialize the rate limit store."""
        # Store format: {identifier: [timestamp, ...]} where each entry
        # is a plain float from time.time(), appended in arrival order
        # (oldest first).
        self._minute_store: Dict[str, list] = defaultdict(list)
        self._hour_store: Dict[str, list] = defaultdict(list)

    def check_limit(
        self,
        identifier: str,
        max_per_minute: int,
        max_per_hour: int,
    ) -> Tuple[bool, Optional[int]]:
        """Check if the identifier has exceeded rate limits.

        Args:
            identifier: Unique identifier (IP or user ID)
            max_per_minute: Maximum requests allowed per minute
            max_per_hour: Maximum requests allowed per hour

        Returns:
            Tuple of (allowed, retry_after_seconds); retry_after_seconds
            is None when the request is allowed.
        """
        current_time = time.time()

        # Drop timestamps that have aged out of both windows so the
        # counts below reflect only active requests.
        self._cleanup_old_entries(identifier, current_time)

        # Check minute limit
        minute_count = len(self._minute_store[identifier])
        if minute_count >= max_per_minute:
            # Seconds until the oldest request leaves the 60s window,
            # clamped to at least 1 so Retry-After is never zero.
            oldest_entry = self._minute_store[identifier][0]
            retry_after = int(60 - (current_time - oldest_entry))
            return False, max(retry_after, 1)

        # Check hour limit
        hour_count = len(self._hour_store[identifier])
        if hour_count >= max_per_hour:
            oldest_entry = self._hour_store[identifier][0]
            retry_after = int(3600 - (current_time - oldest_entry))
            return False, max(retry_after, 1)

        return True, None

    def record_request(self, identifier: str) -> None:
        """Record a request for the identifier.

        Args:
            identifier: Unique identifier (IP or user ID)
        """
        current_time = time.time()
        self._minute_store[identifier].append(current_time)
        self._hour_store[identifier].append(current_time)

    def get_remaining_requests(
        self, identifier: str, max_per_minute: int, max_per_hour: int
    ) -> Tuple[int, int]:
        """Get remaining requests for the identifier.

        Args:
            identifier: Unique identifier
            max_per_minute: Maximum per minute
            max_per_hour: Maximum per hour

        Returns:
            Tuple of (remaining_per_minute, remaining_per_hour)
        """
        # Purge stale timestamps first; otherwise requests that already
        # aged out of the windows would still be counted and the
        # reported remaining quota would be too low.
        self._cleanup_old_entries(identifier, time.time())

        minute_used = len(self._minute_store.get(identifier, []))
        hour_used = len(self._hour_store.get(identifier, []))
        return (
            max(0, max_per_minute - minute_used),
            max(0, max_per_hour - hour_used)
        )

    def _cleanup_old_entries(
        self, identifier: str, current_time: float
    ) -> None:
        """Remove entries older than the time windows.

        Args:
            identifier: Unique identifier
            current_time: Current timestamp
        """
        # Remove entries older than 1 minute
        minute_cutoff = current_time - 60
        self._minute_store[identifier] = [
            ts for ts in self._minute_store[identifier] if ts > minute_cutoff
        ]

        # Remove entries older than 1 hour
        hour_cutoff = current_time - 3600
        self._hour_store[identifier] = [
            ts for ts in self._hour_store[identifier] if ts > hour_cutoff
        ]

        # Drop empty lists entirely so idle identifiers do not leak memory.
        if not self._minute_store[identifier]:
            del self._minute_store[identifier]
        if not self._hour_store[identifier]:
            del self._hour_store[identifier]
|
||||
|
||||
|
||||
class RateLimitMiddleware(BaseHTTPMiddleware):
    """Middleware for API rate limiting.

    Each request is keyed by user ID (when auth middleware populated
    ``request.state.user_id``) or by client IP, checked against a
    per-endpoint or default RateLimitConfig, and answered with HTTP 429
    plus a Retry-After header when over the limit. Successful responses
    gain X-RateLimit-* headers.
    """

    # Endpoint-specific rate limits (overrides defaults).
    # NOTE: matching is by exact path first, then by path prefix — see
    # _get_endpoint_config.
    ENDPOINT_LIMITS: Dict[str, RateLimitConfig] = {
        "/api/auth/login": RateLimitConfig(
            requests_per_minute=5,
            requests_per_hour=20,
        ),
        "/api/auth/register": RateLimitConfig(
            requests_per_minute=3,
            requests_per_hour=10,
        ),
        "/api/download": RateLimitConfig(
            requests_per_minute=10,
            requests_per_hour=100,
            authenticated_multiplier=3.0,
        ),
    }

    # Paths that bypass rate limiting.
    # NOTE(review): these are matched with startswith, so e.g. "/health"
    # also exempts "/healthz" or "/health-report" — confirm that is
    # intended.
    BYPASS_PATHS = {
        "/health",
        "/health/detailed",
        "/docs",
        "/redoc",
        "/openapi.json",
        "/static",
        "/ws",
    }

    def __init__(
        self,
        app,
        default_config: Optional[RateLimitConfig] = None,
    ):
        """Initialize rate limiting middleware.

        Args:
            app: FastAPI application
            default_config: Default rate limit configuration; a fresh
                RateLimitConfig() with its built-in defaults when omitted.
        """
        super().__init__(app)
        self.default_config = default_config or RateLimitConfig()
        # Per-process in-memory store; counts are not shared across workers.
        self.store = RateLimitStore()

    async def dispatch(self, request: Request, call_next: Callable):
        """Process request and apply rate limiting.

        Args:
            request: Incoming HTTP request
            call_next: Next middleware or endpoint handler

        Returns:
            HTTP response (either rate limit error or normal response)
        """
        # Check if path should bypass rate limiting
        if self._should_bypass(request.url.path):
            return await call_next(request)

        # Get identifier (user ID if authenticated, otherwise IP)
        identifier = self._get_identifier(request)

        # Get rate limit configuration for this endpoint
        config = self._get_endpoint_config(request.url.path)

        # Apply authenticated user multiplier if applicable
        is_authenticated = self._is_authenticated(request)
        max_per_minute = int(
            config.requests_per_minute *
            (config.authenticated_multiplier if is_authenticated else 1.0)
        )
        max_per_hour = int(
            config.requests_per_hour *
            (config.authenticated_multiplier if is_authenticated else 1.0)
        )

        # Check rate limit.
        # NOTE(review): check_limit and record_request below are two
        # separate steps; under a single-threaded event loop they run
        # without interleaving per request, but this is not safe across
        # processes — confirm deployment model.
        allowed, retry_after = self.store.check_limit(
            identifier,
            max_per_minute,
            max_per_hour,
        )

        if not allowed:
            return JSONResponse(
                status_code=status.HTTP_429_TOO_MANY_REQUESTS,
                content={"detail": "Rate limit exceeded"},
                headers={"Retry-After": str(retry_after)},
            )

        # Record the request before invoking the handler, so even failing
        # requests count against the quota.
        self.store.record_request(identifier)

        # Add rate limit headers to response
        response = await call_next(request)
        response.headers["X-RateLimit-Limit-Minute"] = str(max_per_minute)
        response.headers["X-RateLimit-Limit-Hour"] = str(max_per_hour)

        minute_remaining, hour_remaining = self.store.get_remaining_requests(
            identifier, max_per_minute, max_per_hour
        )

        response.headers["X-RateLimit-Remaining-Minute"] = str(
            minute_remaining
        )
        response.headers["X-RateLimit-Remaining-Hour"] = str(
            hour_remaining
        )

        return response

    def _should_bypass(self, path: str) -> bool:
        """Check if path should bypass rate limiting.

        Args:
            path: Request path

        Returns:
            True if path starts with any entry in BYPASS_PATHS
        """
        for bypass_path in self.BYPASS_PATHS:
            if path.startswith(bypass_path):
                return True
        return False

    def _get_identifier(self, request: Request) -> str:
        """Get unique identifier for rate limiting.

        Args:
            request: HTTP request

        Returns:
            "user:<id>" for authenticated requests, else "ip:<addr>"
        """
        # Try to get user ID from request state (set by auth middleware)
        user_id = getattr(request.state, "user_id", None)
        if user_id:
            return f"user:{user_id}"

        # Fall back to IP address.
        # Check for X-Forwarded-For header (proxy/load balancer).
        # NOTE(review): X-Forwarded-For is client-controlled; unless a
        # trusted proxy strips/sets it, callers can spoof it to evade
        # IP-based limits — confirm the proxy configuration.
        forwarded_for = request.headers.get("X-Forwarded-For")
        if forwarded_for:
            # Take the first IP in the chain (the original client, per
            # the conventional XFF format).
            client_ip = forwarded_for.split(",")[0].strip()
        else:
            client_ip = request.client.host if request.client else "unknown"

        return f"ip:{client_ip}"

    def _get_endpoint_config(self, path: str) -> RateLimitConfig:
        """Get rate limit configuration for endpoint.

        Args:
            path: Request path

        Returns:
            Rate limit configuration
        """
        # Check for exact match
        if path in self.ENDPOINT_LIMITS:
            return self.ENDPOINT_LIMITS[path]

        # Check for prefix match.
        # NOTE(review): when several prefixes match, dict insertion order
        # decides the winner — longest-prefix matching is not guaranteed.
        for endpoint_path, config in self.ENDPOINT_LIMITS.items():
            if path.startswith(endpoint_path):
                return config

        return self.default_config

    def _is_authenticated(self, request: Request) -> bool:
        """Check if request is from authenticated user.

        Args:
            request: HTTP request

        Returns:
            True if auth middleware stored a non-None user_id on
            request.state
        """
        return (
            hasattr(request.state, "user_id") and
            request.state.user_id is not None
        )
|
||||
@@ -1,423 +0,0 @@
|
||||
"""Analytics service for downloads, popularity, and performance metrics.
|
||||
|
||||
This module provides comprehensive analytics tracking including download
|
||||
statistics, series popularity analysis, storage usage trends, and
|
||||
performance reporting.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import psutil
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from src.server.database.models import DownloadQueueItem, DownloadStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
ANALYTICS_FILE = Path("data") / "analytics.json"
|
||||
|
||||
|
||||
@dataclass
class DownloadStats:
    """Download statistics snapshot.

    Aggregated over a query window; all fields default to zero so an
    empty period yields a valid, all-zero snapshot.
    """

    total_downloads: int = 0  # every queue item in the analyzed period
    successful_downloads: int = 0  # items with status COMPLETED
    failed_downloads: int = 0  # items with status FAILED
    total_bytes_downloaded: int = 0  # summed over successful items only
    # Mean speed of successful items; presumably MB/s (raw speeds divided
    # by 1024*1024 at the producer) — TODO confirm units.
    average_speed_mbps: float = 0.0
    success_rate: float = 0.0  # percentage in [0, 100]
    average_duration_seconds: float = 0.0  # always 0.0; not in the model
|
||||
|
||||
|
||||
@dataclass
class SeriesPopularity:
    """Series popularity metrics.

    One entry per series, ranked by download count.
    """

    series_name: str  # series title ("Unknown" when missing)
    download_count: int  # total queue items for the series
    total_size_bytes: int  # summed total_bytes across its downloads
    last_download: Optional[str] = None  # ISO-8601 timestamp, or None
    success_rate: float = 0.0  # percentage in [0, 100]
|
||||
|
||||
|
||||
@dataclass
class StorageAnalysis:
    """Storage usage analysis.

    Disk totals come from the root filesystem; per-directory sizes are
    computed by walking the respective trees. All sizes are in bytes.
    """

    total_storage_bytes: int = 0  # capacity of the root filesystem
    used_storage_bytes: int = 0
    free_storage_bytes: int = 0
    storage_percent_used: float = 0.0  # 0-100, as reported by the OS
    downloads_directory_size_bytes: int = 0  # size of data/
    cache_directory_size_bytes: int = 0  # size of data/cache/
    logs_directory_size_bytes: int = 0  # size of logs/
|
||||
|
||||
|
||||
@dataclass
class PerformanceReport:
    """Performance metrics and trends.

    Covers the window [period_start, period_end]; both bounds are
    ISO-8601 timestamps.
    """

    period_start: str
    period_end: str
    downloads_per_hour: float = 0.0  # queue items created per hour
    average_queue_size: float = 0.0  # mean of recorded queue-size samples
    # Current process RSS in MB at report time — a point-in-time reading,
    # not a true peak over the period.
    peak_memory_usage_mb: float = 0.0
    average_cpu_percent: float = 0.0  # single 1s CPU sample at report time
    uptime_seconds: float = 0.0  # host uptime (system boot), not app uptime
    error_rate: float = 0.0  # failed downloads as percentage of all
    # Raw per-minute samples (timestamp, queue_size, cpu, memory, ...)
    # retained in the report; capped by the producer.
    samples: List[Dict[str, Any]] = field(default_factory=list)
|
||||
|
||||
|
||||
class AnalyticsService:
    """Service for tracking and reporting analytics data.

    Persists a rolling analytics snapshot (performance samples, download
    stats, storage history) as JSON on disk and computes reports from
    the download queue table and system metrics.
    """

    def __init__(self):
        """Initialize the analytics service."""
        self.analytics_file = ANALYTICS_FILE
        self._ensure_analytics_file()

    def _write_default_data(self) -> None:
        """(Re)write the analytics file with default contents."""
        # write_text raises FileNotFoundError when the parent directory
        # ("data/") does not exist yet, e.g. on a fresh install — create
        # it first.
        self.analytics_file.parent.mkdir(parents=True, exist_ok=True)
        default_data = {
            "created_at": datetime.now().isoformat(),
            "last_updated": datetime.now().isoformat(),
            "download_stats": asdict(DownloadStats()),
            "series_popularity": [],
            "storage_history": [],
            "performance_samples": [],
        }
        self.analytics_file.write_text(json.dumps(default_data, indent=2))

    def _ensure_analytics_file(self) -> None:
        """Ensure analytics file exists with default data."""
        if not self.analytics_file.exists():
            self._write_default_data()

    def _load_analytics(self) -> Dict[str, Any]:
        """Load analytics data from file.

        A missing or corrupt file is replaced with default contents
        before re-reading.
        """
        try:
            return json.loads(self.analytics_file.read_text())
        except (FileNotFoundError, json.JSONDecodeError):
            # Rewrite unconditionally: a corrupt-but-existing file would
            # survive an exists() check and raise the same decode error
            # again on the retry below.
            self._write_default_data()
            return json.loads(self.analytics_file.read_text())

    def _save_analytics(self, data: Dict[str, Any]) -> None:
        """Save analytics data to file, stamping last_updated."""
        data["last_updated"] = datetime.now().isoformat()
        self.analytics_file.write_text(json.dumps(data, indent=2))

    async def get_download_stats(
        self, db: AsyncSession, days: int = 30
    ) -> DownloadStats:
        """Get download statistics for the specified period.

        Args:
            db: Database session
            days: Number of days to analyze

        Returns:
            DownloadStats with aggregated download data
        """
        cutoff_date = datetime.now() - timedelta(days=days)

        # Query downloads within period
        stmt = select(DownloadQueueItem).where(
            DownloadQueueItem.created_at >= cutoff_date
        )
        result = await db.execute(stmt)
        downloads = result.scalars().all()

        if not downloads:
            return DownloadStats()

        successful = [d for d in downloads
                      if d.status == DownloadStatus.COMPLETED]
        failed = [d for d in downloads
                  if d.status == DownloadStatus.FAILED]

        total_bytes = sum(d.total_bytes or 0 for d in successful)
        avg_speed_list = [
            d.download_speed or 0.0 for d in successful if d.download_speed
        ]
        # NOTE(review): assumes download_speed is stored in bytes/s so
        # dividing by 1024*1024 yields MB/s — confirm at the producer.
        avg_speed_mbps = (
            sum(avg_speed_list) / len(avg_speed_list) / (1024 * 1024)
            if avg_speed_list
            else 0.0
        )

        # downloads is known non-empty here (early return above); the
        # guard is kept for safety against future refactors.
        success_rate = (
            len(successful) / len(downloads) * 100 if downloads else 0.0
        )

        return DownloadStats(
            total_downloads=len(downloads),
            successful_downloads=len(successful),
            failed_downloads=len(failed),
            total_bytes_downloaded=total_bytes,
            average_speed_mbps=avg_speed_mbps,
            success_rate=success_rate,
            average_duration_seconds=0.0,  # Not available in model
        )

    async def get_series_popularity(
        self, db: AsyncSession, limit: int = 10
    ) -> List[SeriesPopularity]:
        """Get most popular series by download count.

        Args:
            db: Database session
            limit: Maximum number of series to return

        Returns:
            List of SeriesPopularity objects
        """
        # Use raw SQL approach since we need to group and join
        from sqlalchemy import text

        query = text("""
            SELECT
                s.title as series_name,
                COUNT(d.id) as download_count,
                SUM(d.total_bytes) as total_size,
                MAX(d.created_at) as last_download,
                SUM(CASE WHEN d.status = 'COMPLETED'
                    THEN 1 ELSE 0 END) as successful
            FROM download_queue d
            JOIN anime_series s ON d.series_id = s.id
            GROUP BY s.id, s.title
            ORDER BY download_count DESC
            LIMIT :limit
        """)

        result = await db.execute(query, {"limit": limit})
        rows = result.all()

        popularity = []
        for row in rows:
            # Row layout matches the SELECT column order above.
            success_rate = 0.0
            download_count = row[1] or 0
            if download_count > 0:
                successful = row[4] or 0
                success_rate = (successful / download_count * 100)

            popularity.append(
                SeriesPopularity(
                    series_name=row[0] or "Unknown",
                    download_count=download_count,
                    total_size_bytes=row[2] or 0,
                    last_download=row[3].isoformat()
                    if row[3]
                    else None,
                    success_rate=success_rate,
                )
            )

        return popularity

    def get_storage_analysis(self) -> StorageAnalysis:
        """Get current storage usage analysis.

        Returns:
            StorageAnalysis with storage breakdown; an all-zero
            StorageAnalysis when the probe fails.
        """
        try:
            # Get disk usage for the root filesystem.
            disk = psutil.disk_usage("/")

            analysis = StorageAnalysis(
                total_storage_bytes=disk.total,
                used_storage_bytes=disk.used,
                free_storage_bytes=disk.free,
                storage_percent_used=disk.percent,
                downloads_directory_size_bytes=self._get_dir_size(
                    Path("data")
                ),
                cache_directory_size_bytes=self._get_dir_size(
                    Path("data") / "cache"
                ),
                logs_directory_size_bytes=self._get_dir_size(
                    Path("logs")
                ),
            )

            return analysis

        except Exception as e:
            # Best-effort: storage probing must never break callers.
            logger.error(f"Storage analysis failed: {e}")
            return StorageAnalysis()

    def _get_dir_size(self, path: Path) -> int:
        """Calculate total size of directory.

        Args:
            path: Directory path

        Returns:
            Total size in bytes (0 when the directory does not exist)
        """
        if not path.exists():
            return 0

        total = 0
        try:
            for item in path.rglob("*"):
                if item.is_file():
                    total += item.stat().st_size
        except (OSError, PermissionError):
            # Partial totals are acceptable; skip unreadable entries.
            pass

        return total

    async def get_performance_report(
        self, db: AsyncSession, hours: int = 24
    ) -> PerformanceReport:
        """Get performance metrics for the specified period.

        Args:
            db: Database session
            hours: Number of hours to analyze

        Returns:
            PerformanceReport with performance metrics
        """
        cutoff_time = datetime.now() - timedelta(hours=hours)

        # Get download metrics
        stmt = select(DownloadQueueItem).where(
            DownloadQueueItem.created_at >= cutoff_time
        )
        result = await db.execute(stmt)
        downloads = result.scalars().all()

        downloads_per_hour = len(downloads) / max(hours, 1)

        # Get queue size over time (estimated from recorded samples)
        analytics = self._load_analytics()
        performance_samples = analytics.get("performance_samples", [])

        # Filter recent samples; the sentinel date keeps malformed
        # samples (missing timestamp) out of the window.
        recent_samples = [
            s
            for s in performance_samples
            if datetime.fromisoformat(s.get("timestamp", "2000-01-01"))
            >= cutoff_time
        ]

        avg_queue = sum(
            s.get("queue_size", 0) for s in recent_samples
        ) / len(recent_samples) if recent_samples else 0.0

        # Get memory and CPU stats for this process.
        # NOTE: cpu_percent(interval=1) blocks for one second.
        process = psutil.Process()
        memory_info = process.memory_info()
        peak_memory_mb = memory_info.rss / (1024 * 1024)

        cpu_percent = process.cpu_percent(interval=1)

        # Calculate error rate
        failed_count = sum(
            1 for d in downloads
            if d.status == DownloadStatus.FAILED
        )
        error_rate = (
            failed_count / len(downloads) * 100 if downloads else 0.0
        )

        # Uptime is host boot time, not application start time.
        boot_time = datetime.fromtimestamp(psutil.boot_time())
        uptime_seconds = (datetime.now() - boot_time).total_seconds()

        return PerformanceReport(
            period_start=cutoff_time.isoformat(),
            period_end=datetime.now().isoformat(),
            downloads_per_hour=downloads_per_hour,
            average_queue_size=avg_queue,
            peak_memory_usage_mb=peak_memory_mb,
            average_cpu_percent=cpu_percent,
            uptime_seconds=uptime_seconds,
            error_rate=error_rate,
            samples=recent_samples[-100:],  # Keep last 100 samples
        )

    def record_performance_sample(
        self,
        queue_size: int,
        active_downloads: int,
        cpu_percent: float,
        memory_mb: float,
    ) -> None:
        """Record a performance metric sample.

        Args:
            queue_size: Current queue size
            active_downloads: Number of active downloads
            cpu_percent: Current CPU usage percentage
            memory_mb: Current memory usage in MB
        """
        analytics = self._load_analytics()
        samples = analytics.get("performance_samples", [])

        sample = {
            "timestamp": datetime.now().isoformat(),
            "queue_size": queue_size,
            "active_downloads": active_downloads,
            "cpu_percent": cpu_percent,
            "memory_mb": memory_mb,
        }

        samples.append(sample)

        # Keep only recent samples (7 days worth at 1 sample per minute)
        max_samples = 7 * 24 * 60
        if len(samples) > max_samples:
            samples = samples[-max_samples:]

        analytics["performance_samples"] = samples
        self._save_analytics(analytics)

    async def generate_summary_report(
        self, db: AsyncSession
    ) -> Dict[str, Any]:
        """Generate comprehensive analytics summary.

        Args:
            db: Database session

        Returns:
            Summary report with all analytics
        """
        download_stats = await self.get_download_stats(db)
        series_popularity = await self.get_series_popularity(db, limit=5)
        storage = self.get_storage_analysis()
        performance = await self.get_performance_report(db)

        return {
            "timestamp": datetime.now().isoformat(),
            "download_stats": asdict(download_stats),
            "series_popularity": [
                asdict(s) for s in series_popularity
            ],
            "storage_analysis": asdict(storage),
            "performance_report": asdict(performance),
        }
|
||||
|
||||
|
||||
_analytics_service_instance: Optional[AnalyticsService] = None


def get_analytics_service() -> AnalyticsService:
    """Return the process-wide AnalyticsService, creating it lazily.

    Returns:
        The singleton AnalyticsService instance.
    """
    global _analytics_service_instance
    service = _analytics_service_instance
    if service is None:
        service = AnalyticsService()
        _analytics_service_instance = service
    return service
|
||||
@@ -1,610 +0,0 @@
|
||||
"""
|
||||
Audit Service for AniWorld.
|
||||
|
||||
This module provides comprehensive audit logging for security-critical
|
||||
operations including authentication, configuration changes, and downloads.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AuditEventType(str, Enum):
    """Types of audit events.

    Values use a dotted "<area>.<action>" scheme; the str base class
    lets events serialize directly into the JSON audit log.
    """

    # Authentication events
    AUTH_SETUP = "auth.setup"
    AUTH_LOGIN_SUCCESS = "auth.login.success"
    AUTH_LOGIN_FAILURE = "auth.login.failure"
    AUTH_LOGOUT = "auth.logout"
    AUTH_TOKEN_REFRESH = "auth.token.refresh"
    AUTH_TOKEN_INVALID = "auth.token.invalid"

    # Configuration events
    CONFIG_READ = "config.read"
    CONFIG_UPDATE = "config.update"
    CONFIG_BACKUP = "config.backup"
    CONFIG_RESTORE = "config.restore"
    CONFIG_DELETE = "config.delete"

    # Download events
    DOWNLOAD_ADDED = "download.added"
    DOWNLOAD_STARTED = "download.started"
    DOWNLOAD_COMPLETED = "download.completed"
    DOWNLOAD_FAILED = "download.failed"
    DOWNLOAD_CANCELLED = "download.cancelled"
    DOWNLOAD_REMOVED = "download.removed"

    # Queue events
    QUEUE_STARTED = "queue.started"
    QUEUE_STOPPED = "queue.stopped"
    QUEUE_PAUSED = "queue.paused"
    QUEUE_RESUMED = "queue.resumed"
    QUEUE_CLEARED = "queue.cleared"

    # System events
    SYSTEM_STARTUP = "system.startup"
    SYSTEM_SHUTDOWN = "system.shutdown"
    SYSTEM_ERROR = "system.error"
|
||||
|
||||
|
||||
class AuditEventSeverity(str, Enum):
    """Severity levels for audit events.

    Mirrors standard logging levels; ERROR and CRITICAL events are also
    mirrored to the application logger by AuditService.
    """

    DEBUG = "debug"
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
    CRITICAL = "critical"
|
||||
|
||||
|
||||
class AuditEvent(BaseModel):
    """Audit event model.

    One record per security-relevant action; serialized to JSON Lines
    by the file storage backend.
    """

    # NOTE(review): datetime.utcnow produces naive timestamps and is
    # deprecated on modern Python — consider datetime.now(timezone.utc),
    # after confirming stored logs and comparisons tolerate offsets.
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    event_type: AuditEventType
    severity: AuditEventSeverity = AuditEventSeverity.INFO
    user_id: Optional[str] = None
    ip_address: Optional[str] = None
    user_agent: Optional[str] = None
    resource: Optional[str] = None  # resource being accessed, if any
    action: Optional[str] = None  # action performed on the resource
    status: str = "success"  # outcome label, e.g. "success"/"failure"
    message: str  # human-readable description (required)
    details: Optional[Dict[str, Any]] = None  # free-form extra context
    session_id: Optional[str] = None

    class Config:
        """Pydantic config.

        NOTE(review): class-based Config with json_encoders is the
        pydantic v1 style, while model_dump_json (used elsewhere in this
        file) is the v2 API — confirm which pydantic major version is
        pinned and migrate to model_config/serializers if v2.
        """

        json_encoders = {datetime: lambda v: v.isoformat()}
|
||||
|
||||
|
||||
class AuditLogStorage:
    """Abstract interface for audit log storage backends.

    Concrete backends (e.g. file-based) implement all three coroutine
    methods; every method here raises NotImplementedError.
    """

    async def write_event(self, event: AuditEvent) -> None:
        """Persist a single audit event.

        Args:
            event: Audit event to write
        """
        raise NotImplementedError

    async def read_events(
        self,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        event_types: Optional[List[AuditEventType]] = None,
        user_id: Optional[str] = None,
        limit: int = 100,
    ) -> List[AuditEvent]:
        """Fetch stored audit events matching the given filters.

        Args:
            start_time: Start of time range
            end_time: End of time range
            event_types: Filter by event types
            user_id: Filter by user ID
            limit: Maximum number of events to return

        Returns:
            List of audit events
        """
        raise NotImplementedError

    async def cleanup_old_events(self, days: int = 90) -> int:
        """Delete audit events older than the retention window.

        Args:
            days: Number of days to retain

        Returns:
            Number of events deleted
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class FileAuditLogStorage(AuditLogStorage):
    """File-based audit log storage.

    Writes one JSON Lines file per calendar day
    (``audit_YYYY-MM-DD.jsonl``) under the configured directory.
    """

    def __init__(self, log_directory: str = "logs/audit"):
        """
        Initialize file-based audit log storage.

        Args:
            log_directory: Directory to store audit logs
        """
        self.log_directory = Path(log_directory)
        self.log_directory.mkdir(parents=True, exist_ok=True)
        # NOTE(review): these two attributes are assigned here but never
        # read anywhere in this class — likely leftovers from a cached
        # file-handle design; confirm and remove if unused.
        self._current_date: Optional[str] = None
        self._current_file: Optional[Path] = None

    def _get_log_file(self, date: datetime) -> Path:
        """
        Get log file path for a specific date.

        Args:
            date: Date for log file

        Returns:
            Path to log file (audit_YYYY-MM-DD.jsonl)
        """
        date_str = date.strftime("%Y-%m-%d")
        return self.log_directory / f"audit_{date_str}.jsonl"

    async def write_event(self, event: AuditEvent) -> None:
        """
        Write an audit event to file.

        Failures are logged and swallowed so audit logging never breaks
        the calling operation.

        Args:
            event: Audit event to write
        """
        # File is chosen by the event's own timestamp, so late-arriving
        # events land in their original day's file.
        log_file = self._get_log_file(event.timestamp)

        try:
            # Append-only JSONL; one event per line.
            with open(log_file, "a", encoding="utf-8") as f:
                f.write(event.model_dump_json() + "\n")
        except Exception as e:
            logger.error(f"Failed to write audit event to file: {e}")

    async def read_events(
        self,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        event_types: Optional[List[AuditEventType]] = None,
        user_id: Optional[str] = None,
        limit: int = 100,
    ) -> List[AuditEvent]:
        """
        Read audit events from files.

        Defaults to the last 7 days when no time range is given.
        Timestamps are compared naively; assumes events were written
        with naive UTC timestamps (datetime.utcnow) — TODO confirm.

        Args:
            start_time: Start of time range
            end_time: End of time range
            event_types: Filter by event types
            user_id: Filter by user ID
            limit: Maximum number of events to return

        Returns:
            List of audit events, sorted newest first
        """
        if start_time is None:
            start_time = datetime.utcnow() - timedelta(days=7)
        if end_time is None:
            end_time = datetime.utcnow()

        events: List[AuditEvent] = []
        current_date = start_time.date()
        end_date = end_time.date()

        # Read from all log files in date range.
        # NOTE(review): collection stops once `limit` events are gathered
        # while scanning files oldest-date-first, and only then sorts
        # descending — so when more than `limit` events match, the OLDEST
        # ones are returned, not the most recent. Confirm whether callers
        # expect the newest events.
        while current_date <= end_date and len(events) < limit:
            log_file = self._get_log_file(datetime.combine(current_date, datetime.min.time()))

            if log_file.exists():
                try:
                    with open(log_file, "r", encoding="utf-8") as f:
                        for line in f:
                            if len(events) >= limit:
                                break

                            try:
                                event_data = json.loads(line.strip())
                                event = AuditEvent(**event_data)

                                # Apply filters
                                if event.timestamp < start_time or event.timestamp > end_time:
                                    continue

                                if event_types and event.event_type not in event_types:
                                    continue

                                if user_id and event.user_id != user_id:
                                    continue

                                events.append(event)

                            except (json.JSONDecodeError, ValueError) as e:
                                # Skip individually corrupt lines; keep
                                # reading the rest of the file.
                                logger.warning(f"Failed to parse audit event: {e}")

                except Exception as e:
                    logger.error(f"Failed to read audit log file {log_file}: {e}")

            current_date += timedelta(days=1)

        # Sort by timestamp descending
        events.sort(key=lambda e: e.timestamp, reverse=True)
        return events[:limit]

    async def cleanup_old_events(self, days: int = 90) -> int:
        """
        Clean up audit events older than specified days.

        Deletion is per-file (whole days at a time), keyed on the date
        embedded in the filename.

        Args:
            days: Number of days to retain

        Returns:
            Number of files deleted (not individual events)
        """
        cutoff_date = datetime.utcnow() - timedelta(days=days)
        deleted_count = 0

        for log_file in self.log_directory.glob("audit_*.jsonl"):
            try:
                # Extract date from filename
                date_str = log_file.stem.replace("audit_", "")
                file_date = datetime.strptime(date_str, "%Y-%m-%d")

                if file_date < cutoff_date:
                    log_file.unlink()
                    deleted_count += 1
                    logger.info(f"Deleted old audit log: {log_file}")

            except (ValueError, OSError) as e:
                # Unparseable filename or deletion failure: skip the file.
                logger.warning(f"Failed to process audit log file {log_file}: {e}")

        return deleted_count
|
||||
|
||||
|
||||
class AuditService:
    """High-level API for recording and querying security audit events.

    Thin convenience wrappers (``log_login_success`` etc.) all funnel into
    :meth:`log_event`, which persists to the configured storage backend.
    """

    def __init__(self, storage: Optional[AuditLogStorage] = None):
        """Create the service.

        Args:
            storage: Storage backend; defaults to file-based storage.
        """
        self.storage = storage or FileAuditLogStorage()

    async def log_event(
        self,
        event_type: AuditEventType,
        message: str,
        severity: AuditEventSeverity = AuditEventSeverity.INFO,
        user_id: Optional[str] = None,
        ip_address: Optional[str] = None,
        user_agent: Optional[str] = None,
        resource: Optional[str] = None,
        action: Optional[str] = None,
        status: str = "success",
        details: Optional[Dict[str, Any]] = None,
        session_id: Optional[str] = None,
    ) -> None:
        """Persist one audit event and mirror notable ones to the app log.

        Args:
            event_type: Category of the event.
            message: Human-readable description.
            severity: Event severity (defaults to INFO).
            user_id: Acting user, if known.
            ip_address: Client IP address, if known.
            user_agent: Client user agent, if known.
            resource: Resource the event concerns.
            action: Action performed.
            status: Outcome string (defaults to "success").
            details: Arbitrary extra payload.
            session_id: Session identifier, if known.
        """
        record = AuditEvent(
            event_type=event_type,
            severity=severity,
            user_id=user_id,
            ip_address=ip_address,
            user_agent=user_agent,
            resource=resource,
            action=action,
            status=status,
            message=message,
            details=details,
            session_id=session_id,
        )

        await self.storage.write_event(record)

        # Mirror WARNING and above into the application logger so operators
        # see them without tailing the audit files.
        if severity in (AuditEventSeverity.ERROR, AuditEventSeverity.CRITICAL):
            emit = logger.error
        elif severity == AuditEventSeverity.WARNING:
            emit = logger.warning
        else:
            emit = None
        if emit is not None:
            emit(f"Audit: {message}", extra={"audit_event": record.model_dump()})

    async def log_auth_setup(
        self, user_id: str, ip_address: Optional[str] = None
    ) -> None:
        """Record that initial authentication was configured."""
        await self.log_event(
            event_type=AuditEventType.AUTH_SETUP,
            message=f"Authentication configured by user {user_id}",
            user_id=user_id,
            ip_address=ip_address,
            action="setup",
        )

    async def log_login_success(
        self,
        user_id: str,
        ip_address: Optional[str] = None,
        user_agent: Optional[str] = None,
        session_id: Optional[str] = None,
    ) -> None:
        """Record a successful login."""
        await self.log_event(
            event_type=AuditEventType.AUTH_LOGIN_SUCCESS,
            message=f"User {user_id} logged in successfully",
            user_id=user_id,
            ip_address=ip_address,
            user_agent=user_agent,
            session_id=session_id,
            action="login",
        )

    async def log_login_failure(
        self,
        user_id: Optional[str] = None,
        ip_address: Optional[str] = None,
        user_agent: Optional[str] = None,
        reason: str = "Invalid credentials",
    ) -> None:
        """Record a failed login attempt (logged at WARNING severity)."""
        await self.log_event(
            event_type=AuditEventType.AUTH_LOGIN_FAILURE,
            message=f"Login failed for user {user_id or 'unknown'}: {reason}",
            severity=AuditEventSeverity.WARNING,
            user_id=user_id,
            ip_address=ip_address,
            user_agent=user_agent,
            status="failure",
            action="login",
            details={"reason": reason},
        )

    async def log_logout(
        self,
        user_id: str,
        ip_address: Optional[str] = None,
        session_id: Optional[str] = None,
    ) -> None:
        """Record a user logout."""
        await self.log_event(
            event_type=AuditEventType.AUTH_LOGOUT,
            message=f"User {user_id} logged out",
            user_id=user_id,
            ip_address=ip_address,
            session_id=session_id,
            action="logout",
        )

    async def log_config_update(
        self,
        user_id: str,
        changes: Dict[str, Any],
        ip_address: Optional[str] = None,
    ) -> None:
        """Record a configuration update, including the changed keys."""
        await self.log_event(
            event_type=AuditEventType.CONFIG_UPDATE,
            message=f"Configuration updated by user {user_id}",
            user_id=user_id,
            ip_address=ip_address,
            resource="config",
            action="update",
            details={"changes": changes},
        )

    async def log_config_backup(
        self, user_id: str, backup_file: str, ip_address: Optional[str] = None
    ) -> None:
        """Record a configuration backup."""
        await self.log_event(
            event_type=AuditEventType.CONFIG_BACKUP,
            message=f"Configuration backed up by user {user_id}",
            user_id=user_id,
            ip_address=ip_address,
            resource="config",
            action="backup",
            details={"backup_file": backup_file},
        )

    async def log_config_restore(
        self, user_id: str, backup_file: str, ip_address: Optional[str] = None
    ) -> None:
        """Record a configuration restore."""
        await self.log_event(
            event_type=AuditEventType.CONFIG_RESTORE,
            message=f"Configuration restored by user {user_id}",
            user_id=user_id,
            ip_address=ip_address,
            resource="config",
            action="restore",
            details={"backup_file": backup_file},
        )

    async def log_download_added(
        self,
        user_id: str,
        series_name: str,
        episodes: List[str],
        ip_address: Optional[str] = None,
    ) -> None:
        """Record that episodes were queued for download."""
        await self.log_event(
            event_type=AuditEventType.DOWNLOAD_ADDED,
            message=f"Download added by user {user_id}: {series_name}",
            user_id=user_id,
            ip_address=ip_address,
            resource=series_name,
            action="add",
            details={"episodes": episodes},
        )

    async def log_download_completed(
        self, series_name: str, episode: str, file_path: str
    ) -> None:
        """Record a completed download (system-initiated, no user)."""
        await self.log_event(
            event_type=AuditEventType.DOWNLOAD_COMPLETED,
            message=f"Download completed: {series_name} - {episode}",
            resource=series_name,
            action="download",
            details={"episode": episode, "file_path": file_path},
        )

    async def log_download_failed(
        self, series_name: str, episode: str, error: str
    ) -> None:
        """Record a failed download at ERROR severity."""
        await self.log_event(
            event_type=AuditEventType.DOWNLOAD_FAILED,
            message=f"Download failed: {series_name} - {episode}",
            severity=AuditEventSeverity.ERROR,
            resource=series_name,
            action="download",
            status="failure",
            details={"episode": episode, "error": error},
        )

    async def log_queue_operation(
        self,
        user_id: str,
        operation: str,
        ip_address: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Record a queue lifecycle operation (start/stop/pause/resume/clear).

        Unknown operation strings fall back to SYSTEM_ERROR, matching the
        original behavior.
        """
        op_map = {
            "start": AuditEventType.QUEUE_STARTED,
            "stop": AuditEventType.QUEUE_STOPPED,
            "pause": AuditEventType.QUEUE_PAUSED,
            "resume": AuditEventType.QUEUE_RESUMED,
            "clear": AuditEventType.QUEUE_CLEARED,
        }
        await self.log_event(
            event_type=op_map.get(operation, AuditEventType.SYSTEM_ERROR),
            message=f"Queue {operation} by user {user_id}",
            user_id=user_id,
            ip_address=ip_address,
            resource="queue",
            action=operation,
            details=details,
        )

    async def log_system_error(
        self, error: str, details: Optional[Dict[str, Any]] = None
    ) -> None:
        """Record a system error at ERROR severity."""
        await self.log_event(
            event_type=AuditEventType.SYSTEM_ERROR,
            message=f"System error: {error}",
            severity=AuditEventSeverity.ERROR,
            status="error",
            details=details,
        )

    async def get_events(
        self,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        event_types: Optional[List[AuditEventType]] = None,
        user_id: Optional[str] = None,
        limit: int = 100,
    ) -> List[AuditEvent]:
        """Query stored audit events via the storage backend.

        Args:
            start_time: Start of time range.
            end_time: End of time range.
            event_types: Filter by event types.
            user_id: Filter by user ID.
            limit: Maximum number of events to return.

        Returns:
            Matching audit events.
        """
        return await self.storage.read_events(
            start_time=start_time,
            end_time=end_time,
            event_types=event_types,
            user_id=user_id,
            limit=limit,
        )

    async def cleanup_old_events(self, days: int = 90) -> int:
        """Delete audit events older than *days* via the storage backend.

        Args:
            days: Number of days to retain.

        Returns:
            Number of events deleted.
        """
        return await self.storage.cleanup_old_events(days)
|
||||
|
||||
|
||||
# Global audit service instance (lazily created)
_audit_service: Optional[AuditService] = None


def get_audit_service() -> AuditService:
    """Return the process-wide audit service, creating it on first use.

    Returns:
        The shared AuditService instance.
    """
    global _audit_service
    if _audit_service is None:
        _audit_service = AuditService()
    return _audit_service
|
||||
|
||||
|
||||
def configure_audit_service(storage: Optional[AuditLogStorage] = None) -> AuditService:
    """Replace the global audit service with one using *storage*.

    Args:
        storage: Custom storage backend; ``None`` selects the default.

    Returns:
        The newly configured AuditService instance.
    """
    global _audit_service
    _audit_service = AuditService(storage=storage)
    return _audit_service
|
||||
@@ -1,432 +0,0 @@
|
||||
"""Backup and restore service for configuration and data management."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import tarfile
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class BackupInfo:
    """Metadata describing a single backup archive."""

    name: str  # archive filename
    timestamp: datetime  # creation time
    size_bytes: int  # archive size on disk
    backup_type: str  # 'config', 'data', 'full'
    description: Optional[str] = None  # optional operator-supplied note
|
||||
|
||||
|
||||
class BackupService:
    """Service for creating, restoring, listing, and pruning backups.

    Backups are gzip'd tar archives written into ``backup_dir`` with names
    of the form ``<type>_<YYYYmmdd_HHMMSS>.tar.gz``.
    """

    def __init__(
        self,
        backup_dir: str = "data/backups",
        config_dir: str = "data",
        database_path: str = "data/aniworld.db",
    ):
        """Initialize backup service.

        Args:
            backup_dir: Directory to store backups.
            config_dir: Directory containing configuration files.
            database_path: Path to the database file.
        """
        self.backup_dir = Path(backup_dir)
        self.config_dir = Path(config_dir)
        self.database_path = Path(database_path)

        # Ensure the target directory exists before any backup is attempted.
        self.backup_dir.mkdir(parents=True, exist_ok=True)

    def _write_archive(
        self,
        prefix: str,
        files: List[Path],
        backup_type: str,
        description: str,
    ) -> BackupInfo:
        """Create one tar.gz archive from *files* (missing files skipped).

        Returns:
            BackupInfo for the created archive. Raises on I/O failure;
            callers translate that into their logged None/False result.
        """
        timestamp = datetime.now()
        backup_name = f"{prefix}_{timestamp.strftime('%Y%m%d_%H%M%S')}.tar.gz"
        backup_path = self.backup_dir / backup_name

        with tarfile.open(backup_path, "w:gz") as tar:
            for source in files:
                if source.exists():
                    # Flatten: store each file under its bare name.
                    tar.add(source, arcname=source.name)

        return BackupInfo(
            name=backup_name,
            timestamp=timestamp,
            size_bytes=backup_path.stat().st_size,
            backup_type=backup_type,
            description=description,
        )

    def _extract_and_copy(
        self, backup_path: Path, member_name: str, destination: Path
    ) -> None:
        """Extract *backup_path* to a temp dir and copy *member_name* out.

        Bug fix vs. the original: the temporary extraction directory is now
        removed in a ``finally`` block, so it no longer leaks when the
        extraction or copy raises.
        """
        temp_dir = self.backup_dir / "temp_restore"
        temp_dir.mkdir(exist_ok=True)
        try:
            with tarfile.open(backup_path, "r:gz") as tar:
                # NOTE(review): extractall trusts member paths inside the
                # archive; safe only for locally created backups. Confirm
                # before ever accepting user-uploaded archives (path
                # traversal risk).
                tar.extractall(temp_dir)

            extracted = temp_dir / member_name
            if extracted.exists():
                shutil.copy(extracted, destination)
        finally:
            shutil.rmtree(temp_dir, ignore_errors=True)

    def backup_configuration(
        self, description: str = ""
    ) -> Optional[BackupInfo]:
        """Create a configuration backup.

        Args:
            description: Optional description for the backup.

        Returns:
            BackupInfo on success, None on failure (error is logged).
        """
        try:
            info = self._write_archive(
                "config",
                [self.config_dir / "config.json"],
                "config",
                description,
            )
            logger.info(f"Configuration backup created: {info.name}")
            return info
        except Exception as e:
            logger.error(f"Failed to create configuration backup: {e}")
            return None

    def backup_database(
        self, description: str = ""
    ) -> Optional[BackupInfo]:
        """Create a database backup.

        Args:
            description: Optional description for the backup.

        Returns:
            BackupInfo on success, None if the database file is missing
            or the archive could not be written.
        """
        try:
            if not self.database_path.exists():
                logger.warning(
                    f"Database file not found: {self.database_path}"
                )
                return None

            info = self._write_archive(
                "database", [self.database_path], "data", description
            )
            logger.info(f"Database backup created: {info.name}")
            return info
        except Exception as e:
            logger.error(f"Failed to create database backup: {e}")
            return None

    def backup_full(
        self, description: str = ""
    ) -> Optional[BackupInfo]:
        """Create a full system backup (config, database, download queue).

        Args:
            description: Optional description for the backup.

        Returns:
            BackupInfo on success, None on failure (error is logged).
        """
        try:
            info = self._write_archive(
                "full",
                [
                    self.config_dir / "config.json",
                    self.database_path,
                    self.config_dir / "download_queue.json",
                ],
                "full",
                description,
            )
            logger.info(f"Full backup created: {info.name}")
            return info
        except Exception as e:
            logger.error(f"Failed to create full backup: {e}")
            return None

    def restore_configuration(self, backup_name: str) -> bool:
        """Restore configuration from backup.

        Args:
            backup_name: Name of the backup to restore.

        Returns:
            bool: True if restore was successful.
        """
        try:
            backup_path = self.backup_dir / backup_name
            if not backup_path.exists():
                logger.error(f"Backup file not found: {backup_name}")
                return False

            self._extract_and_copy(
                backup_path, "config.json", self.config_dir / "config.json"
            )
            logger.info(f"Configuration restored from: {backup_name}")
            return True
        except Exception as e:
            logger.error(f"Failed to restore configuration: {e}")
            return False

    def restore_database(self, backup_name: str) -> bool:
        """Restore database from backup.

        A safety copy of the current database is written next to it as
        ``<name>.backup`` before the restore.

        Args:
            backup_name: Name of the backup to restore.

        Returns:
            bool: True if restore was successful.
        """
        try:
            backup_path = self.backup_dir / backup_name
            if not backup_path.exists():
                logger.error(f"Backup file not found: {backup_name}")
                return False

            if self.database_path.exists():
                current_backup = (
                    self.database_path.parent
                    / f"{self.database_path.name}.backup"
                )
                shutil.copy(self.database_path, current_backup)
                logger.info(f"Current database backed up to: {current_backup}")

            self._extract_and_copy(
                backup_path, self.database_path.name, self.database_path
            )
            logger.info(f"Database restored from: {backup_name}")
            return True
        except Exception as e:
            logger.error(f"Failed to restore database: {e}")
            return False

    def list_backups(
        self, backup_type: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """List available backups, newest first.

        Args:
            backup_type: Optional filter by backup type
                ('config', 'database', 'full').

        Returns:
            list: One dict per backup with name/type/size_bytes/created.
        """
        try:
            backups = []

            for backup_file in sorted(self.backup_dir.glob("*.tar.gz")):
                filename = backup_file.name
                # Filename convention: <type>_<timestamp>.tar.gz
                file_type = filename.split("_")[0]

                if backup_type and file_type != backup_type:
                    continue

                timestamp_str = (
                    filename.split("_", 1)[1].replace(".tar.gz", "")
                )

                backups.append(
                    {
                        "name": filename,
                        "type": file_type,
                        "size_bytes": backup_file.stat().st_size,
                        "created": timestamp_str,
                    }
                )

            # Timestamp strings sort chronologically (YYYYmmdd_HHMMSS).
            return sorted(backups, key=lambda x: x["created"], reverse=True)
        except Exception as e:
            logger.error(f"Failed to list backups: {e}")
            return []

    def delete_backup(self, backup_name: str) -> bool:
        """Delete a backup.

        Args:
            backup_name: Name of the backup to delete.

        Returns:
            bool: True if delete was successful.
        """
        try:
            backup_path = self.backup_dir / backup_name
            if not backup_path.exists():
                logger.warning(f"Backup not found: {backup_name}")
                return False

            backup_path.unlink()
            logger.info(f"Backup deleted: {backup_name}")
            return True
        except Exception as e:
            logger.error(f"Failed to delete backup: {e}")
            return False

    def cleanup_old_backups(
        self, max_backups: int = 10, backup_type: Optional[str] = None
    ) -> int:
        """Remove old backups, keeping only the most recent ones.

        Args:
            max_backups: Maximum number of backups to keep.
            backup_type: Optional filter by backup type.

        Returns:
            int: Number of backups deleted.
        """
        try:
            backups = self.list_backups(backup_type)
            if len(backups) <= max_backups:
                return 0

            deleted_count = 0
            # list_backups is newest-first, so everything past max_backups
            # is the oldest surplus.
            for backup in backups[max_backups:]:
                if self.delete_backup(backup["name"]):
                    deleted_count += 1

            logger.info(f"Cleaned up {deleted_count} old backups")
            return deleted_count
        except Exception as e:
            logger.error(f"Failed to cleanup old backups: {e}")
            return 0

    def export_anime_data(
        self, output_file: str
    ) -> bool:
        """Export anime library data to JSON.

        Currently writes a placeholder payload; integration with the anime
        service is pending (per the original implementation).

        Args:
            output_file: Path to export file.

        Returns:
            bool: True if export was successful.
        """
        try:
            export_data = {
                "timestamp": datetime.now().isoformat(),
                "anime_count": 0,
                "data": [],
            }

            with open(output_file, "w") as f:
                json.dump(export_data, f, indent=2)

            logger.info(f"Anime data exported to: {output_file}")
            return True
        except Exception as e:
            logger.error(f"Failed to export anime data: {e}")
            return False

    def import_anime_data(self, input_file: str) -> bool:
        """Import anime library data from JSON.

        Currently only validates the file; integration with the anime
        service is pending (per the original implementation).

        Args:
            input_file: Path to import file.

        Returns:
            bool: True if import was successful.
        """
        try:
            if not os.path.exists(input_file):
                logger.error(f"Import file not found: {input_file}")
                return False

            with open(input_file, "r") as f:
                json.load(f)  # load only to validate the JSON

            logger.info(f"Anime data imported from: {input_file}")
            return True
        except Exception as e:
            logger.error(f"Failed to import anime data: {e}")
            return False
|
||||
|
||||
|
||||
# Global backup service instance (lazily created)
_backup_service: Optional[BackupService] = None


def get_backup_service() -> BackupService:
    """Return the process-wide backup service, creating it on first use.

    Returns:
        BackupService: The shared backup service instance.
    """
    global _backup_service
    if _backup_service is None:
        _backup_service = BackupService()
    return _backup_service
|
||||
@@ -1,324 +0,0 @@
|
||||
"""Monitoring service for system resource tracking and metrics collection."""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import psutil
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from src.server.database.models import DownloadQueueItem
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class QueueMetrics:
    """Aggregate statistics for the download queue."""

    total_items: int = 0  # all queue entries
    pending_items: int = 0  # status == PENDING
    downloading_items: int = 0  # status == DOWNLOADING
    completed_items: int = 0  # status == COMPLETED
    failed_items: int = 0  # status == FAILED
    total_size_bytes: int = 0  # sum of known item sizes
    downloaded_bytes: int = 0  # sum of bytes fetched so far
    average_speed_mbps: float = 0.0  # mean speed of active downloads
    estimated_time_remaining: Optional[timedelta] = None  # None when unknown
    success_rate: float = 0.0  # completed / total, in percent
|
||||
|
||||
|
||||
@dataclass
class SystemMetrics:
    """System resource metrics sampled at a single point in time."""

    timestamp: datetime  # when the sample was taken
    cpu_percent: float  # overall CPU utilization
    memory_percent: float  # RAM utilization
    memory_available_mb: float  # free RAM in MiB
    disk_percent: float  # disk utilization of the sampled mount
    disk_free_mb: float  # free disk space in MiB
    uptime_seconds: float  # seconds since system boot
|
||||
|
||||
|
||||
@dataclass
class ErrorMetrics:
    """Error tracking statistics derived from the in-memory error log."""

    total_errors: int = 0  # all-time count for this process
    errors_24h: int = 0  # errors in the trailing 24 hours
    most_common_errors: Dict[str, int] = field(default_factory=dict)  # type -> count
    last_error_time: Optional[datetime] = None  # None when no recent errors
    error_rate_per_hour: float = 0.0  # errors_24h averaged per hour
|
||||
|
||||
|
||||
class MonitoringService:
    """Service for monitoring system resources and application metrics."""

    def __init__(self):
        """Initialize in-memory error and performance sample buffers."""
        # (timestamp, message) pairs for every error reported via log_error.
        self._error_log: List[tuple[datetime, str]] = []
        self._performance_samples: List[SystemMetrics] = []
        self._max_samples = 1440  # keep 24 hours of minute-granularity samples

    def get_system_metrics(self) -> SystemMetrics:
        """Sample current CPU/memory/disk usage and uptime via psutil.

        Also appends the sample to the bounded rolling performance buffer.

        Returns:
            SystemMetrics: Current system metrics.

        Raises:
            Exception: Propagates any psutil failure after logging it.
        """
        try:
            import time

            cpu_percent = psutil.cpu_percent(interval=1)
            memory_info = psutil.virtual_memory()
            disk_info = psutil.disk_usage("/")
            uptime_seconds = time.time() - psutil.boot_time()

            metrics = SystemMetrics(
                timestamp=datetime.now(),
                cpu_percent=cpu_percent,
                memory_percent=memory_info.percent,
                memory_available_mb=memory_info.available / (1024 * 1024),
                disk_percent=disk_info.percent,
                disk_free_mb=disk_info.free / (1024 * 1024),
                uptime_seconds=uptime_seconds,
            )

            # Keep the sample window bounded to _max_samples entries.
            self._performance_samples.append(metrics)
            if len(self._performance_samples) > self._max_samples:
                self._performance_samples.pop(0)

            return metrics
        except Exception as e:
            logger.error(f"Failed to get system metrics: {e}")
            raise

    async def get_queue_metrics(self, db: AsyncSession) -> QueueMetrics:
        """Compute download queue statistics from the database.

        Args:
            db: Database session.

        Returns:
            QueueMetrics: Queue statistics and progress.

        Raises:
            Exception: Propagates database failures after logging them.
        """
        try:
            result = await db.execute(select(DownloadQueueItem))
            items = result.scalars().all()

            if not items:
                return QueueMetrics()

            total_items = len(items)
            pending_items = sum(1 for i in items if i.status == "PENDING")
            downloading_items = sum(
                1 for i in items if i.status == "DOWNLOADING"
            )
            completed_items = sum(1 for i in items if i.status == "COMPLETED")
            failed_items = sum(1 for i in items if i.status == "FAILED")

            total_size_bytes = sum((i.total_bytes or 0) for i in items)
            downloaded_bytes = sum((i.downloaded_bytes or 0) for i in items)

            # Mean speed over active downloads. download_speed is assumed to
            # be bytes/sec (hence the MB conversion) — TODO confirm at source.
            speeds = [
                i.download_speed for i in items
                if i.status == "DOWNLOADING" and i.download_speed
            ]
            average_speed_mbps = (
                sum(speeds) / len(speeds) / (1024 * 1024) if speeds else 0
            )

            success_rate = (
                (completed_items / total_items * 100) if total_items > 0 else 0
            )

            # Bug fix: remaining bytes must be divided by a speed in
            # bytes/sec, not MB/sec — the previous code overestimated the
            # ETA by a factor of 1024*1024.
            estimated_time_remaining = None
            bytes_per_second = average_speed_mbps * 1024 * 1024
            if bytes_per_second > 0 and total_size_bytes > downloaded_bytes:
                remaining_bytes = total_size_bytes - downloaded_bytes
                estimated_time_remaining = timedelta(
                    seconds=remaining_bytes / bytes_per_second
                )

            return QueueMetrics(
                total_items=total_items,
                pending_items=pending_items,
                downloading_items=downloading_items,
                completed_items=completed_items,
                failed_items=failed_items,
                total_size_bytes=total_size_bytes,
                downloaded_bytes=downloaded_bytes,
                average_speed_mbps=average_speed_mbps,
                estimated_time_remaining=estimated_time_remaining,
                success_rate=success_rate,
            )
        except Exception as e:
            logger.error(f"Failed to get queue metrics: {e}")
            raise

    def log_error(self, error_message: str) -> None:
        """Record an error message with the current timestamp.

        Args:
            error_message: The error message to log.
        """
        self._error_log.append((datetime.now(), error_message))
        logger.debug(f"Error logged: {error_message}")

    def get_error_metrics(self) -> ErrorMetrics:
        """Summarize the in-memory error log.

        Error "type" is taken as the message text before the first colon.

        Returns:
            ErrorMetrics: Error statistics and trends.
        """
        total_errors = len(self._error_log)

        cutoff_time = datetime.now() - timedelta(hours=24)
        recent_errors = [
            (time, msg) for time, msg in self._error_log
            if time >= cutoff_time
        ]
        errors_24h = len(recent_errors)

        # Frequency by error type over the last 24h.
        error_counts: Dict[str, int] = {}
        for _, msg in recent_errors:
            error_type = msg.split(":")[0]
            error_counts[error_type] = error_counts.get(error_type, 0) + 1

        # Top 10 most frequent types.
        most_common_errors = dict(
            sorted(error_counts.items(), key=lambda x: x[1], reverse=True)[:10]
        )

        last_error_time = recent_errors[-1][0] if recent_errors else None

        # Average hourly rate over the 24h window (0 when no errors).
        error_rate_per_hour = errors_24h / 24

        return ErrorMetrics(
            total_errors=total_errors,
            errors_24h=errors_24h,
            most_common_errors=most_common_errors,
            last_error_time=last_error_time,
            error_rate_per_hour=error_rate_per_hour,
        )

    def get_performance_summary(self) -> Dict[str, Any]:
        """Summarize collected performance samples.

        Returns:
            dict: current/average/max/min for cpu, memory, and disk plus
            the sample count; empty dict when no samples exist yet.
        """
        if not self._performance_samples:
            return {}

        cpu_values = [m.cpu_percent for m in self._performance_samples]
        memory_values = [m.memory_percent for m in self._performance_samples]
        disk_values = [m.disk_percent for m in self._performance_samples]

        def _stats(values: List[float]) -> Dict[str, float]:
            # Shared current/average/max/min summary for one metric series.
            return {
                "current": values[-1],
                "average": sum(values) / len(values),
                "max": max(values),
                "min": min(values),
            }

        return {
            "cpu": _stats(cpu_values),
            "memory": _stats(memory_values),
            "disk": _stats(disk_values),
            "sample_count": len(self._performance_samples),
        }

    async def get_comprehensive_status(
        self, db: AsyncSession
    ) -> Dict[str, Any]:
        """Get comprehensive system status summary.

        Args:
            db: Database session.

        Returns:
            dict: Complete system status (system, queue, errors, performance).

        Raises:
            Exception: Propagates failures from any sub-collector.
        """
        try:
            system_metrics = self.get_system_metrics()
            queue_metrics = await self.get_queue_metrics(db)
            error_metrics = self.get_error_metrics()
            performance = self.get_performance_summary()

            return {
                "timestamp": datetime.now().isoformat(),
                "system": {
                    "cpu_percent": system_metrics.cpu_percent,
                    "memory_percent": system_metrics.memory_percent,
                    "disk_percent": system_metrics.disk_percent,
                    "uptime_seconds": system_metrics.uptime_seconds,
                },
                "queue": {
                    "total_items": queue_metrics.total_items,
                    "pending": queue_metrics.pending_items,
                    "downloading": queue_metrics.downloading_items,
                    "completed": queue_metrics.completed_items,
                    "failed": queue_metrics.failed_items,
                    "success_rate": round(queue_metrics.success_rate, 2),
                    "average_speed_mbps": round(
                        queue_metrics.average_speed_mbps, 2
                    ),
                },
                "errors": {
                    "total": error_metrics.total_errors,
                    "last_24h": error_metrics.errors_24h,
                    "rate_per_hour": round(
                        error_metrics.error_rate_per_hour, 2
                    ),
                    "most_common": error_metrics.most_common_errors,
                },
                "performance": performance,
            }
        except Exception as e:
            logger.error(f"Failed to get comprehensive status: {e}")
            raise
||||
|
||||
|
||||
# Global monitoring service instance (lazily created)
_monitoring_service: Optional[MonitoringService] = None


def get_monitoring_service() -> MonitoringService:
    """Return the process-wide monitoring service, creating it on first use.

    Returns:
        MonitoringService: The shared monitoring service instance.
    """
    global _monitoring_service
    if _monitoring_service is None:
        _monitoring_service = MonitoringService()
    return _monitoring_service
|
||||
Reference in New Issue
Block a user