Aniworld/tests/unit/test_monitoring_service.py
2025-10-22 09:20:35 +02:00

226 lines
6.2 KiB
Python

"""Unit tests for monitoring service."""
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock
import pytest
from src.server.services.monitoring_service import (
ErrorMetrics,
MonitoringService,
QueueMetrics,
SystemMetrics,
get_monitoring_service,
)
def test_monitoring_service_initialization():
    """A freshly constructed service starts with empty internal state."""
    svc = MonitoringService()
    assert svc is not None
    # Both internal buffers must begin life empty.
    assert svc._error_log == []
    assert svc._performance_samples == []
def test_get_system_metrics():
    """get_system_metrics returns a well-formed SystemMetrics snapshot."""
    snapshot = MonitoringService().get_system_metrics()
    assert isinstance(snapshot, SystemMetrics)
    # Utilization percentages may legitimately be zero...
    for pct in (snapshot.cpu_percent, snapshot.memory_percent, snapshot.disk_percent):
        assert pct >= 0
    # ...but uptime and free-capacity figures must be strictly positive.
    for value in (
        snapshot.uptime_seconds,
        snapshot.memory_available_mb,
        snapshot.disk_free_mb,
    ):
        assert value > 0
def test_system_metrics_stored():
    """Each get_system_metrics() call appends its sample for trend tracking."""
    svc = MonitoringService()
    first = svc.get_system_metrics()
    second = svc.get_system_metrics()
    # Samples are retained in call order.
    assert len(svc._performance_samples) == 2
    assert svc._performance_samples[0] == first
    assert svc._performance_samples[1] == second
@pytest.mark.asyncio
async def test_get_queue_metrics_empty():
    """Queue metrics over an empty queue report zero items and 0% success."""
    service = MonitoringService()
    mock_db = AsyncMock()
    # The Result object returned by `await db.execute(...)` is consumed
    # synchronously (`result.scalars().all()`), so it must be a MagicMock.
    # With an AsyncMock, `mock_result.scalars()` produces a coroutine object,
    # and accessing `.all` on a coroutine raises AttributeError during setup.
    mock_result = MagicMock()
    mock_result.scalars.return_value.all.return_value = []
    mock_db.execute = AsyncMock(return_value=mock_result)
    metrics = await service.get_queue_metrics(mock_db)
    assert isinstance(metrics, QueueMetrics)
    assert metrics.total_items == 0
    assert metrics.success_rate == 0.0
@pytest.mark.asyncio
async def test_get_queue_metrics_with_items():
    """Queue metrics aggregate status counts, byte totals, and success rate."""
    service = MonitoringService()
    mock_db = AsyncMock()

    def make_item(status, total_bytes, downloaded_bytes, download_speed):
        """Build a minimal mock queue item with the fields the service reads."""
        item = MagicMock()
        item.status = status
        item.total_bytes = total_bytes
        item.downloaded_bytes = downloaded_bytes
        item.download_speed = download_speed
        return item

    items = [
        make_item("COMPLETED", 1000000, 1000000, 1000000),
        make_item("DOWNLOADING", 2000000, 1000000, 500000),
        # A failed item may have no speed recorded at all.
        make_item("FAILED", 500000, 0, None),
    ]
    # The Result from `await db.execute(...)` is consumed synchronously via
    # `result.scalars().all()`, so it must be a MagicMock — configuring
    # `.all.return_value` through an AsyncMock's coroutine return raises
    # AttributeError at setup time.
    mock_result = MagicMock()
    mock_result.scalars.return_value.all.return_value = items
    mock_db.execute = AsyncMock(return_value=mock_result)

    metrics = await service.get_queue_metrics(mock_db)
    assert metrics.total_items == 3
    assert metrics.completed_items == 1
    assert metrics.downloading_items == 1
    assert metrics.failed_items == 1
    assert metrics.total_size_bytes == 3500000
    assert metrics.downloaded_bytes == 2000000
    assert metrics.success_rate > 0
def test_log_error():
    """log_error retains messages, in order, in the internal error log."""
    svc = MonitoringService()
    messages = ["Test error 1", "Test error 2"]
    for msg in messages:
        svc.log_error(msg)
    assert len(svc._error_log) == 2
    # Each log entry is a (timestamp, message) pair; index 1 is the message.
    assert svc._error_log[0][1] == "Test error 1"
    assert svc._error_log[1][1] == "Test error 2"
def test_get_error_metrics_empty():
    """With nothing logged, every error counter reads zero."""
    metrics = MonitoringService().get_error_metrics()
    assert isinstance(metrics, ErrorMetrics)
    assert metrics.total_errors == 0
    assert metrics.errors_24h == 0
    assert metrics.error_rate_per_hour == 0.0
def test_get_error_metrics_with_errors():
    """Freshly logged errors are counted and surfaced by get_error_metrics."""
    svc = MonitoringService()
    for message in (
        "ConnectionError: Failed to connect",
        "ConnectionError: Timeout",
        "TimeoutError: Download timeout",
    ):
        svc.log_error(message)
    metrics = svc.get_error_metrics()
    # All three were just logged, so they count in both windows.
    assert metrics.total_errors == 3
    assert metrics.errors_24h == 3
    assert metrics.last_error_time is not None
    assert len(metrics.most_common_errors) > 0
def test_get_error_metrics_old_errors():
    """Errors older than 24h count toward the total but not errors_24h."""
    svc = MonitoringService()
    # Inject a stale entry directly; log_error() would stamp it with "now".
    stale_timestamp = datetime.now() - timedelta(hours=25)
    svc._error_log.append((stale_timestamp, "Old error"))
    svc.log_error("Recent error")
    metrics = svc.get_error_metrics()
    assert metrics.total_errors == 2
    assert metrics.errors_24h == 1
def test_get_performance_summary():
    """The summary aggregates collected samples per resource dimension."""
    svc = MonitoringService()
    # Accumulate three samples to summarize.
    for _ in range(3):
        svc.get_system_metrics()
    summary = svc.get_performance_summary()
    for section in ("cpu", "memory", "disk", "sample_count"):
        assert section in summary
    assert summary["sample_count"] == 3
    # Each resource section carries the four standard statistics.
    for stat in ("current", "average", "max", "min"):
        assert stat in summary["cpu"]
def test_get_performance_summary_empty():
    """Without any collected samples the summary is an empty dict."""
    svc = MonitoringService()
    assert svc.get_performance_summary() == {}
@pytest.mark.asyncio
async def test_get_comprehensive_status():
    """Comprehensive status bundles system, queue, error, and perf sections."""
    service = MonitoringService()
    mock_db = AsyncMock()
    # The Result from `await db.execute(...)` is consumed synchronously
    # (`result.scalars().all()`), so it must be a MagicMock: with an
    # AsyncMock, `scalars()` yields a coroutine and attribute access on it
    # raises AttributeError before the test body even runs.
    mock_result = MagicMock()
    mock_result.scalars.return_value.all.return_value = []
    mock_db.execute = AsyncMock(return_value=mock_result)
    status = await service.get_comprehensive_status(mock_db)
    for section in ("timestamp", "system", "queue", "errors", "performance"):
        assert section in status
    assert status["system"]["cpu_percent"] >= 0
    assert status["queue"]["total_items"] == 0
def test_get_monitoring_service():
    """The module-level accessor always hands back one shared instance."""
    first = get_monitoring_service()
    second = get_monitoring_service()
    # Identity, not mere equality — it is a singleton.
    assert first is second
    assert isinstance(first, MonitoringService)