# Aniworld/tests/integration/test_provider_selection.py

"""Integration tests for provider selection based on availability, health status, priority."""
from unittest.mock import MagicMock, patch
import pytest
from src.core.providers.config_manager import ProviderConfigManager, ProviderSettings
from src.core.providers.failover import ProviderFailover
from src.core.providers.health_monitor import (
ProviderHealthMetrics,
ProviderHealthMonitor,
)
class TestProviderSelectionByHealth:
    """Selection of the best provider driven purely by health metrics."""

    def test_best_provider_selected_by_success_rate(self):
        """Provider with highest success rate should be selected as best."""
        hm = ProviderHealthMonitor(failure_threshold=5)
        # provider1 lands at 80% success, provider2 at 90%.
        for name, good_count in (("provider1", 8), ("provider2", 9)):
            for attempt in range(10):
                ok = attempt < good_count
                hm.record_request(
                    name,
                    success=ok,
                    response_time_ms=100,
                    error_message=None if ok else "fail",
                )
        assert hm.get_best_provider() == "provider2"

    def test_unavailable_provider_not_selected(self):
        """Provider marked unavailable should not be selected as best."""
        hm = ProviderHealthMonitor(failure_threshold=3)
        # Push provider1 past the failure threshold so it is marked down.
        for _ in range(5):
            hm.record_request(
                "provider1",
                success=False,
                response_time_ms=500,
                error_message="Connection refused",
            )
        # provider2 stays healthy with a single success.
        hm.record_request("provider2", success=True, response_time_ms=100)

        assert hm.get_best_provider() == "provider2"
        remaining = hm.get_available_providers()
        assert "provider1" not in remaining
        assert "provider2" in remaining

    def test_recovery_after_failures(self):
        """Provider should recover availability after successful request."""
        hm = ProviderHealthMonitor(failure_threshold=3)
        # Four straight failures take the provider out of rotation.
        for _ in range(4):
            hm.record_request(
                "provider1",
                success=False,
                response_time_ms=200,
                error_message="fail",
            )
        assert "provider1" not in hm.get_available_providers()

        # One success resets the consecutive-failure count and restores it.
        hm.record_request("provider1", success=True, response_time_ms=100)
        assert "provider1" in hm.get_available_providers()

    def test_response_time_tiebreaker(self):
        """When success rates are equal, faster provider should be preferred."""
        hm = ProviderHealthMonitor()
        # Identical 100% success rates; only latency differs.
        hm.record_request("slow_provider", success=True, response_time_ms=500)
        hm.record_request("fast_provider", success=True, response_time_ms=50)
        assert hm.get_best_provider() == "fast_provider"
class TestProviderSelectionWithConfig:
    """Selection behaviour driven by the provider configuration manager."""

    def test_enabled_providers_only(self):
        """Only enabled providers should be available for selection."""
        cfg = ProviderConfigManager()
        # Register a mix of enabled and disabled providers.
        settings = [
            ProviderSettings(name="p1", enabled=True, priority=1),
            ProviderSettings(name="p2", enabled=False, priority=0),
            ProviderSettings(name="p3", enabled=True, priority=2),
        ]
        for s in settings:
            cfg.set_provider_settings(s.name, s)

        enabled = cfg.get_enabled_providers()
        assert "p1" in enabled
        assert "p2" not in enabled
        assert "p3" in enabled

    def test_priority_ordering(self):
        """Providers should be ordered by priority value."""
        cfg = ProviderConfigManager()
        # Insert out of order; smaller priority value means higher rank.
        for name, prio in (
            ("low_priority", 10),
            ("high_priority", 1),
            ("mid_priority", 5),
        ):
            cfg.set_provider_settings(
                name, ProviderSettings(name=name, priority=prio)
            )

        expected = ["high_priority", "mid_priority", "low_priority"]
        assert cfg.get_providers_by_priority() == expected

    def test_dynamic_priority_update(self):
        """Priority changes should immediately affect ordering."""
        cfg = ProviderConfigManager()
        cfg.set_provider_settings("p1", ProviderSettings(name="p1", priority=1))
        cfg.set_provider_settings("p2", ProviderSettings(name="p2", priority=2))

        # p1 starts out on top.
        assert cfg.get_providers_by_priority()[0] == "p1"

        # Bumping p2 to priority 0 should reorder without re-registering.
        cfg.set_provider_priority("p2", 0)
        assert cfg.get_providers_by_priority()[0] == "p2"
class TestProviderSelectionWithFailover:
    """Interaction between the failover system and health monitoring."""

    def test_failover_respects_health_status(self):
        """Failover should prefer healthy providers."""
        hm = ProviderHealthMonitor(failure_threshold=2)
        # Two failures hit the threshold and mark p1 unhealthy.
        hm.record_request("p1", False, 100, error_message="fail")
        hm.record_request("p1", False, 100, error_message="fail")
        # A single success keeps p2 healthy.
        hm.record_request("p2", True, 50)

        patched = patch(
            "src.core.providers.failover.get_health_monitor",
            return_value=hm,
        )
        with patched:
            fo = ProviderFailover(
                providers=["p1", "p2"],
                enable_health_monitoring=True,
            )
            # The healthy provider must win the initial selection.
            assert fo.get_current_provider() == "p2"

    def test_failover_falls_back_to_round_robin(self):
        """Without health data, should use round-robin selection."""
        fo = ProviderFailover(
            providers=["p1", "p2", "p3"],
            enable_health_monitoring=False,
        )
        # Any configured provider is acceptable as the starting point.
        assert fo.get_current_provider() in ["p1", "p2", "p3"]
class TestHealthMonitorMetrics:
    """Accuracy of the health monitor's per-provider bookkeeping."""

    def test_metrics_tracking_accuracy(self):
        """Metrics should accurately reflect request history."""
        hm = ProviderHealthMonitor()
        # 7 successes followed by 3 failures, with growing payload sizes.
        total_bytes = 0
        for i in range(10):
            ok = i < 7
            payload = 1024 * (i + 1)
            total_bytes += payload
            hm.record_request(
                "test_provider",
                success=ok,
                response_time_ms=100 + i * 10,
                bytes_transferred=payload,
                error_message=None if ok else f"error_{i}",
            )

        m = hm.get_provider_metrics("test_provider")
        assert m is not None
        assert m.total_requests == 10
        assert m.successful_requests == 7
        assert m.failed_requests == 3
        assert m.success_rate == 70.0
        assert m.total_bytes_downloaded == total_bytes

    def test_consecutive_failure_tracking(self):
        """Consecutive failures should be tracked accurately."""
        hm = ProviderHealthMonitor(failure_threshold=3)
        # Two successes, then exactly three failures in a row.
        hm.record_request("p1", True, 100)
        hm.record_request("p1", True, 100)
        for msg in ("e1", "e2", "e3"):
            hm.record_request("p1", False, 100, error_message=msg)

        m = hm.get_provider_metrics("p1")
        assert m.consecutive_failures == 3
        assert m.is_available is False

    def test_success_resets_consecutive_failures(self):
        """A success should reset the consecutive failure counter."""
        hm = ProviderHealthMonitor(failure_threshold=5)
        # Three failures in a row, then a single success.
        for msg in ("e1", "e2", "e3"):
            hm.record_request("p1", False, 100, error_message=msg)
        hm.record_request("p1", True, 100)

        m = hm.get_provider_metrics("p1")
        assert m.consecutive_failures == 0
        assert m.is_available is True

    def test_health_summary(self):
        """Health summary should aggregate all provider metrics."""
        hm = ProviderHealthMonitor()
        hm.record_request("p1", True, 100)
        hm.record_request("p2", True, 200)
        hm.record_request("p3", False, 300, error_message="err")

        summary = hm.get_health_summary()
        assert summary["total_providers"] == 3
        assert summary["available_providers"] >= 2
        assert "average_success_rate" in summary
        assert "providers" in summary
        assert len(summary["providers"]) == 3

    def test_reset_provider_metrics(self):
        """Resetting metrics should clear all data for a provider."""
        hm = ProviderHealthMonitor()
        hm.record_request("p1", True, 100, bytes_transferred=1024)
        hm.record_request("p1", False, 200, error_message="fail")

        assert hm.reset_provider_metrics("p1") is True
        m = hm.get_provider_metrics("p1")
        assert m.total_requests == 0
        assert m.successful_requests == 0
        assert m.total_bytes_downloaded == 0

    def test_reset_unknown_provider(self):
        """Resetting unknown provider should return False."""
        hm = ProviderHealthMonitor()
        assert hm.reset_provider_metrics("unknown") is False

    def test_empty_summary(self):
        """Summary with no providers should return zeros."""
        hm = ProviderHealthMonitor()
        summary = hm.get_health_summary()
        assert summary["total_providers"] == 0
        assert summary["average_success_rate"] == 0.0
class TestMultiProviderHealthScenarios:
    """End-to-end selection behaviour across several providers at once."""

    def test_three_providers_degraded_service(self):
        """With 3 providers, partial failure should still select best."""
        hm = ProviderHealthMonitor(failure_threshold=3)

        # A is completely down: five consecutive failures.
        for _ in range(5):
            hm.record_request("A", False, 500, error_message="down")

        # B alternates success/failure for a 50% success rate.
        for i in range(10):
            even = i % 2 == 0
            hm.record_request(
                "B",
                success=even,
                response_time_ms=200,
                error_message=None if even else "intermittent",
            )

        # C is fully healthy.
        for _ in range(5):
            hm.record_request("C", True, 100)

        assert hm.get_best_provider() == "C"
        up = hm.get_available_providers()
        assert "A" not in up
        assert "B" in up
        assert "C" in up

    def test_all_providers_healthy(self):
        """When all healthy, fastest should be selected."""
        hm = ProviderHealthMonitor()
        # All succeed; latency alone should decide.
        for name, latency in (("slow", 500), ("medium", 200), ("fast", 50)):
            hm.record_request(name, True, latency)
        assert hm.get_best_provider() == "fast"

    def test_no_providers_tracked(self):
        """With no tracked providers, best should be None."""
        hm = ProviderHealthMonitor()
        assert hm.get_best_provider() is None
        assert hm.get_available_providers() == []