Add provider system tests: 211 tests covering base, factory, config, monitoring, failover, and selection
This commit is contained in:
312
tests/integration/test_provider_failover_scenarios.py
Normal file
312
tests/integration/test_provider_failover_scenarios.py
Normal file
@@ -0,0 +1,312 @@
|
||||
"""Integration tests for provider failover scenarios - End-to-end provider switching."""
|
||||
|
||||
import asyncio
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from src.core.providers.failover import (
|
||||
ProviderFailover,
|
||||
configure_failover,
|
||||
get_failover,
|
||||
)
|
||||
from src.core.providers.health_monitor import ProviderHealthMonitor
|
||||
|
||||
|
||||
class TestProviderFailoverScenarios:
    """End-to-end failover scenarios across multiple providers."""

    @pytest.mark.asyncio
    async def test_primary_fails_switches_to_backup(self):
        """When primary provider fails, should switch to backup and succeed."""
        invoked = []

        async def operation(provider: str) -> str:
            invoked.append(provider)
            if provider == "provider1":
                raise ConnectionError("Provider1 is down")
            return f"Success from {provider}"

        failover = ProviderFailover(
            providers=["provider1", "provider2", "provider3"],
            max_retries=1,
            retry_delay=0.01,
            enable_health_monitoring=False,
        )

        outcome = await failover.execute_with_failover(
            operation=operation, operation_name="test_failover"
        )

        # The failing primary must have been tried before a backup succeeded.
        assert "Success" in outcome
        assert "provider1" in invoked
        assert len(invoked) >= 2

    @pytest.mark.asyncio
    async def test_first_two_fail_third_succeeds(self):
        """When first two providers fail, third should be tried."""
        calls_per_provider: dict = {}

        async def operation(provider: str) -> str:
            calls_per_provider[provider] = calls_per_provider.get(provider, 0) + 1
            if provider in ("provider1", "provider2"):
                raise ConnectionError(f"{provider} is down")
            return f"Success from {provider}"

        failover = ProviderFailover(
            providers=["provider1", "provider2", "provider3"],
            max_retries=1,
            retry_delay=0.01,
            enable_health_monitoring=False,
        )

        outcome = await failover.execute_with_failover(
            operation=operation, operation_name="test_two_fail"
        )

        assert "provider3" in outcome

    @pytest.mark.asyncio
    async def test_all_providers_fail_raises(self):
        """When all providers fail, should raise exception."""

        async def operation(provider: str) -> str:
            raise ConnectionError(f"{provider} is down")

        failover = ProviderFailover(
            providers=["provider1", "provider2"],
            max_retries=1,
            retry_delay=0.01,
            enable_health_monitoring=False,
        )

        with pytest.raises(Exception, match="failed with all providers"):
            await failover.execute_with_failover(
                operation=operation, operation_name="test_all_fail"
            )

    @pytest.mark.asyncio
    async def test_retry_within_single_provider(self):
        """Should retry with same provider before moving to next."""
        attempts = 0

        async def operation(provider: str) -> str:
            nonlocal attempts
            attempts += 1
            if attempts <= 2:
                raise ConnectionError("Temporary failure")
            return f"Success on attempt {attempts}"

        failover = ProviderFailover(
            providers=["provider1"],
            max_retries=3,
            retry_delay=0.01,
            enable_health_monitoring=False,
        )

        outcome = await failover.execute_with_failover(
            operation=operation, operation_name="test_retry"
        )

        # Two failures plus the final success: three attempts on one provider.
        assert "Success" in outcome
        assert attempts == 3

    @pytest.mark.asyncio
    async def test_failover_with_health_monitoring(self):
        """Failover should integrate with health monitoring."""
        monitor = ProviderHealthMonitor(failure_threshold=2)

        # Push provider1 past the failure threshold so it is marked unavailable.
        for _ in range(3):
            monitor.record_request(
                provider_name="provider1",
                success=False,
                response_time_ms=100,
                error_message="Simulated failure",
            )

        assert "provider1" not in monitor.get_available_providers()

        with patch(
            "src.core.providers.failover.get_health_monitor",
            return_value=monitor,
        ):
            failover = ProviderFailover(
                providers=["provider1", "provider2"],
                max_retries=1,
                retry_delay=0.01,
                enable_health_monitoring=True,
            )

            # Selection should steer away from the unhealthy provider.
            current = failover.get_current_provider()
            available = monitor.get_available_providers()
            if available:
                assert current in available or current == "provider2"
|
||||
|
||||
|
||||
class TestProviderFailoverChainManagement:
    """Add/remove/priority operations on the failover chain."""

    @staticmethod
    def _chain(providers):
        """Build a ProviderFailover with health monitoring disabled."""
        return ProviderFailover(
            providers=providers,
            enable_health_monitoring=False,
        )

    def test_add_provider_to_chain(self):
        """Should add new provider to the failover chain."""
        failover = self._chain(["p1", "p2"])
        failover.add_provider("p3")
        assert "p3" in failover.get_providers()

    def test_add_duplicate_provider_no_effect(self):
        """Adding existing provider should not create duplicate."""
        failover = self._chain(["p1", "p2"])
        failover.add_provider("p1")
        assert failover.get_providers().count("p1") == 1

    def test_remove_provider_from_chain(self):
        """Should remove provider from the failover chain."""
        failover = self._chain(["p1", "p2", "p3"])
        assert failover.remove_provider("p2") is True
        assert "p2" not in failover.get_providers()

    def test_remove_nonexistent_provider(self):
        """Removing non-existent provider should return False."""
        failover = self._chain(["p1"])
        assert failover.remove_provider("p99") is False

    def test_set_provider_priority(self):
        """Should reorder provider in the chain."""
        failover = self._chain(["p1", "p2", "p3"])
        assert failover.set_provider_priority("p3", 0) is True
        assert failover.get_providers()[0] == "p3"

    def test_set_priority_unknown_provider(self):
        """Setting priority for unknown provider should return False."""
        failover = self._chain(["p1"])
        assert failover.set_provider_priority("unknown", 0) is False
|
||||
|
||||
|
||||
class TestFailoverStats:
    """Failover statistics reporting."""

    def test_get_failover_stats(self):
        """Should return comprehensive stats."""
        failover = ProviderFailover(
            providers=["p1", "p2"],
            max_retries=3,
            retry_delay=1.0,
            enable_health_monitoring=False,
        )

        stats = failover.get_failover_stats()
        # Stats must echo back the constructor configuration.
        expected = {"total_providers": 2, "max_retries": 3, "retry_delay": 1.0}
        for key, value in expected.items():
            assert stats[key] == value
        assert len(stats["providers"]) == 2

    def test_stats_with_health_monitoring(self):
        """Stats should include availability info when monitoring enabled."""
        monitor = ProviderHealthMonitor()
        monitor.record_request("p1", True, 100)
        # Repeated failures make p2's availability data meaningful.
        for _ in range(3):
            monitor.record_request("p2", False, 200, error_message="fail")

        with patch(
            "src.core.providers.failover.get_health_monitor",
            return_value=monitor,
        ):
            failover = ProviderFailover(
                providers=["p1", "p2"],
                enable_health_monitoring=True,
            )
            stats = failover.get_failover_stats()
            for key in ("available_providers", "unavailable_providers"):
                assert key in stats
|
||||
|
||||
|
||||
class TestConfigureFailover:
    """Test the global failover configuration function.

    These tests mutate the module-level singleton ``_failover``; each one
    restores it to ``None`` in a ``finally`` block so a failing assertion
    cannot leak configured global state into other tests.
    """

    def test_configure_failover(self):
        """configure_failover should create a new global instance."""
        import src.core.providers.failover as fo

        fo._failover = None
        try:
            failover = configure_failover(
                providers=["custom1", "custom2"],
                max_retries=5,
                retry_delay=0.5,
            )

            assert isinstance(failover, ProviderFailover)
            assert failover.get_providers() == ["custom1", "custom2"]
            assert failover._max_retries == 5
        finally:
            # Guaranteed cleanup — previously skipped if an assert failed.
            fo._failover = None

    def test_get_failover_singleton(self):
        """get_failover should return same instance."""
        import src.core.providers.failover as fo

        fo._failover = None
        try:
            first = get_failover()
            second = get_failover()
            assert first is second
        finally:
            fo._failover = None
|
||||
|
||||
|
||||
class TestNextProviderRotation:
    """Provider rotation logic."""

    def test_get_next_cycles_through_all(self):
        """get_next_provider should cycle through all providers."""
        failover = ProviderFailover(
            providers=["p1", "p2", "p3"],
            enable_health_monitoring=False,
        )

        observed = {
            provider
            for provider in (failover.get_next_provider() for _ in range(3))
            if provider
        }

        # Three rotations must surface at least two distinct providers.
        assert len(observed) >= 2

    def test_current_provider_is_from_list(self):
        """get_current_provider should always return from provider list."""
        failover = ProviderFailover(
            providers=["p1", "p2", "p3"],
            enable_health_monitoring=False,
        )

        chain = ("p1", "p2", "p3")
        for _ in range(10):
            assert failover.get_current_provider() in chain
            failover.get_next_provider()
|
||||
348
tests/integration/test_provider_selection.py
Normal file
348
tests/integration/test_provider_selection.py
Normal file
@@ -0,0 +1,348 @@
|
||||
"""Integration tests for provider selection based on availability, health status, priority."""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from src.core.providers.config_manager import (
|
||||
ProviderConfigManager,
|
||||
ProviderSettings,
|
||||
)
|
||||
from src.core.providers.failover import ProviderFailover
|
||||
from src.core.providers.health_monitor import (
|
||||
ProviderHealthMetrics,
|
||||
ProviderHealthMonitor,
|
||||
)
|
||||
|
||||
|
||||
class TestProviderSelectionByHealth:
    """Provider selection based on health metrics."""

    def test_best_provider_selected_by_success_rate(self):
        """Provider with highest success rate should be selected as best."""
        monitor = ProviderHealthMonitor(failure_threshold=5)

        # provider1 succeeds 8/10 requests; provider2 succeeds 9/10.
        for name, successes in (("provider1", 8), ("provider2", 9)):
            for i in range(10):
                ok = i < successes
                monitor.record_request(
                    name,
                    success=ok,
                    response_time_ms=100,
                    error_message=None if ok else "fail",
                )

        assert monitor.get_best_provider() == "provider2"

    def test_unavailable_provider_not_selected(self):
        """Provider marked unavailable should not be selected as best."""
        monitor = ProviderHealthMonitor(failure_threshold=3)

        # Exceed the threshold with consecutive failures on provider1.
        for _ in range(5):
            monitor.record_request(
                "provider1",
                success=False,
                response_time_ms=500,
                error_message="Connection refused",
            )
        monitor.record_request("provider2", success=True, response_time_ms=100)

        assert monitor.get_best_provider() == "provider2"

        available = monitor.get_available_providers()
        assert "provider1" not in available
        assert "provider2" in available

    def test_recovery_after_failures(self):
        """Provider should recover availability after successful request."""
        monitor = ProviderHealthMonitor(failure_threshold=3)

        for _ in range(4):
            monitor.record_request(
                "provider1", success=False, response_time_ms=200,
                error_message="fail",
            )
        assert "provider1" not in monitor.get_available_providers()

        # A single success resets the consecutive-failure counter.
        monitor.record_request("provider1", success=True, response_time_ms=100)
        assert "provider1" in monitor.get_available_providers()

    def test_response_time_tiebreaker(self):
        """When success rates are equal, faster provider should be preferred."""
        monitor = ProviderHealthMonitor()

        # Identical (100%) success rates; only latency differs.
        monitor.record_request("slow_provider", success=True, response_time_ms=500)
        monitor.record_request("fast_provider", success=True, response_time_ms=50)

        assert monitor.get_best_provider() == "fast_provider"
|
||||
|
||||
|
||||
class TestProviderSelectionWithConfig:
    """Provider selection using the configuration manager."""

    @staticmethod
    def _configured(entries):
        """Build a ProviderConfigManager from (name, settings-kwargs) pairs."""
        config = ProviderConfigManager()
        for name, kwargs in entries:
            config.set_provider_settings(
                name, ProviderSettings(name=name, **kwargs)
            )
        return config

    def test_enabled_providers_only(self):
        """Only enabled providers should be available for selection."""
        config = self._configured([
            ("p1", {"enabled": True, "priority": 1}),
            ("p2", {"enabled": False, "priority": 0}),
            ("p3", {"enabled": True, "priority": 2}),
        ])

        enabled = config.get_enabled_providers()
        assert "p1" in enabled
        assert "p2" not in enabled
        assert "p3" in enabled

    def test_priority_ordering(self):
        """Providers should be ordered by priority value."""
        config = self._configured([
            ("low_priority", {"priority": 10}),
            ("high_priority", {"priority": 1}),
            ("mid_priority", {"priority": 5}),
        ])

        # Lower numeric priority comes first.
        ordered = config.get_providers_by_priority()
        assert ordered == ["high_priority", "mid_priority", "low_priority"]

    def test_dynamic_priority_update(self):
        """Priority changes should immediately affect ordering."""
        config = self._configured([
            ("p1", {"priority": 1}),
            ("p2", {"priority": 2}),
        ])

        assert config.get_providers_by_priority()[0] == "p1"

        # Promoting p2 must immediately reorder the chain.
        config.set_provider_priority("p2", 0)
        assert config.get_providers_by_priority()[0] == "p2"
|
||||
|
||||
|
||||
class TestProviderSelectionWithFailover:
    """Provider selection integration with the failover system."""

    def test_failover_respects_health_status(self):
        """Failover should prefer healthy providers."""
        monitor = ProviderHealthMonitor(failure_threshold=2)

        # Two consecutive failures push p1 past the threshold; p2 stays healthy.
        for _ in range(2):
            monitor.record_request("p1", False, 100, error_message="fail")
        monitor.record_request("p2", True, 50)

        with patch(
            "src.core.providers.failover.get_health_monitor",
            return_value=monitor,
        ):
            failover = ProviderFailover(
                providers=["p1", "p2"],
                enable_health_monitoring=True,
            )
            assert failover.get_current_provider() == "p2"

    def test_failover_falls_back_to_round_robin(self):
        """Without health data, should use round-robin selection."""
        failover = ProviderFailover(
            providers=["p1", "p2", "p3"],
            enable_health_monitoring=False,
        )

        # Whichever provider rotation starts on, it must come from the chain.
        assert failover.get_current_provider() in ("p1", "p2", "p3")
|
||||
|
||||
|
||||
class TestHealthMonitorMetrics:
    """Health monitor metric collection scenarios."""

    def test_metrics_tracking_accuracy(self):
        """Metrics should accurately reflect request history."""
        monitor = ProviderHealthMonitor()

        # 7 successes then 3 failures, with growing latency and payload size.
        for i in range(10):
            ok = i < 7
            monitor.record_request(
                "test_provider",
                success=ok,
                response_time_ms=100 + i * 10,
                bytes_transferred=1024 * (i + 1),
                error_message=None if ok else f"error_{i}",
            )

        metrics = monitor.get_provider_metrics("test_provider")
        assert metrics is not None
        assert metrics.total_requests == 10
        assert metrics.successful_requests == 7
        assert metrics.failed_requests == 3
        assert metrics.success_rate == 70.0
        assert metrics.total_bytes_downloaded == sum(
            1024 * (i + 1) for i in range(10)
        )

    def test_consecutive_failure_tracking(self):
        """Consecutive failures should be tracked accurately."""
        monitor = ProviderHealthMonitor(failure_threshold=3)

        # Two successes followed by three failures.
        for _ in range(2):
            monitor.record_request("p1", True, 100)
        for n in (1, 2, 3):
            monitor.record_request("p1", False, 100, error_message=f"e{n}")

        metrics = monitor.get_provider_metrics("p1")
        assert metrics.consecutive_failures == 3
        assert metrics.is_available is False

    def test_success_resets_consecutive_failures(self):
        """A success should reset the consecutive failure counter."""
        monitor = ProviderHealthMonitor(failure_threshold=5)

        # Three failures, then one success.
        for n in (1, 2, 3):
            monitor.record_request("p1", False, 100, error_message=f"e{n}")
        monitor.record_request("p1", True, 100)

        metrics = monitor.get_provider_metrics("p1")
        assert metrics.consecutive_failures == 0
        assert metrics.is_available is True

    def test_health_summary(self):
        """Health summary should aggregate all provider metrics."""
        monitor = ProviderHealthMonitor()

        monitor.record_request("p1", True, 100)
        monitor.record_request("p2", True, 200)
        monitor.record_request("p3", False, 300, error_message="err")

        summary = monitor.get_health_summary()
        assert summary["total_providers"] == 3
        assert summary["available_providers"] >= 2
        assert "average_success_rate" in summary
        assert "providers" in summary
        assert len(summary["providers"]) == 3

    def test_reset_provider_metrics(self):
        """Resetting metrics should clear all data for a provider."""
        monitor = ProviderHealthMonitor()

        monitor.record_request("p1", True, 100, bytes_transferred=1024)
        monitor.record_request("p1", False, 200, error_message="fail")

        assert monitor.reset_provider_metrics("p1") is True

        metrics = monitor.get_provider_metrics("p1")
        assert metrics.total_requests == 0
        assert metrics.successful_requests == 0
        assert metrics.total_bytes_downloaded == 0

    def test_reset_unknown_provider(self):
        """Resetting unknown provider should return False."""
        monitor = ProviderHealthMonitor()
        assert monitor.reset_provider_metrics("unknown") is False

    def test_empty_summary(self):
        """Summary with no providers should return zeros."""
        monitor = ProviderHealthMonitor()
        summary = monitor.get_health_summary()
        assert summary["total_providers"] == 0
        assert summary["average_success_rate"] == 0.0
|
||||
|
||||
|
||||
class TestMultiProviderHealthScenarios:
    """Complex multi-provider health scenarios."""

    def test_three_providers_degraded_service(self):
        """With 3 providers, partial failure should still select best."""
        monitor = ProviderHealthMonitor(failure_threshold=3)

        # A: fully down (5 straight failures, past the threshold).
        for _ in range(5):
            monitor.record_request("A", False, 500, error_message="down")

        # B: degraded — alternating success and failure (50% rate).
        for i in range(10):
            healthy = i % 2 == 0
            monitor.record_request(
                "B", success=healthy, response_time_ms=200,
                error_message=None if healthy else "intermittent",
            )

        # C: perfectly healthy.
        for _ in range(5):
            monitor.record_request("C", True, 100)

        assert monitor.get_best_provider() == "C"

        available = monitor.get_available_providers()
        assert "A" not in available
        assert "B" in available
        assert "C" in available

    def test_all_providers_healthy(self):
        """When all healthy, fastest should be selected."""
        monitor = ProviderHealthMonitor()

        for name, latency_ms in (("slow", 500), ("medium", 200), ("fast", 50)):
            monitor.record_request(name, True, latency_ms)

        assert monitor.get_best_provider() == "fast"

    def test_no_providers_tracked(self):
        """With no tracked providers, best should be None."""
        monitor = ProviderHealthMonitor()
        assert monitor.get_best_provider() is None
        assert monitor.get_available_providers() == []
|
||||
Reference in New Issue
Block a user