#!/usr/bin/env python3
"""
Test runner for comprehensive API testing.

This script runs all API-related tests and provides detailed reporting
on test coverage and results.
"""

import json
import os
import sys
import unittest
from datetime import datetime
from io import StringIO

# Add paths for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'server'))
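# NOTE: the relative paths above assume this runner lives three directories
# below the project root (for example tests/unit/api/), with the application
# code under src/ and src/server/; adjust them if the test layout differs.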


def run_api_tests():
    """Run all API tests and generate a comprehensive report."""

    print("🚀 Starting Aniworld API Test Suite")
    print("=" * 60)

    # Test discovery
    loader = unittest.TestLoader()

    # Discover tests from different modules
    test_suites = []

    # Unit tests
    try:
        from test_api_endpoints import (
            TestAuthenticationEndpoints,
            TestConfigurationEndpoints,
            TestSeriesEndpoints,
            TestDownloadEndpoints,
            TestProcessManagementEndpoints,
            TestLoggingEndpoints,
            TestBackupEndpoints,
            TestDiagnosticsEndpoints,
            TestErrorHandling
        )

        unit_test_classes = [
            TestAuthenticationEndpoints,
            TestConfigurationEndpoints,
            TestSeriesEndpoints,
            TestDownloadEndpoints,
            TestProcessManagementEndpoints,
            TestLoggingEndpoints,
            TestBackupEndpoints,
            TestDiagnosticsEndpoints,
            TestErrorHandling
        ]

        print("✅ Loaded unit test classes")

        for test_class in unit_test_classes:
            suite = loader.loadTestsFromTestCase(test_class)
            test_suites.append(('Unit Tests', test_class.__name__, suite))

    except ImportError as e:
        print(f"⚠️ Could not load unit test classes: {e}")

    # Integration tests
    try:
        integration_path = os.path.join(os.path.dirname(__file__), '..', '..', 'integration')
        integration_file = os.path.join(integration_path, 'test_api_integration.py')

        if os.path.exists(integration_file):
            sys.path.insert(0, integration_path)

            # Import dynamically to handle potential import errors gracefully
            import importlib.util
            spec = importlib.util.spec_from_file_location("test_api_integration", integration_file)
            if spec and spec.loader:
                test_api_integration = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(test_api_integration)

                # Get test classes dynamically
                integration_test_classes = []
                for name in dir(test_api_integration):
                    obj = getattr(test_api_integration, name)
                    if (isinstance(obj, type) and
                            issubclass(obj, unittest.TestCase) and
                            name.startswith('Test') and
                            name != 'APIIntegrationTestBase'):
                        integration_test_classes.append(obj)

                print(f"✅ Loaded {len(integration_test_classes)} integration test classes")

                for test_class in integration_test_classes:
                    suite = loader.loadTestsFromTestCase(test_class)
                    test_suites.append(('Integration Tests', test_class.__name__, suite))
            else:
                print("⚠️ Could not create module spec for integration tests")
        else:
            print(f"⚠️ Integration test file not found: {integration_file}")

    except ImportError as e:
        print(f"⚠️ Could not load integration test classes: {e}")

    # Run tests and collect results
    total_results = {
        'total_tests': 0,
        'total_failures': 0,
        'total_errors': 0,
        'total_skipped': 0,
        'suite_results': []
    }

    print(f"\n🧪 Running {len(test_suites)} test suites...")
    print("-" * 60)

    for suite_type, suite_name, suite in test_suites:
        print(f"\n📋 {suite_type}: {suite_name}")

        # Capture output
        test_output = StringIO()
        runner = unittest.TextTestRunner(
            stream=test_output,
            verbosity=1,
            buffer=True
        )
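        # test_output is never read after the run below, so per-test output is
        # effectively discarded and only the aggregated counts get reported;
        # printing test_output.getvalue() here would surface it if needed
        # (assumption: suppressing the noise is the intended behaviour).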

        # Run the test suite
        result = runner.run(suite)

        # Update totals
        total_results['total_tests'] += result.testsRun
        total_results['total_failures'] += len(result.failures)
        total_results['total_errors'] += len(result.errors)
        total_results['total_skipped'] += len(result.skipped) if hasattr(result, 'skipped') else 0

        # Store suite result. The failure details keep only the first line of the
        # AssertionError message; the error details keep the last traceback line
        # before the trailing newline (normally the exception message).
        # chr(10) is a newline; backslash escapes are not allowed inside
        # f-string expressions before Python 3.12.
        suite_result = {
            'suite_type': suite_type,
            'suite_name': suite_name,
            'tests_run': result.testsRun,
            'failures': len(result.failures),
            'errors': len(result.errors),
            'skipped': len(result.skipped) if hasattr(result, 'skipped') else 0,
            'success_rate': (
                (result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun * 100
                if result.testsRun > 0 else 0
            ),
            'failure_details': [
                f"{test}: {traceback.split('AssertionError: ')[-1].split(chr(10))[0] if 'AssertionError:' in traceback else 'See details'}"
                for test, traceback in result.failures
            ],
            'error_details': [
                f"{test}: {traceback.split(chr(10))[-2] if len(traceback.split(chr(10))) > 1 else 'Unknown error'}"
                for test, traceback in result.errors
            ]
        }

        total_results['suite_results'].append(suite_result)

        # Print immediate results
        status = "✅" if result.wasSuccessful() else "❌"
        print(f" {status} Tests: {result.testsRun}, Failures: {len(result.failures)}, Errors: {len(result.errors)}")

        if result.failures:
            print(" 🔥 Failures:")
            for test, _ in result.failures[:3]:  # Show first 3 failures
                print(f" - {test}")

        if result.errors:
            print(" 💥 Errors:")
            for test, _ in result.errors[:3]:  # Show first 3 errors
                print(f" - {test}")

    # Generate comprehensive report
    print("\n" + "=" * 60)
    print("📊 COMPREHENSIVE TEST REPORT")
    print("=" * 60)

    # Overall statistics
    print("📈 OVERALL STATISTICS:")
    print(f" Total Tests Run: {total_results['total_tests']}")
    print(f" Total Failures: {total_results['total_failures']}")
    print(f" Total Errors: {total_results['total_errors']}")
    print(f" Total Skipped: {total_results['total_skipped']}")

    # Default the success rate so the recommendations and the saved report do
    # not raise a NameError when no tests were discovered
    overall_success_rate = 0.0
    if total_results['total_tests'] > 0:
        overall_success_rate = (
            (total_results['total_tests'] - total_results['total_failures'] - total_results['total_errors'])
            / total_results['total_tests'] * 100
        )
        print(f" Overall Success Rate: {overall_success_rate:.1f}%")

    # Per-suite breakdown
    print("\n📊 PER-SUITE BREAKDOWN:")
    for suite_result in total_results['suite_results']:
        status_icon = "✅" if suite_result['failures'] == 0 and suite_result['errors'] == 0 else "❌"
        print(f" {status_icon} {suite_result['suite_name']}")
        print(f" Tests: {suite_result['tests_run']}, Success Rate: {suite_result['success_rate']:.1f}%")

        if suite_result['failures'] > 0:
            print(f" Failures ({suite_result['failures']}):")
            for failure in suite_result['failure_details'][:2]:
                print(f" - {failure}")

        if suite_result['errors'] > 0:
            print(f" Errors ({suite_result['errors']}):")
            for error in suite_result['error_details'][:2]:
                print(f" - {error}")

    # API Coverage Report
    print("\n🎯 API ENDPOINT COVERAGE:")

    tested_endpoints = {
        'Authentication': [
            'POST /api/auth/setup',
            'POST /api/auth/login',
            'POST /api/auth/logout',
            'GET /api/auth/status'
        ],
        'Configuration': [
            'POST /api/config/directory',
            'GET /api/scheduler/config',
            'POST /api/scheduler/config',
            'GET /api/config/section/advanced',
            'POST /api/config/section/advanced'
        ],
        'Series Management': [
            'GET /api/series',
            'POST /api/search',
            'POST /api/rescan'
        ],
        'Download Management': [
            'POST /api/download'
        ],
        'System Status': [
            'GET /api/process/locks/status',
            'GET /api/status'
        ],
        'Logging': [
            'GET /api/logging/config',
            'POST /api/logging/config',
            'GET /api/logging/files',
            'POST /api/logging/test',
            'POST /api/logging/cleanup',
            'GET /api/logging/files/<filename>/tail'
        ],
        'Backup Management': [
            'POST /api/config/backup',
            'GET /api/config/backups',
            'POST /api/config/backup/<filename>/restore',
            'GET /api/config/backup/<filename>/download'
        ],
        'Diagnostics': [
            'GET /api/diagnostics/network',
            'GET /api/diagnostics/errors',
            'POST /api/recovery/clear-blacklist',
            'GET /api/recovery/retry-counts',
            'GET /api/diagnostics/system-status'
        ]
    }
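    # (The endpoint list above is maintained by hand; keep it in sync with the
    # unit and integration test modules when new routes gain coverage.)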

    total_endpoints = sum(len(endpoints) for endpoints in tested_endpoints.values())

    for category, endpoints in tested_endpoints.items():
        print(f" 📂 {category}: {len(endpoints)} endpoints")
        for endpoint in endpoints:
            print(f" ✓ {endpoint}")

    print(f"\n 🎯 Total API Endpoints Covered: {total_endpoints}")

    # Recommendations
    print("\n💡 RECOMMENDATIONS:")

    if total_results['total_failures'] > 0:
        print(" 🔧 Address test failures to improve code reliability")

    if total_results['total_errors'] > 0:
        print(" 🛠️ Fix test errors - these often indicate setup/import issues")

    if overall_success_rate < 80:
        print(" ⚠️ Success rate below 80% - consider improving test coverage")
    elif overall_success_rate >= 95:
        print(" 🎉 Excellent test success rate! Consider adding more edge cases")

    print(" 📋 Consider adding performance tests for API endpoints")
    print(" 🔒 Add security testing for authentication endpoints")
    print(" 📝 Add API documentation tests (OpenAPI/Swagger validation)")

    # Save detailed report to file
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    report_file = f"api_test_report_{timestamp}.json"

    try:
        report_data = {
            'timestamp': datetime.now().isoformat(),
            'summary': {
                'total_tests': total_results['total_tests'],
                'total_failures': total_results['total_failures'],
                'total_errors': total_results['total_errors'],
                'total_skipped': total_results['total_skipped'],
                'overall_success_rate': overall_success_rate
            },
            'suite_results': total_results['suite_results'],
            'endpoint_coverage': tested_endpoints
        }

        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(report_data, f, indent=2, ensure_ascii=False)

        print(f"\n💾 Detailed report saved to: {report_file}")

    except Exception as e:
        print(f"\n⚠️ Could not save detailed report: {e}")

    # Final summary
    print("\n" + "=" * 60)

    if total_results['total_failures'] == 0 and total_results['total_errors'] == 0:
        print("🎉 ALL TESTS PASSED! API is working correctly.")
        exit_code = 0
    else:
        print("❌ Some tests failed. Please review the issues above.")
        exit_code = 1

    print(f"🏁 Test run completed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("=" * 60)

    return exit_code


if __name__ == '__main__':
    exit_code = run_api_tests()
    sys.exit(exit_code)