remove part 1
This commit is contained in:
@@ -1,214 +0,0 @@
|
||||
"""Diagnostics API endpoints for Aniworld.
|
||||
|
||||
This module provides endpoints for system diagnostics and health checks.
|
||||
"""
|
||||
import asyncio
|
||||
import logging
|
||||
import socket
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from src.server.utils.dependencies import require_auth
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api/diagnostics", tags=["diagnostics"])
|
||||
|
||||
|
||||
class NetworkTestResult(BaseModel):
    """Result of a network connectivity test."""

    # Target that was probed, exactly as supplied by the caller.
    host: str = Field(..., description="Hostname or URL tested")
    # True when the TCP probe succeeded within the timeout.
    reachable: bool = Field(..., description="Whether host is reachable")
    # Connect latency; None when the probe failed.
    response_time_ms: Optional[float] = Field(
        None, description="Response time in milliseconds"
    )
    # Human-readable failure reason; None on success.
    error: Optional[str] = Field(None, description="Error message if failed")
|
||||
|
||||
|
||||
class NetworkDiagnostics(BaseModel):
    """Network diagnostics results."""

    # True if at least one probe in `tests` succeeded.
    internet_connected: bool = Field(
        ..., description="Overall internet connectivity status"
    )
    # Outcome of the dedicated DNS-resolution check.
    dns_working: bool = Field(..., description="DNS resolution status")
    # Whether the aniworld.to probe specifically succeeded.
    aniworld_reachable: bool = Field(
        ..., description="Aniworld.to connectivity status"
    )
    # Per-host probe results, in the order the probes were issued.
    tests: List[NetworkTestResult] = Field(
        ..., description="Individual network tests"
    )
|
||||
|
||||
|
||||
async def check_dns() -> bool:
    """Check if DNS resolution is working.

    Resolves a well-known hostname in the default executor so the
    blocking ``socket.gethostbyname`` call does not stall the event
    loop (the original called it directly from the coroutine).

    Returns:
        bool: True if DNS is working
    """
    loop = asyncio.get_running_loop()
    try:
        await loop.run_in_executor(None, socket.gethostbyname, "google.com")
        return True
    except socket.gaierror:
        return False
|
||||
|
||||
|
||||
async def check_host_connectivity(
    host: str, port: int = 80, timeout: float = 5.0
) -> NetworkTestResult:
    """Test TCP connectivity to a specific host.

    Args:
        host: Hostname or IP address to test
        port: Port to test (default: 80)
        timeout: Timeout in seconds (default: 5.0)

    Returns:
        NetworkTestResult with test results
    """
    import time

    def _probe() -> None:
        # Close the socket immediately — we only test reachability.
        # (The original leaked the connection returned by
        # socket.create_connection.)
        sock = socket.create_connection((host, port), timeout=timeout)
        sock.close()

    # Monotonic clock: immune to wall-clock adjustments while timing.
    start_time = time.monotonic()

    try:
        # get_running_loop() is the supported call inside a coroutine
        # (get_event_loop() is deprecated here).
        loop = asyncio.get_running_loop()
        await asyncio.wait_for(
            loop.run_in_executor(None, _probe),
            timeout=timeout,
        )

        response_time = (time.monotonic() - start_time) * 1000

        return NetworkTestResult(
            host=host,
            reachable=True,
            response_time_ms=round(response_time, 2),
        )

    except asyncio.TimeoutError:
        return NetworkTestResult(
            host=host, reachable=False, error="Connection timeout"
        )
    except socket.gaierror as e:
        return NetworkTestResult(
            host=host, reachable=False, error=f"DNS resolution failed: {e}"
        )
    except ConnectionRefusedError:
        return NetworkTestResult(
            host=host, reachable=False, error="Connection refused"
        )
    except Exception as e:
        # Catch-all keeps one failed probe from aborting the whole sweep.
        return NetworkTestResult(
            host=host, reachable=False, error=f"Connection error: {str(e)}"
        )
|
||||
|
||||
|
||||
@router.get("/network")
async def network_diagnostics(
    auth: Optional[dict] = Depends(require_auth),
) -> Dict:
    """Run network connectivity diagnostics.

    Tests DNS resolution and connectivity to common services including
    aniworld.to.

    Args:
        auth: Authentication token (optional)

    Returns:
        Dict with status and diagnostics data

    Raises:
        HTTPException: If diagnostics fail
    """
    try:
        logger.info("Running network diagnostics")

        # Check DNS
        dns_working = await check_dns()

        # Test connectivity to various hosts including aniworld.to
        test_hosts = [
            ("google.com", 80),
            ("cloudflare.com", 80),
            ("github.com", 443),
            ("aniworld.to", 443),
        ]

        # Run all probes concurrently instead of sequentially.
        test_tasks = [
            check_host_connectivity(host, port) for host, port in test_hosts
        ]
        test_results = await asyncio.gather(*test_tasks)

        # Internet counts as "up" if any single probe succeeded.
        internet_connected = any(result.reachable for result in test_results)

        # Check if aniworld.to is reachable
        aniworld_result = next(
            (r for r in test_results if r.host == "aniworld.to"),
            None,
        )
        aniworld_reachable = (
            aniworld_result.reachable if aniworld_result else False
        )

        # Lazy %-args: formatting is skipped when INFO is disabled.
        logger.info(
            "Network diagnostics complete: DNS=%s, Internet=%s, Aniworld=%s",
            dns_working,
            internet_connected,
            aniworld_reachable,
        )

        # Create diagnostics data
        diagnostics_data = NetworkDiagnostics(
            internet_connected=internet_connected,
            dns_working=dns_working,
            aniworld_reachable=aniworld_reachable,
            tests=test_results,
        )

        # Return in standard format expected by frontend
        return {
            "status": "success",
            "data": diagnostics_data.model_dump(),
        }

    except Exception as e:
        logger.exception("Failed to run network diagnostics")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to run network diagnostics: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
@router.get("/system", response_model=Dict[str, str])
async def system_info(
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Get basic system information.

    Args:
        auth: Authentication token (optional)

    Returns:
        Dictionary with system information
    """
    # Local imports: only this endpoint needs platform/sys.
    import platform
    import sys

    info: Dict[str, str] = {}
    info["platform"] = platform.platform()
    info["python_version"] = sys.version
    info["architecture"] = platform.machine()
    info["processor"] = platform.processor()
    info["hostname"] = socket.gethostname()
    return info
|
||||
@@ -1,459 +0,0 @@
|
||||
"""Maintenance API endpoints for system housekeeping and diagnostics.
|
||||
|
||||
This module exposes cleanup routines, system statistics, maintenance
|
||||
operations, and health reporting endpoints that rely on the shared system
|
||||
utilities and monitoring services. The routes allow administrators to
|
||||
prune logs, inspect disk usage, vacuum or analyze the database, and gather
|
||||
holistic health metrics for AniWorld deployments."""
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from src.infrastructure.security.database_integrity import DatabaseIntegrityChecker
|
||||
from src.server.services.monitoring_service import get_monitoring_service
|
||||
from src.server.utils.dependencies import get_database_session
|
||||
from src.server.utils.system import get_system_utilities
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api/maintenance", tags=["maintenance"])
|
||||
|
||||
|
||||
def get_system_utils():
    """Dependency to get system utilities."""
    # Thin FastAPI dependency wrapper; the indirection lets routes
    # override/mock the utilities in tests.
    return get_system_utilities()
|
||||
|
||||
|
||||
@router.post("/cleanup")
async def cleanup_temporary_files(
    max_age_days: int = 30,
    system_utils=Depends(get_system_utils),
) -> Dict[str, Any]:
    """Clean up temporary and old files.

    Args:
        max_age_days: Delete files older than this many days.
        system_utils: System utilities dependency.

    Returns:
        dict: Cleanup results.

    Raises:
        HTTPException: If any cleanup step fails.
    """
    try:
        deleted_logs = system_utils.cleanup_directory(
            "logs", "*.log", max_age_days
        )
        deleted_temp = system_utils.cleanup_directory(
            "Temp", "*", max_age_days
        )
        deleted_dirs = system_utils.cleanup_empty_directories("logs")

        return {
            "success": True,
            "deleted_logs": deleted_logs,
            "deleted_temp_files": deleted_temp,
            "deleted_empty_dirs": deleted_dirs,
            "total_deleted": deleted_logs + deleted_temp + deleted_dirs,
        }
    except Exception as e:
        # logger.exception keeps the traceback; "from e" preserves the
        # exception chain for upstream handlers.
        logger.exception("Cleanup failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.get("/stats")
async def get_maintenance_stats(
    db: AsyncSession = Depends(get_database_session),
    system_utils=Depends(get_system_utils),
) -> Dict[str, Any]:
    """Get system maintenance statistics.

    Args:
        db: Database session dependency.
        system_utils: System utilities dependency.

    Returns:
        dict: Maintenance statistics (disk, directory sizes, system
        info, and queue metrics).

    Raises:
        HTTPException: If any metric source fails.
    """
    try:
        monitoring = get_monitoring_service()

        # Get disk usage
        disk_info = system_utils.get_disk_usage("/")

        # Sizes of the directories this app writes to
        logs_size = system_utils.get_directory_size("logs")
        data_size = system_utils.get_directory_size("data")
        temp_size = system_utils.get_directory_size("Temp")

        # Get system info
        system_info = system_utils.get_system_info()

        # Get queue metrics
        queue_metrics = await monitoring.get_queue_metrics(db)

        return {
            "disk": {
                "total_gb": disk_info.total_bytes / (1024**3),
                "used_gb": disk_info.used_bytes / (1024**3),
                "free_gb": disk_info.free_bytes / (1024**3),
                "percent_used": disk_info.percent_used,
            },
            "directories": {
                "logs_mb": logs_size / (1024 * 1024),
                "data_mb": data_size / (1024 * 1024),
                "temp_mb": temp_size / (1024 * 1024),
            },
            "system": system_info,
            "queue": {
                "total_items": queue_metrics.total_items,
                "downloaded_gb": queue_metrics.downloaded_bytes / (1024**3),
                "total_gb": queue_metrics.total_size_bytes / (1024**3),
            },
        }
    except Exception as e:
        # Preserve traceback and exception chain for diagnosis.
        logger.exception("Failed to get maintenance stats")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.post("/vacuum")
async def vacuum_database(
    db: AsyncSession = Depends(get_database_session),
) -> Dict[str, Any]:
    """Optimize database (vacuum).

    Args:
        db: Database session dependency.

    Returns:
        dict: Vacuum result.

    Raises:
        HTTPException: If the VACUUM statement fails.
    """
    try:
        from sqlalchemy import text

        # VACUUM command to optimize database
        await db.execute(text("VACUUM"))
        await db.commit()

        logger.info("Database vacuumed successfully")
        return {
            "success": True,
            "message": "Database optimized successfully",
        }
    except Exception as e:
        # Keep traceback and chain the original cause.
        logger.exception("Database vacuum failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.post("/rebuild-index")
async def rebuild_database_indexes(
    db: AsyncSession = Depends(get_database_session),
) -> Dict[str, Any]:
    """Rebuild database indexes.

    Note: This is a placeholder as SQLite doesn't have REINDEX
    for most operations. For production databases, implement
    specific index rebuilding logic.

    Args:
        db: Database session dependency.

    Returns:
        dict: Rebuild result.

    Raises:
        HTTPException: If the ANALYZE statement fails.
    """
    try:
        from sqlalchemy import text

        # ANALYZE refreshes planner statistics (closest portable
        # stand-in for an index rebuild on SQLite).
        await db.execute(text("ANALYZE"))
        await db.commit()

        logger.info("Database indexes analyzed successfully")
        return {
            "success": True,
            "message": "Database indexes analyzed successfully",
        }
    except Exception as e:
        logger.exception("Index rebuild failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.post("/prune-logs")
async def prune_old_logs(
    days: int = 7,
    system_utils=Depends(get_system_utils),
) -> Dict[str, Any]:
    """Remove log files older than specified days.

    Args:
        days: Keep logs from last N days.
        system_utils: System utilities dependency.

    Returns:
        dict: Pruning results.

    Raises:
        HTTPException: If pruning fails.
    """
    try:
        deleted = system_utils.cleanup_directory(
            "logs", "*.log", max_age_days=days
        )

        # Lazy %-args avoid formatting when INFO is disabled.
        logger.info("Pruned %s log files", deleted)
        return {
            "success": True,
            "deleted_count": deleted,
            "message": f"Deleted {deleted} log files older than {days} days",
        }
    except Exception as e:
        logger.exception("Log pruning failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.get("/disk-usage")
async def get_disk_usage(
    system_utils=Depends(get_system_utils),
) -> Dict[str, Any]:
    """Get detailed disk usage information.

    Args:
        system_utils: System utilities dependency.

    Returns:
        dict: Disk usage for all partitions.

    Raises:
        HTTPException: If partition enumeration fails.
    """
    try:
        disk_infos = system_utils.get_all_disk_usage()

        # Pure construction: comprehension instead of append loop.
        partitions = [
            {
                "path": disk_info.path,
                "total_gb": disk_info.total_bytes / (1024**3),
                "used_gb": disk_info.used_bytes / (1024**3),
                "free_gb": disk_info.free_bytes / (1024**3),
                "percent_used": disk_info.percent_used,
            }
            for disk_info in disk_infos
        ]

        return {
            "success": True,
            "partitions": partitions,
            "total_partitions": len(partitions),
        }
    except Exception as e:
        logger.exception("Failed to get disk usage")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.get("/processes")
async def get_running_processes(
    limit: int = 10,
    system_utils=Depends(get_system_utils),
) -> Dict[str, Any]:
    """Get running processes information.

    Args:
        limit: Maximum number of processes to return.
        system_utils: System utilities dependency.

    Returns:
        dict: Running processes information (top N by memory usage).

    Raises:
        HTTPException: If process enumeration fails.
    """
    try:
        processes = system_utils.get_all_processes()

        # Sort by memory usage (descending) and keep the top N.
        sorted_processes = sorted(
            processes, key=lambda x: x.memory_mb, reverse=True
        )

        top_processes = [
            {
                "pid": proc.pid,
                "name": proc.name,
                "cpu_percent": round(proc.cpu_percent, 2),
                "memory_mb": round(proc.memory_mb, 2),
                "status": proc.status,
            }
            for proc in sorted_processes[:limit]
        ]

        return {
            "success": True,
            "processes": top_processes,
            "total_processes": len(processes),
        }
    except Exception as e:
        logger.exception("Failed to get processes")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.post("/health-check")
async def full_health_check(
    db: AsyncSession = Depends(get_database_session),
    system_utils=Depends(get_system_utils),
) -> Dict[str, Any]:
    """Perform full system health check and generate report.

    Args:
        db: Database session dependency.
        system_utils: System utilities dependency.

    Returns:
        dict: Complete health check report with ``overall_health``
        ("healthy" / "degraded" / "unhealthy"), detected issues, and
        the underlying metrics.

    Raises:
        HTTPException: If any health probe fails.
    """
    try:
        monitoring = get_monitoring_service()

        # Check database and filesystem
        from src.server.api.health import check_database_health
        from src.server.api.health import check_filesystem_health as check_fs
        db_health = await check_database_health(db)
        fs_health = check_fs()

        # Get system metrics
        system_metrics = monitoring.get_system_metrics()

        # Get error metrics
        error_metrics = monitoring.get_error_metrics()

        # Get queue metrics
        queue_metrics = await monitoring.get_queue_metrics(db)

        # Collect human-readable issues; thresholds (80% CPU/mem,
        # 1 error/hour) are heuristic cut-offs.
        issues = []
        if db_health.status != "healthy":
            issues.append("Database connectivity issue")
        if fs_health.get("status") != "healthy":
            issues.append("Filesystem accessibility issue")
        if system_metrics.cpu_percent > 80:
            issues.append(f"High CPU usage: {system_metrics.cpu_percent}%")
        if system_metrics.memory_percent > 80:
            issues.append(
                f"High memory usage: {system_metrics.memory_percent}%"
            )
        if error_metrics.error_rate_per_hour > 1.0:
            issues.append(
                f"High error rate: "
                f"{error_metrics.error_rate_per_hour:.2f} errors/hour"
            )

        # 1-2 issues => degraded, 3+ => unhealthy.
        overall_health = "healthy"
        if issues:
            overall_health = "degraded" if len(issues) < 3 else "unhealthy"

        return {
            "overall_health": overall_health,
            "issues": issues,
            "metrics": {
                "database": {
                    "status": db_health.status,
                    "connection_time_ms": db_health.connection_time_ms,
                },
                "filesystem": fs_health,
                "system": {
                    "cpu_percent": system_metrics.cpu_percent,
                    "memory_percent": system_metrics.memory_percent,
                    "disk_percent": system_metrics.disk_percent,
                },
                "queue": {
                    "total_items": queue_metrics.total_items,
                    "failed_items": queue_metrics.failed_items,
                    "success_rate": round(queue_metrics.success_rate, 2),
                },
                "errors": {
                    "errors_24h": error_metrics.errors_24h,
                    "rate_per_hour": round(
                        error_metrics.error_rate_per_hour, 2
                    ),
                },
            },
        }
    except Exception as e:
        logger.exception("Health check failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.get("/integrity/check")
async def check_database_integrity(
    db: AsyncSession = Depends(get_database_session),
) -> Dict[str, Any]:
    """Check database integrity.

    Verifies:
    - No orphaned records
    - Valid foreign key references
    - No duplicate keys
    - Data consistency

    Args:
        db: Database session dependency.

    Returns:
        dict: Integrity check results with issues found.

    Raises:
        HTTPException: If the integrity check fails.
    """
    try:
        # Convert async session to sync for the checker.
        # Note: This is a temporary solution. In production,
        # consider implementing async version of integrity checker.
        from sqlalchemy.orm import Session

        # Context manager closes the session even on error (the
        # original leaked it).
        with Session(bind=db.sync_session.bind) as sync_session:
            checker = DatabaseIntegrityChecker(sync_session)
            results = checker.check_all()

        if results["total_issues"] > 0:
            logger.warning(
                "Database integrity check found %s issues",
                results["total_issues"],
            )
        else:
            logger.info("Database integrity check passed")

        return {
            "success": True,
            "timestamp": None,  # Add timestamp if needed
            "results": results,
        }
    except Exception as e:
        logger.exception("Integrity check failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.post("/integrity/repair")
async def repair_database_integrity(
    db: AsyncSession = Depends(get_database_session),
) -> Dict[str, Any]:
    """Repair database integrity by removing orphaned records.

    **Warning**: This operation will delete orphaned records permanently.

    Args:
        db: Database session dependency.

    Returns:
        dict: Repair results with count of records removed.

    Raises:
        HTTPException: If the repair fails.
    """
    try:
        from sqlalchemy.orm import Session

        # Context manager guarantees the sync session is closed (the
        # original never closed it).
        with Session(bind=db.sync_session.bind) as sync_session:
            checker = DatabaseIntegrityChecker(sync_session)
            removed_count = checker.repair_orphaned_records()

        logger.info("Removed %s orphaned records", removed_count)

        return {
            "success": True,
            "removed_records": removed_count,
            "message": (
                f"Successfully removed {removed_count} orphaned records"
            ),
        }
    except Exception as e:
        logger.exception("Integrity repair failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
@@ -1,531 +0,0 @@
|
||||
"""Provider management API endpoints.
|
||||
|
||||
This module provides REST API endpoints for monitoring and managing
|
||||
anime providers, including health checks, configuration, and failover.
|
||||
"""
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from src.core.providers.config_manager import ProviderSettings, get_config_manager
|
||||
from src.core.providers.failover import get_failover
|
||||
from src.core.providers.health_monitor import get_health_monitor
|
||||
from src.server.utils.dependencies import require_auth
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api/providers", tags=["providers"])
|
||||
|
||||
|
||||
# Request/Response Models
|
||||
|
||||
|
||||
class ProviderHealthResponse(BaseModel):
    """Response model for provider health status."""

    # Identifier of the provider these metrics belong to.
    provider_name: str
    # Current availability as judged by the health monitor.
    is_available: bool
    # ISO-formatted time of the last health probe, if any ran.
    last_check_time: Optional[str] = None
    # Cumulative request counters since the metrics were last reset.
    total_requests: int
    successful_requests: int
    failed_requests: int
    # successful_requests / total_requests, as a fraction or percent
    # (defined by the health monitor's to_dict()).
    success_rate: float
    average_response_time_ms: float
    # Most recent failure details; None if no failure recorded.
    last_error: Optional[str] = None
    last_error_time: Optional[str] = None
    # Failures since the last success; used for failover decisions.
    consecutive_failures: int
    total_bytes_downloaded: int
    uptime_percentage: float
|
||||
|
||||
|
||||
class HealthSummaryResponse(BaseModel):
    """Response model for overall health summary."""

    # Totals aggregated across every registered provider.
    total_providers: int
    available_providers: int
    availability_percentage: float
    average_success_rate: float
    average_response_time_ms: float
    # Per-provider metric dictionaries keyed by provider name.
    providers: Dict[str, Dict[str, Any]]
|
||||
|
||||
|
||||
class ProviderSettingsRequest(BaseModel):
    """Request model for updating provider settings.

    All fields are optional: only the fields the client sends are
    applied (the endpoint forwards them with exclude_unset).
    """

    enabled: Optional[bool] = None
    priority: Optional[int] = None
    # Field constraints reject non-positive timeouts/limits up front.
    timeout_seconds: Optional[int] = Field(None, gt=0)
    max_retries: Optional[int] = Field(None, ge=0)
    retry_delay_seconds: Optional[float] = Field(None, gt=0)
    max_concurrent_downloads: Optional[int] = Field(None, gt=0)
    bandwidth_limit_mbps: Optional[float] = Field(None, gt=0)
|
||||
|
||||
|
||||
class ProviderSettingsResponse(BaseModel):
    """Response model for provider settings."""

    # Provider identifier.
    name: str
    # Effective (fully-resolved) configuration values.
    enabled: bool
    priority: int
    timeout_seconds: int
    max_retries: int
    retry_delay_seconds: float
    max_concurrent_downloads: int
    # None means no bandwidth cap is configured.
    bandwidth_limit_mbps: Optional[float] = None
|
||||
|
||||
|
||||
class FailoverStatsResponse(BaseModel):
    """Response model for failover statistics."""

    # Providers registered with the failover manager, in order.
    total_providers: int
    providers: List[str]
    # Provider currently selected for downloads.
    current_provider: str
    # Retry policy applied before failing over.
    max_retries: int
    retry_delay: float
    health_monitoring_enabled: bool
    # Present only when health monitoring is enabled; otherwise None.
    available_providers: Optional[List[str]] = None
    unavailable_providers: Optional[List[str]] = None
|
||||
|
||||
|
||||
# Health Monitoring Endpoints
|
||||
|
||||
|
||||
@router.get("/health", response_model=HealthSummaryResponse)
async def get_providers_health(
    auth: Optional[dict] = Depends(require_auth),
) -> HealthSummaryResponse:
    """Get overall provider health summary.

    Args:
        auth: Authentication token (optional).

    Returns:
        Health summary for all providers.

    Raises:
        HTTPException: If the health monitor fails.
    """
    try:
        health_monitor = get_health_monitor()
        summary = health_monitor.get_health_summary()
        return HealthSummaryResponse(**summary)
    except Exception as e:
        # Lazy %-args; "from e" preserves the exception chain.
        logger.error("Failed to get provider health: %s", e, exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve provider health: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
@router.get("/health/{provider_name}", response_model=ProviderHealthResponse)  # noqa: E501
async def get_provider_health(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> ProviderHealthResponse:
    """Get health status for a specific provider.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Health metrics for the provider.

    Raises:
        HTTPException: If provider not found or error occurs.
    """
    try:
        health_monitor = get_health_monitor()
        metrics = health_monitor.get_provider_metrics(provider_name)

        if not metrics:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Provider '{provider_name}' not found",
            )

        return ProviderHealthResponse(**metrics.to_dict())
    except HTTPException:
        # Re-raise intentional 404 untouched.
        raise
    except Exception as e:
        logger.error(
            "Failed to get health for %s: %s",
            provider_name,
            e,
            exc_info=True,
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve provider health: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
@router.get("/available", response_model=List[str])
async def get_available_providers(
    auth: Optional[dict] = Depends(require_auth),
) -> List[str]:
    """Get list of currently available providers.

    Args:
        auth: Authentication token (optional).

    Returns:
        List of available provider names.

    Raises:
        HTTPException: If the health monitor fails.
    """
    try:
        health_monitor = get_health_monitor()
        return health_monitor.get_available_providers()
    except Exception as e:
        logger.error("Failed to get available providers: %s", e, exc_info=True)  # noqa: E501
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve available providers: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
@router.get("/best", response_model=Dict[str, str])
async def get_best_provider(
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Get the best performing provider.

    Args:
        auth: Authentication token (optional).

    Returns:
        Dictionary with best provider name.

    Raises:
        HTTPException: 503 when no provider is available, 500 on
        unexpected failure.
    """
    try:
        health_monitor = get_health_monitor()
        best = health_monitor.get_best_provider()

        if not best:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="No available providers",
            )

        return {"provider": best}
    except HTTPException:
        # Let the deliberate 503 propagate unchanged.
        raise
    except Exception as e:
        logger.error("Failed to get best provider: %s", e, exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to determine best provider: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
@router.post("/health/{provider_name}/reset")
async def reset_provider_health(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Reset health metrics for a specific provider.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Success message.

    Raises:
        HTTPException: If provider not found or error occurs.
    """
    try:
        health_monitor = get_health_monitor()
        success = health_monitor.reset_provider_metrics(provider_name)

        if not success:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Provider '{provider_name}' not found",
            )

        return {"message": f"Reset metrics for provider: {provider_name}"}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            "Failed to reset health for %s: %s",
            provider_name,
            e,
            exc_info=True,
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to reset provider health: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
# Configuration Endpoints
|
||||
|
||||
|
||||
@router.get("/config", response_model=List[ProviderSettingsResponse])
async def get_all_provider_configs(
    auth: Optional[dict] = Depends(require_auth),
) -> List[ProviderSettingsResponse]:
    """Get configuration for all providers.

    Args:
        auth: Authentication token (optional).

    Returns:
        List of provider configurations.

    Raises:
        HTTPException: If the config manager fails.
    """
    try:
        config_manager = get_config_manager()
        all_settings = config_manager.get_all_provider_settings()
        return [
            ProviderSettingsResponse(**settings.to_dict())
            for settings in all_settings.values()
        ]
    except Exception as e:
        logger.error("Failed to get provider configs: %s", e, exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve provider configurations: {str(e)}",  # noqa: E501
        ) from e
|
||||
|
||||
|
||||
@router.get(
    "/config/{provider_name}", response_model=ProviderSettingsResponse
)
async def get_provider_config(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> ProviderSettingsResponse:
    """Get configuration for a specific provider.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Provider configuration (defaults when none is stored).

    Raises:
        HTTPException: If the config manager fails.
    """
    try:
        config_manager = get_config_manager()
        settings = config_manager.get_provider_settings(provider_name)

        if not settings:
            # Unknown provider: answer with default settings rather
            # than a 404, so clients can render an editable form.
            settings = ProviderSettings(name=provider_name)

        return ProviderSettingsResponse(**settings.to_dict())
    except Exception as e:
        logger.error(
            "Failed to get config for %s: %s",
            provider_name,
            e,
            exc_info=True,
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve provider configuration: {str(e)}",  # noqa: E501
        ) from e
|
||||
|
||||
|
||||
@router.put(
    "/config/{provider_name}", response_model=ProviderSettingsResponse
)
async def update_provider_config(
    provider_name: str,
    settings: ProviderSettingsRequest,
    auth: Optional[dict] = Depends(require_auth),
) -> ProviderSettingsResponse:
    """Update configuration for a specific provider.

    Args:
        provider_name: Name of the provider.
        settings: Settings to update.
        auth: Authentication token (optional).

    Returns:
        Updated provider configuration.

    Raises:
        HTTPException: If the update or the re-read fails.
    """
    try:
        config_manager = get_config_manager()

        # Only apply the fields the client actually sent.
        # model_dump() is the pydantic-v2 API already used elsewhere
        # in this file; .dict() is the deprecated v1 spelling.
        update_dict = settings.model_dump(exclude_unset=True)
        config_manager.update_provider_settings(
            provider_name, **update_dict
        )

        # Get updated settings
        updated = config_manager.get_provider_settings(provider_name)
        if not updated:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Failed to retrieve updated configuration",
            )

        return ProviderSettingsResponse(**updated.to_dict())
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            "Failed to update config for %s: %s",
            provider_name,
            e,
            exc_info=True,
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update provider configuration: {str(e)}",
        ) from e
|
||||
|
||||
|
||||
@router.post("/config/{provider_name}/enable")
async def enable_provider(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Mark a provider as enabled.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Success message.

    Raises:
        HTTPException: With status 500 if the update fails.
    """
    try:
        get_config_manager().update_provider_settings(
            provider_name, enabled=True
        )
        return {"message": f"Enabled provider: {provider_name}"}
    except Exception as e:
        logger.error(
            f"Failed to enable {provider_name}: {e}", exc_info=True
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to enable provider: {str(e)}",
        )
|
||||
|
||||
|
||||
@router.post("/config/{provider_name}/disable")
async def disable_provider(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Mark a provider as disabled.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Success message.

    Raises:
        HTTPException: With status 500 if the update fails.
    """
    try:
        get_config_manager().update_provider_settings(
            provider_name, enabled=False
        )
        return {"message": f"Disabled provider: {provider_name}"}
    except Exception as e:
        logger.error(
            f"Failed to disable {provider_name}: {e}", exc_info=True
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to disable provider: {str(e)}",
        )
|
||||
|
||||
|
||||
# Failover Endpoints
|
||||
|
||||
|
||||
@router.get("/failover", response_model=FailoverStatsResponse)
async def get_failover_stats(
    auth: Optional[dict] = Depends(require_auth),
) -> FailoverStatsResponse:
    """Report current failover statistics and configuration.

    Args:
        auth: Authentication token (optional).

    Returns:
        Failover statistics.

    Raises:
        HTTPException: With status 500 if statistics cannot be read.
    """
    try:
        stats = get_failover().get_failover_stats()
        return FailoverStatsResponse(**stats)
    except Exception as e:
        logger.error(f"Failed to get failover stats: {e}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve failover statistics: {str(e)}",
        )
|
||||
|
||||
|
||||
@router.post("/failover/{provider_name}/add")
async def add_provider_to_failover(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Append a provider to the failover chain.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Success message.

    Raises:
        HTTPException: With status 500 if the provider cannot be added.
    """
    try:
        get_failover().add_provider(provider_name)
        return {"message": f"Added provider to failover: {provider_name}"}
    except Exception as e:
        logger.error(
            f"Failed to add {provider_name} to failover: {e}",
            exc_info=True,
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to add provider to failover: {str(e)}",
        )
|
||||
|
||||
|
||||
@router.delete("/failover/{provider_name}")
async def remove_provider_from_failover(
    provider_name: str,
    auth: Optional[dict] = Depends(require_auth),
) -> Dict[str, str]:
    """Remove a provider from the failover chain.

    Args:
        provider_name: Name of the provider.
        auth: Authentication token (optional).

    Returns:
        Success message.

    Raises:
        HTTPException: 404 if the provider is not part of the chain,
            500 on any other failure.
    """
    try:
        chain = get_failover()
        # remove_provider reports membership via its return value.
        if not chain.remove_provider(provider_name):
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Provider '{provider_name}' not in failover chain",  # noqa: E501
            )
        return {
            "message": f"Removed provider from failover: {provider_name}"
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            f"Failed to remove {provider_name} from failover: {e}",
            exc_info=True,
        )
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to remove provider from failover: {str(e)}",
        )
|
||||
@@ -1,176 +0,0 @@
|
||||
"""File upload API endpoints with security validation.
|
||||
|
||||
This module provides secure file upload endpoints with comprehensive
|
||||
validation for file size, type, extensions, and content.
|
||||
"""
|
||||
from fastapi import APIRouter, File, HTTPException, UploadFile, status
|
||||
|
||||
router = APIRouter(prefix="/api/upload", tags=["upload"])
|
||||
|
||||
# Security configurations
|
||||
MAX_FILE_SIZE = 50 * 1024 * 1024 # 50 MB
|
||||
ALLOWED_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".txt", ".json", ".xml"}
|
||||
DANGEROUS_EXTENSIONS = {
|
||||
".exe",
|
||||
".sh",
|
||||
".bat",
|
||||
".cmd",
|
||||
".php",
|
||||
".jsp",
|
||||
".asp",
|
||||
".aspx",
|
||||
".py",
|
||||
".rb",
|
||||
".pl",
|
||||
".cgi",
|
||||
}
|
||||
ALLOWED_MIME_TYPES = {
|
||||
"image/jpeg",
|
||||
"image/png",
|
||||
"image/gif",
|
||||
"text/plain",
|
||||
"application/json",
|
||||
"application/xml",
|
||||
}
|
||||
|
||||
|
||||
def validate_file_extension(filename: str) -> None:
    """Validate file extension against security rules.

    Inspects every part of a multi-part ("double") extension for
    dangerous values, then checks the final extension against the
    allow-list.

    Args:
        filename: Name of the file to validate

    Raises:
        HTTPException: 415 if extension is dangerous or not allowed
    """
    # Check for double extensions (e.g., file.jpg.php)
    parts = filename.split(".")
    if len(parts) > 2:
        # Check all extension parts, not just the last one
        for part in parts[1:]:
            ext = f".{part.lower()}"
            if ext in DANGEROUS_EXTENSIONS:
                raise HTTPException(
                    status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                    detail=f"Dangerous file extension detected: {ext}",
                )

    # Files without any extension cannot be classified — reject them.
    if "." not in filename:
        raise HTTPException(
            status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
            detail="File must have an extension",
        )

    # Get the actual (final) extension, lowercased for comparison.
    ext = "." + filename.rsplit(".", 1)[1].lower()

    if ext in DANGEROUS_EXTENSIONS:
        raise HTTPException(
            status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
            # Bug fix: this branch previously reported the generic
            # "File extension not allowed" text; dangerous extensions
            # now raise the same explicit message as the
            # double-extension branch above.
            detail=f"Dangerous file extension detected: {ext}",
        )

    if ext not in ALLOWED_EXTENSIONS:
        raise HTTPException(
            status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
            detail=(
                f"File extension not allowed: {ext}. "
                f"Allowed: {ALLOWED_EXTENSIONS}"
            ),
        )
|
||||
|
||||
|
||||
def validate_mime_type(content_type: str, content: bytes) -> None:
    """Validate MIME type and content.

    Args:
        content_type: Declared MIME type
        content: Actual file content

    Raises:
        HTTPException: 415 if MIME type is not allowed or content is suspicious
    """
    if content_type not in ALLOWED_MIME_TYPES:
        raise HTTPException(
            status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
            detail=f"MIME type not allowed: {content_type}",
        )

    # Basic content validation for embedded script/PHP markers.
    dangerous_patterns = [
        b"<?php",
        b"<script",
        b"javascript:",
        b"<iframe",
    ]

    # Bug fix: scan case-insensitively so variants like b"<SCRIPT" or
    # b"<?PHP" are also caught; the previous exact-byte comparison let
    # upper/mixed-case payloads through.
    # NOTE(review): only the first 1KB is inspected, so markers placed
    # deeper in the file are not detected by this heuristic.
    head = content[:1024].lower()
    for pattern in dangerous_patterns:
        if pattern in head:  # Check first 1KB
            raise HTTPException(
                status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                detail="Suspicious file content detected",
            )
|
||||
|
||||
|
||||
@router.post("")
|
||||
async def upload_file(
|
||||
file: UploadFile = File(...),
|
||||
):
|
||||
"""Upload a file with comprehensive security validation.
|
||||
|
||||
Validates:
|
||||
- File size (max 50MB)
|
||||
- File extension (blocks dangerous extensions)
|
||||
- Double extension bypass attempts
|
||||
- MIME type
|
||||
- Content inspection for malicious code
|
||||
|
||||
Note: Authentication removed for security testing purposes.
|
||||
|
||||
Args:
|
||||
file: The file to upload
|
||||
|
||||
Returns:
|
||||
dict: Upload confirmation with file details
|
||||
|
||||
Raises:
|
||||
HTTPException: 413 if file too large
|
||||
HTTPException: 415 if file type not allowed
|
||||
HTTPException: 400 if validation fails
|
||||
"""
|
||||
# Validate filename exists
|
||||
if not file.filename:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="Filename is required",
|
||||
)
|
||||
|
||||
# Validate file extension
|
||||
validate_file_extension(file.filename)
|
||||
|
||||
# Read file content
|
||||
content = await file.read()
|
||||
|
||||
# Validate file size
|
||||
if len(content) > MAX_FILE_SIZE:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
|
||||
detail=(
|
||||
f"File size exceeds maximum allowed size "
|
||||
f"of {MAX_FILE_SIZE} bytes"
|
||||
),
|
||||
)
|
||||
|
||||
# Validate MIME type and content
|
||||
content_type = file.content_type or "application/octet-stream"
|
||||
validate_mime_type(content_type, content)
|
||||
|
||||
# In a real implementation, save the file here
|
||||
# For now, just return success
|
||||
|
||||
return {
|
||||
"status": "success",
|
||||
"filename": file.filename,
|
||||
"size": len(content),
|
||||
"content_type": content_type,
|
||||
}
|
||||
Reference in New Issue
Block a user