This commit is contained in:
2025-10-12 18:05:31 +02:00
parent 57d49bcf78
commit 7a71715183
130 changed files with 30010 additions and 50631 deletions

View File

@@ -1,131 +1,131 @@
import os
import re
import logging
from .entities.series import Serie
import traceback
from ..infrastructure.logging.GlobalLogger import error_logger, noKeyFound_logger
from .exceptions.Exceptions import NoKeyFoundException, MatchNotFoundError
from .providers.base_provider import Loader
class SerieScanner:
def __init__(self, basePath: str, loader: Loader):
self.directory = basePath
self.folderDict: dict[str, Serie] = {} # Proper initialization
self.loader = loader
logging.info(f"Initialized Loader with base path: {self.directory}")
def Reinit(self):
self.folderDict: dict[str, Serie] = {} # Proper initialization
def is_null_or_whitespace(self, s):
return s is None or s.strip() == ""
def GetTotalToScan(self):
result = self.__find_mp4_files()
return sum(1 for _ in result)
def Scan(self, callback):
logging.info("Starting process to load missing episodes")
result = self.__find_mp4_files()
counter = 0
for folder, mp4_files in result:
try:
counter += 1
callback(folder, counter)
serie = self.__ReadDataFromFile(folder)
if serie is not None and not self.is_null_or_whitespace(serie.key):
missings, site = self.__GetMissingEpisodesAndSeason(serie.key, mp4_files)
serie.episodeDict = missings
serie.folder = folder
serie.save_to_file(os.path.join(self.directory, folder, 'data'))
if serie.key in self.folderDict:
logging.error(f"Duplicate key found: {serie.key}")
self.folderDict[serie.key] = serie
noKeyFound_logger.info(f"Saved Serie: '{str(serie)}'")
except NoKeyFoundException as nkfe:
NoKeyFoundException.error(f"Error processing folder '{folder}': {nkfe}")
except Exception as e:
error_logger.error(f"Folder: '{folder}' - Unexpected error processing folder '{folder}': {e} \n {traceback.format_exc()}")
continue
def __find_mp4_files(self):
logging.info("Scanning for .mp4 files")
for anime_name in os.listdir(self.directory):
anime_path = os.path.join(self.directory, anime_name)
if os.path.isdir(anime_path):
mp4_files = []
has_files = False
for root, _, files in os.walk(anime_path):
for file in files:
if file.endswith(".mp4"):
mp4_files.append(os.path.join(root, file))
has_files = True
yield anime_name, mp4_files if has_files else []
def __remove_year(self, input_string: str):
cleaned_string = re.sub(r'\(\d{4}\)', '', input_string).strip()
logging.debug(f"Removed year from '{input_string}' -> '{cleaned_string}'")
return cleaned_string
def __ReadDataFromFile(self, folder_name: str):
folder_path = os.path.join(self.directory, folder_name)
key = None
key_file = os.path.join(folder_path, 'key')
serie_file = os.path.join(folder_path, 'data')
if os.path.exists(key_file):
with open(key_file, 'r') as file:
key = file.read().strip()
logging.info(f"Key found for folder '{folder_name}': {key}")
return Serie(key, "", "aniworld.to", folder_name, dict())
if os.path.exists(serie_file):
with open(serie_file, "rb") as file:
logging.info(f"load serie_file from '{folder_name}': {serie_file}")
return Serie.load_from_file(serie_file)
return None
def __GetEpisodeAndSeason(self, filename: str):
pattern = r'S(\d+)E(\d+)'
match = re.search(pattern, filename)
if match:
season = match.group(1)
episode = match.group(2)
logging.debug(f"Extracted season {season}, episode {episode} from '{filename}'")
return int(season), int(episode)
else:
logging.error(f"Failed to find season/episode pattern in '{filename}'")
raise MatchNotFoundError("Season and episode pattern not found in the filename.")
def __GetEpisodesAndSeasons(self, mp4_files: list[str]):
episodes_dict = {}
for file in mp4_files:
season, episode = self.__GetEpisodeAndSeason(file)
if season in episodes_dict:
episodes_dict[season].append(episode)
else:
episodes_dict[season] = [episode]
return episodes_dict
def __GetMissingEpisodesAndSeason(self, key: str, mp4_files: list[str]):
expected_dict = self.loader.get_season_episode_count(key)  # key: season number, value: expected episode count
filedict = self.__GetEpisodesAndSeasons(mp4_files)
episodes_dict = {}
for season, expected_count in expected_dict.items():
existing_episodes = filedict.get(season, [])
missing_episodes = [ep for ep in range(1, expected_count + 1) if ep not in existing_episodes and self.loader.IsLanguage(season, ep, key)]
if missing_episodes:
episodes_dict[season] = missing_episodes
return episodes_dict, "aniworld.to"

View File

@@ -1,38 +1,38 @@
from src.core.entities.SerieList import SerieList
from src.core.providers.provider_factory import Loaders
from src.core.SerieScanner import SerieScanner
class SeriesApp:
_initialization_count = 0
def __init__(self, directory_to_search: str):
SeriesApp._initialization_count += 1 # Only show initialization message for the first instance
if SeriesApp._initialization_count <= 1:
print("Please wait while initializing...")
self.progress = None
self.directory_to_search = directory_to_search
self.Loaders = Loaders()
self.loader = self.Loaders.GetLoader(key="aniworld.to")
self.SerieScanner = SerieScanner(directory_to_search, self.loader)
self.List = SerieList(self.directory_to_search)
self.__InitList__()
def __InitList__(self):
self.series_list = self.List.GetMissingEpisode()
def search(self, words: str) -> list:
return self.loader.Search(words)
def download(self, serieFolder: str, season: int, episode: int, key: str, callback) -> bool:
return self.loader.Download(self.directory_to_search, serieFolder, season, episode, key, "German Dub", callback)
def ReScan(self, callback):
self.SerieScanner.Reinit()
self.SerieScanner.Scan(callback)
self.List = SerieList(self.directory_to_search)
self.__InitList__()
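
A short usage sketch of this facade, assuming a valid library path; the import location is not shown in the diff and would need to match where SeriesApp actually lives in the repository.

from SeriesApp import SeriesApp  # import path assumed; adjust to the module's actual location

app = SeriesApp("/path/to/library")             # scans the library during construction
hits = app.search("one piece")                  # delegated to the aniworld.to loader
app.ReScan(lambda folder, n: print(n, folder))  # rebuild the missing-episode list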

View File

@@ -1,12 +1,12 @@
"""
Core module for AniWorld application.
Contains domain entities, interfaces, application services, and exceptions.
"""
from . import entities
from . import exceptions
from . import interfaces
from . import application
from . import providers
"""
Core module for AniWorld application.
Contains domain entities, interfaces, application services, and exceptions.
"""
from . import entities
from . import exceptions
from . import interfaces
from . import application
from . import providers
__all__ = ['entities', 'exceptions', 'interfaces', 'application', 'providers']

File diff suppressed because it is too large.

View File

@@ -1,981 +0,0 @@
"""
User Preferences and Settings Persistence Manager
This module provides user preferences management, settings persistence,
and customization options for the AniWorld web interface.
"""
import json
import os
from typing import Dict, Any, Optional
from datetime import datetime
from flask import Blueprint, request, jsonify, session
class UserPreferencesManager:
"""Manages user preferences and settings persistence."""
def __init__(self, app=None):
self.app = app
self.preferences_file = 'data/user_preferences.json'
self.preferences = {} # Initialize preferences attribute
self.default_preferences = {
'ui': {
'theme': 'auto', # 'light', 'dark', 'auto'
'density': 'comfortable', # 'compact', 'comfortable', 'spacious'
'language': 'en',
'animations_enabled': True,
'sidebar_collapsed': False,
'grid_view': True,
'items_per_page': 20
},
'downloads': {
'auto_download': False,
'download_quality': 'best',
'concurrent_downloads': 3,
'retry_failed': True,
'notification_sound': True,
'auto_organize': True
},
'notifications': {
'browser_notifications': True,
'email_notifications': False,
'webhook_notifications': False,
'notification_types': {
'download_complete': True,
'download_error': True,
'series_updated': False,
'system_alerts': True
}
},
'keyboard_shortcuts': {
'enabled': True,
'shortcuts': {
'search': 'ctrl+f',
'download': 'ctrl+d',
'refresh': 'f5',
'select_all': 'ctrl+a',
'help': 'f1',
'settings': 'ctrl+comma'
}
},
'advanced': {
'debug_mode': False,
'performance_mode': False,
'cache_enabled': True,
'auto_backup': True,
'log_level': 'info'
}
}
# Initialize with defaults if no app provided
if app is None:
self.preferences = self.default_preferences.copy()
else:
self.init_app(app)
def init_app(self, app):
"""Initialize with Flask app."""
self.app = app
self.preferences_file = os.path.join(app.instance_path, 'data/user_preferences.json')
# Ensure the directory that will hold the preferences file exists
os.makedirs(os.path.dirname(self.preferences_file), exist_ok=True)
# Load or create preferences file
self.load_preferences()
def load_preferences(self) -> Dict[str, Any]:
"""Load preferences from file."""
try:
if os.path.exists(self.preferences_file):
with open(self.preferences_file, 'r', encoding='utf-8') as f:
loaded_prefs = json.load(f)
# Merge with defaults to ensure all keys exist
self.preferences = self.merge_preferences(self.default_preferences, loaded_prefs)
else:
self.preferences = self.default_preferences.copy()
self.save_preferences()
except Exception as e:
print(f"Error loading preferences: {e}")
self.preferences = self.default_preferences.copy()
return self.preferences
def save_preferences(self) -> bool:
"""Save preferences to file."""
try:
with open(self.preferences_file, 'w', encoding='utf-8') as f:
json.dump(self.preferences, f, indent=2, ensure_ascii=False)
return True
except Exception as e:
print(f"Error saving preferences: {e}")
return False
def merge_preferences(self, defaults: Dict, user_prefs: Dict) -> Dict:
"""Recursively merge user preferences with defaults."""
result = defaults.copy()
for key, value in user_prefs.items():
if key in result and isinstance(result[key], dict) and isinstance(value, dict):
result[key] = self.merge_preferences(result[key], value)
else:
result[key] = value
return result
def get_preference(self, key: str, default: Any = None) -> Any:
"""Get a specific preference using dot notation (e.g., 'ui.theme')."""
keys = key.split('.')
value = self.preferences
try:
for k in keys:
value = value[k]
return value
except (KeyError, TypeError):
return default
def set_preference(self, key: str, value: Any) -> bool:
"""Set a specific preference using dot notation."""
keys = key.split('.')
pref_dict = self.preferences
try:
# Navigate to parent dictionary
for k in keys[:-1]:
if k not in pref_dict:
pref_dict[k] = {}
pref_dict = pref_dict[k]
# Set the value
pref_dict[keys[-1]] = value
# Save to file
return self.save_preferences()
except Exception as e:
print(f"Error setting preference {key}: {e}")
return False
def reset_preferences(self) -> bool:
"""Reset all preferences to defaults."""
self.preferences = self.default_preferences.copy()
return self.save_preferences()
def export_preferences(self) -> str:
"""Export preferences as JSON string."""
try:
return json.dumps(self.preferences, indent=2, ensure_ascii=False)
except Exception as e:
print(f"Error exporting preferences: {e}")
return "{}"
def import_preferences(self, json_data: str) -> bool:
"""Import preferences from JSON string."""
try:
imported_prefs = json.loads(json_data)
self.preferences = self.merge_preferences(self.default_preferences, imported_prefs)
return self.save_preferences()
except Exception as e:
print(f"Error importing preferences: {e}")
return False
def get_user_session_preferences(self) -> Dict[str, Any]:
"""Get preferences for current user session."""
# For now, return global preferences
# In the future, could be user-specific
return self.preferences.copy()
def get_preferences_js(self):
"""Generate JavaScript code for preferences management."""
return f"""
// AniWorld User Preferences Manager
class UserPreferencesManager {{
constructor() {{
this.preferences = {json.dumps(self.preferences)};
this.defaultPreferences = {json.dumps(self.default_preferences)};
this.changeListeners = new Map();
this.init();
}}
init() {{
this.loadFromServer();
this.applyPreferences();
this.setupPreferencesUI();
this.setupAutoSave();
}}
async loadFromServer() {{
try {{
const response = await fetch('/api/preferences');
if (response.ok) {{
this.preferences = await response.json();
this.applyPreferences();
}}
}} catch (error) {{
console.error('Error loading preferences:', error);
}}
}}
async saveToServer() {{
try {{
const response = await fetch('/api/preferences', {{
method: 'PUT',
headers: {{
'Content-Type': 'application/json'
}},
body: JSON.stringify(this.preferences)
}});
if (!response.ok) {{
console.error('Error saving preferences to server');
}}
}} catch (error) {{
console.error('Error saving preferences:', error);
}}
}}
get(key, defaultValue = null) {{
const keys = key.split('.');
let value = this.preferences;
try {{
for (const k of keys) {{
value = value[k];
}}
return value !== undefined ? value : defaultValue;
}} catch (error) {{
return defaultValue;
}}
}}
set(key, value, save = true) {{
const keys = key.split('.');
let obj = this.preferences;
// Navigate to parent object
for (let i = 0; i < keys.length - 1; i++) {{
const k = keys[i];
if (!obj[k] || typeof obj[k] !== 'object') {{
obj[k] = {{}};
}}
obj = obj[k];
}}
// Set the value
const lastKey = keys[keys.length - 1];
const oldValue = obj[lastKey];
obj[lastKey] = value;
// Apply the change immediately
this.applyPreference(key, value);
// Notify listeners
this.notifyChangeListeners(key, value, oldValue);
// Save to server
if (save) {{
this.saveToServer();
}}
// Store in localStorage as backup
localStorage.setItem('aniworld_preferences', JSON.stringify(this.preferences));
}}
applyPreferences() {{
// Apply all preferences
this.applyTheme();
this.applyUIPreferences();
this.applyKeyboardShortcuts();
this.applyNotificationSettings();
}}
applyPreference(key, value) {{
// Apply individual preference change
if (key.startsWith('ui.theme')) {{
this.applyTheme();
}} else if (key.startsWith('ui.')) {{
this.applyUIPreferences();
}} else if (key.startsWith('keyboard_shortcuts.')) {{
this.applyKeyboardShortcuts();
}} else if (key.startsWith('notifications.')) {{
this.applyNotificationSettings();
}}
}}
applyTheme() {{
const theme = this.get('ui.theme', 'auto');
const html = document.documentElement;
html.classList.remove('theme-light', 'theme-dark');
if (theme === 'auto') {{
// Use system preference
const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
html.classList.add(prefersDark ? 'theme-dark' : 'theme-light');
}} else {{
html.classList.add(`theme-${{theme}}`);
}}
// Update Bootstrap theme
html.setAttribute('data-bs-theme', theme === 'dark' || (theme === 'auto' && window.matchMedia('(prefers-color-scheme: dark)').matches) ? 'dark' : 'light');
}}
applyUIPreferences() {{
const density = this.get('ui.density', 'comfortable');
const animations = this.get('ui.animations_enabled', true);
const gridView = this.get('ui.grid_view', true);
// Apply UI density
document.body.className = document.body.className.replace(/density-\\w+/g, '');
document.body.classList.add(`density-${{density}}`);
// Apply animations
if (!animations) {{
document.body.classList.add('no-animations');
}} else {{
document.body.classList.remove('no-animations');
}}
// Apply view mode
const viewToggle = document.querySelector('.view-toggle');
if (viewToggle) {{
viewToggle.classList.toggle('grid-view', gridView);
viewToggle.classList.toggle('list-view', !gridView);
}}
}}
applyKeyboardShortcuts() {{
const enabled = this.get('keyboard_shortcuts.enabled', true);
const shortcuts = this.get('keyboard_shortcuts.shortcuts', {{}});
if (window.keyboardManager) {{
window.keyboardManager.setEnabled(enabled);
window.keyboardManager.updateShortcuts(shortcuts);
}}
}}
applyNotificationSettings() {{
const browserNotifications = this.get('notifications.browser_notifications', true);
// Request notification permission if needed
if (browserNotifications && 'Notification' in window && Notification.permission === 'default') {{
Notification.requestPermission();
}}
}}
setupPreferencesUI() {{
this.createSettingsModal();
this.bindSettingsEvents();
}}
createSettingsModal() {{
const existingModal = document.getElementById('preferences-modal');
if (existingModal) return;
const modal = document.createElement('div');
modal.id = 'preferences-modal';
modal.className = 'modal fade';
modal.innerHTML = `
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title">Preferences</h5>
<button type="button" class="btn-close" data-bs-dismiss="modal"></button>
</div>
<div class="modal-body">
<ul class="nav nav-tabs mb-3">
<li class="nav-item">
<a class="nav-link active" data-bs-toggle="tab" href="#ui-tab">Interface</a>
</li>
<li class="nav-item">
<a class="nav-link" data-bs-toggle="tab" href="#downloads-tab">Downloads</a>
</li>
<li class="nav-item">
<a class="nav-link" data-bs-toggle="tab" href="#notifications-tab">Notifications</a>
</li>
<li class="nav-item">
<a class="nav-link" data-bs-toggle="tab" href="#shortcuts-tab">Shortcuts</a>
</li>
<li class="nav-item">
<a class="nav-link" data-bs-toggle="tab" href="#advanced-tab">Advanced</a>
</li>
</ul>
<div class="tab-content">
${{this.createUITab()}}
${{this.createDownloadsTab()}}
${{this.createNotificationsTab()}}
${{this.createShortcutsTab()}}
${{this.createAdvancedTab()}}
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
<button type="button" class="btn btn-outline-danger" id="reset-preferences">Reset to Defaults</button>
<button type="button" class="btn btn-outline-primary" id="export-preferences">Export</button>
<button type="button" class="btn btn-outline-primary" id="import-preferences">Import</button>
<button type="button" class="btn btn-primary" id="save-preferences">Save</button>
</div>
</div>
</div>
`;
document.body.appendChild(modal);
}}
createUITab() {{
return `
<div class="tab-pane fade show active" id="ui-tab">
<div class="row">
<div class="col-md-6">
<div class="mb-3">
<label class="form-label">Theme</label>
<select class="form-select" id="pref-theme">
<option value="auto">Auto (System)</option>
<option value="light">Light</option>
<option value="dark">Dark</option>
</select>
</div>
<div class="mb-3">
<label class="form-label">UI Density</label>
<select class="form-select" id="pref-density">
<option value="compact">Compact</option>
<option value="comfortable">Comfortable</option>
<option value="spacious">Spacious</option>
</select>
</div>
<div class="mb-3">
<label class="form-label">Language</label>
<select class="form-select" id="pref-language">
<option value="en">English</option>
<option value="de">German</option>
<option value="ja">Japanese</option>
</select>
</div>
</div>
<div class="col-md-6">
<div class="mb-3">
<label class="form-label">Items per page</label>
<select class="form-select" id="pref-items-per-page">
<option value="10">10</option>
<option value="20">20</option>
<option value="50">50</option>
<option value="100">100</option>
</select>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-animations">
<label class="form-check-label" for="pref-animations">
Enable animations
</label>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-grid-view">
<label class="form-check-label" for="pref-grid-view">
Default to grid view
</label>
</div>
</div>
</div>
</div>
`;
}}
createDownloadsTab() {{
return `
<div class="tab-pane fade" id="downloads-tab">
<div class="row">
<div class="col-md-6">
<div class="mb-3">
<label class="form-label">Download Quality</label>
<select class="form-select" id="pref-download-quality">
<option value="best">Best Available</option>
<option value="1080p">1080p</option>
<option value="720p">720p</option>
<option value="480p">480p</option>
</select>
</div>
<div class="mb-3">
<label class="form-label">Concurrent Downloads</label>
<input type="number" class="form-control" id="pref-concurrent-downloads" min="1" max="10">
</div>
</div>
<div class="col-md-6">
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-auto-download">
<label class="form-check-label" for="pref-auto-download">
Auto-download new episodes
</label>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-retry-failed">
<label class="form-check-label" for="pref-retry-failed">
Retry failed downloads
</label>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-auto-organize">
<label class="form-check-label" for="pref-auto-organize">
Auto-organize downloads
</label>
</div>
</div>
</div>
</div>
`;
}}
createNotificationsTab() {{
return `
<div class="tab-pane fade" id="notifications-tab">
<div class="row">
<div class="col-md-6">
<h6>General</h6>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-browser-notifications">
<label class="form-check-label" for="pref-browser-notifications">
Browser notifications
</label>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-notification-sound">
<label class="form-check-label" for="pref-notification-sound">
Notification sound
</label>
</div>
</div>
<div class="col-md-6">
<h6>Notification Types</h6>
<div class="form-check mb-2">
<input class="form-check-input" type="checkbox" id="pref-notify-download-complete">
<label class="form-check-label" for="pref-notify-download-complete">
Download complete
</label>
</div>
<div class="form-check mb-2">
<input class="form-check-input" type="checkbox" id="pref-notify-download-error">
<label class="form-check-label" for="pref-notify-download-error">
Download errors
</label>
</div>
<div class="form-check mb-2">
<input class="form-check-input" type="checkbox" id="pref-notify-series-updated">
<label class="form-check-label" for="pref-notify-series-updated">
Series updates
</label>
</div>
</div>
</div>
</div>
`;
}}
createShortcutsTab() {{
return `
<div class="tab-pane fade" id="shortcuts-tab">
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-shortcuts-enabled">
<label class="form-check-label" for="pref-shortcuts-enabled">
Enable keyboard shortcuts
</label>
</div>
<div id="shortcuts-list">
<!-- Shortcuts will be populated dynamically -->
</div>
</div>
`;
}}
createAdvancedTab() {{
return `
<div class="tab-pane fade" id="advanced-tab">
<div class="row">
<div class="col-md-6">
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-debug-mode">
<label class="form-check-label" for="pref-debug-mode">
Debug mode
</label>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-performance-mode">
<label class="form-check-label" for="pref-performance-mode">
Performance mode
</label>
</div>
</div>
<div class="col-md-6">
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-cache-enabled">
<label class="form-check-label" for="pref-cache-enabled">
Enable caching
</label>
</div>
<div class="form-check mb-3">
<input class="form-check-input" type="checkbox" id="pref-auto-backup">
<label class="form-check-label" for="pref-auto-backup">
Auto backup settings
</label>
</div>
</div>
</div>
</div>
`;
}}
bindSettingsEvents() {{
// Theme system preference listener
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', () => {{
if (this.get('ui.theme') === 'auto') {{
this.applyTheme();
}}
}});
// Settings modal events will be bound when modal is shown
document.addEventListener('show.bs.modal', (e) => {{
if (e.target.id === 'preferences-modal') {{
this.populateSettingsForm();
}}
}});
}}
populateSettingsForm() {{
// Populate form fields with current preferences
const fields = [
{{ id: 'pref-theme', key: 'ui.theme' }},
{{ id: 'pref-density', key: 'ui.density' }},
{{ id: 'pref-language', key: 'ui.language' }},
{{ id: 'pref-items-per-page', key: 'ui.items_per_page' }},
{{ id: 'pref-animations', key: 'ui.animations_enabled' }},
{{ id: 'pref-grid-view', key: 'ui.grid_view' }},
{{ id: 'pref-download-quality', key: 'downloads.download_quality' }},
{{ id: 'pref-concurrent-downloads', key: 'downloads.concurrent_downloads' }},
{{ id: 'pref-auto-download', key: 'downloads.auto_download' }},
{{ id: 'pref-retry-failed', key: 'downloads.retry_failed' }},
{{ id: 'pref-auto-organize', key: 'downloads.auto_organize' }},
{{ id: 'pref-browser-notifications', key: 'notifications.browser_notifications' }},
{{ id: 'pref-notification-sound', key: 'downloads.notification_sound' }},
{{ id: 'pref-shortcuts-enabled', key: 'keyboard_shortcuts.enabled' }},
{{ id: 'pref-debug-mode', key: 'advanced.debug_mode' }},
{{ id: 'pref-performance-mode', key: 'advanced.performance_mode' }},
{{ id: 'pref-cache-enabled', key: 'advanced.cache_enabled' }},
{{ id: 'pref-auto-backup', key: 'advanced.auto_backup' }}
];
fields.forEach(field => {{
const element = document.getElementById(field.id);
if (element) {{
const value = this.get(field.key);
if (element.type === 'checkbox') {{
element.checked = value;
}} else {{
element.value = value;
}}
}}
}});
}}
setupAutoSave() {{
// Auto-save preferences on change
document.addEventListener('change', (e) => {{
if (e.target.id && e.target.id.startsWith('pref-')) {{
this.saveFormValue(e.target);
}}
}});
}}
saveFormValue(element) {{
const keyMap = {{
'pref-theme': 'ui.theme',
'pref-density': 'ui.density',
'pref-language': 'ui.language',
'pref-items-per-page': 'ui.items_per_page',
'pref-animations': 'ui.animations_enabled',
'pref-grid-view': 'ui.grid_view',
'pref-download-quality': 'downloads.download_quality',
'pref-concurrent-downloads': 'downloads.concurrent_downloads',
'pref-auto-download': 'downloads.auto_download',
'pref-retry-failed': 'downloads.retry_failed',
'pref-auto-organize': 'downloads.auto_organize',
'pref-browser-notifications': 'notifications.browser_notifications',
'pref-notification-sound': 'downloads.notification_sound',
'pref-shortcuts-enabled': 'keyboard_shortcuts.enabled',
'pref-debug-mode': 'advanced.debug_mode',
'pref-performance-mode': 'advanced.performance_mode',
'pref-cache-enabled': 'advanced.cache_enabled',
'pref-auto-backup': 'advanced.auto_backup'
}};
const key = keyMap[element.id];
if (key) {{
let value = element.type === 'checkbox' ? element.checked : element.value;
if (element.type === 'number') {{
value = parseInt(value, 10);
}}
this.set(key, value);
}}
}}
showPreferences() {{
const modal = document.getElementById('preferences-modal');
if (modal) {{
const bsModal = new bootstrap.Modal(modal);
bsModal.show();
}}
}}
onPreferenceChange(key, callback) {{
if (!this.changeListeners.has(key)) {{
this.changeListeners.set(key, []);
}}
this.changeListeners.get(key).push(callback);
}}
notifyChangeListeners(key, newValue, oldValue) {{
const listeners = this.changeListeners.get(key) || [];
listeners.forEach(callback => {{
try {{
callback(newValue, oldValue, key);
}} catch (error) {{
console.error('Error in preference change listener:', error);
}}
}});
}}
reset() {{
this.preferences = JSON.parse(JSON.stringify(this.defaultPreferences));
this.applyPreferences();
this.saveToServer();
localStorage.removeItem('aniworld_preferences');
}}
export() {{
const data = JSON.stringify(this.preferences, null, 2);
const blob = new Blob([data], {{ type: 'application/json' }});
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = 'aniworld_preferences.json';
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(url);
}}
import(file) {{
return new Promise((resolve, reject) => {{
const reader = new FileReader();
reader.onload = (e) => {{
try {{
const imported = JSON.parse(e.target.result);
this.preferences = this.mergePreferences(this.defaultPreferences, imported);
this.applyPreferences();
this.saveToServer();
resolve(true);
}} catch (error) {{
reject(error);
}}
}};
reader.onerror = reject;
reader.readAsText(file);
}});
}}
mergePreferences(defaults, userPrefs) {{
const result = {{ ...defaults }};
for (const [key, value] of Object.entries(userPrefs)) {{
if (key in result && typeof result[key] === 'object' && typeof value === 'object') {{
result[key] = this.mergePreferences(result[key], value);
}} else {{
result[key] = value;
}}
}}
return result;
}}
}}
// Initialize preferences when DOM is loaded
document.addEventListener('DOMContentLoaded', () => {{
window.preferencesManager = new UserPreferencesManager();
}});
"""
def get_css(self):
"""Generate CSS for user preferences."""
return """
/* User Preferences Styles */
.density-compact {
--spacing: 0.5rem;
--font-size: 0.875rem;
}
.density-comfortable {
--spacing: 1rem;
--font-size: 1rem;
}
.density-spacious {
--spacing: 1.5rem;
--font-size: 1.125rem;
}
.no-animations * {
animation-duration: 0s !important;
transition-duration: 0s !important;
}
.theme-light {
--bs-body-bg: #ffffff;
--bs-body-color: #212529;
--bs-primary: #0d6efd;
}
.theme-dark {
--bs-body-bg: #121212;
--bs-body-color: #e9ecef;
--bs-primary: #0d6efd;
}
#preferences-modal .nav-tabs {
border-bottom: 1px solid var(--bs-border-color);
}
#preferences-modal .tab-pane {
min-height: 300px;
}
.preference-group {
margin-bottom: 2rem;
}
.preference-group h6 {
color: var(--bs-secondary);
margin-bottom: 1rem;
}
/* Responsive preferences modal */
@media (max-width: 768px) {
#preferences-modal .modal-dialog {
max-width: 95vw;
margin: 0.5rem;
}
#preferences-modal .nav-tabs {
flex-wrap: wrap;
}
#preferences-modal .nav-link {
font-size: 0.875rem;
padding: 0.5rem;
}
}
"""
# Create the preferences API blueprint
preferences_bp = Blueprint('preferences', __name__, url_prefix='/api')
# Global preferences manager instance
preferences_manager = UserPreferencesManager()
@preferences_bp.route('/preferences', methods=['GET'])
def get_preferences():
"""Get user preferences."""
try:
return jsonify(preferences_manager.get_user_session_preferences())
except Exception as e:
return jsonify({'error': str(e)}), 500
@preferences_bp.route('/preferences', methods=['PUT'])
def update_preferences():
"""Update user preferences."""
try:
data = request.get_json()
preferences_manager.preferences = preferences_manager.merge_preferences(
preferences_manager.default_preferences,
data
)
if preferences_manager.save_preferences():
return jsonify({'success': True, 'message': 'Preferences updated'})
else:
return jsonify({'error': 'Failed to save preferences'}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
@preferences_bp.route('/preferences/<key>', methods=['GET'])
def get_preference(key):
"""Get a specific preference."""
try:
value = preferences_manager.get_preference(key)
return jsonify({'key': key, 'value': value})
except Exception as e:
return jsonify({'error': str(e)}), 500
@preferences_bp.route('/preferences/<key>', methods=['PUT'])
def set_preference(key):
"""Set a specific preference."""
try:
data = request.get_json()
value = data.get('value')
if preferences_manager.set_preference(key, value):
return jsonify({'success': True, 'key': key, 'value': value})
else:
return jsonify({'error': 'Failed to set preference'}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
@preferences_bp.route('/preferences/reset', methods=['POST'])
def reset_preferences():
"""Reset preferences to defaults."""
try:
if preferences_manager.reset_preferences():
return jsonify({'success': True, 'message': 'Preferences reset to defaults'})
else:
return jsonify({'error': 'Failed to reset preferences'}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
@preferences_bp.route('/preferences/export', methods=['GET'])
def export_preferences():
"""Export preferences as JSON file."""
try:
from flask import Response
json_data = preferences_manager.export_preferences()
return Response(
json_data,
mimetype='application/json',
headers={'Content-Disposition': 'attachment; filename=aniworld_preferences.json'}
)
except Exception as e:
return jsonify({'error': str(e)}), 500
@preferences_bp.route('/preferences/import', methods=['POST'])
def import_preferences():
"""Import preferences from JSON file."""
try:
if 'file' not in request.files:
return jsonify({'error': 'No file provided'}), 400
file = request.files['file']
if file.filename == '':
return jsonify({'error': 'No file selected'}), 400
json_data = file.read().decode('utf-8')
if preferences_manager.import_preferences(json_data):
return jsonify({'success': True, 'message': 'Preferences imported successfully'})
else:
return jsonify({'error': 'Failed to import preferences'}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
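
The blueprint above exposes the manager over a small REST surface keyed by dot-notation paths. A hedged client sketch follows; the host, port, and use of the requests library are assumptions, while the endpoint paths come from the routes above.

import requests

BASE = "http://localhost:5000/api"  # host and port assumed

prefs = requests.get(f"{BASE}/preferences").json()                     # full preference tree
requests.put(f"{BASE}/preferences/ui.theme", json={"value": "dark"})   # set one key via dot notation
requests.post(f"{BASE}/preferences/reset")                             # back to defaults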

View File

@@ -1,565 +0,0 @@
"""
System Health Monitoring for AniWorld App
This module provides comprehensive system health checks and monitoring
for the anime downloading application.
"""
import psutil
import logging
import threading
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timedelta
from dataclasses import dataclass
from flask import Blueprint, jsonify, request
import os
import socket
import requests
from auth import require_auth, optional_auth
@dataclass
class HealthMetric:
"""Represents a health metric measurement."""
name: str
value: Any
unit: str
status: str # 'healthy', 'warning', 'critical'
threshold_warning: Optional[float] = None
threshold_critical: Optional[float] = None
timestamp: Optional[datetime] = None
def __post_init__(self):
if self.timestamp is None:
self.timestamp = datetime.now()
class SystemHealthMonitor:
"""Monitor system health metrics and performance."""
def __init__(self, check_interval: int = 60):
self.check_interval = check_interval
self.logger = logging.getLogger(__name__)
self.metrics_history: Dict[str, List[HealthMetric]] = {}
self.alerts: List[Dict] = []
self.monitoring_enabled = True
self.monitor_thread = None
self._lock = threading.Lock()
# Configurable thresholds
self.thresholds = {
'cpu_percent': {'warning': 80.0, 'critical': 95.0},
'memory_percent': {'warning': 85.0, 'critical': 95.0},
'disk_percent': {'warning': 90.0, 'critical': 98.0},
'disk_free_gb': {'warning': 5.0, 'critical': 1.0},
'network_latency_ms': {'warning': 1000, 'critical': 5000},
}
def start_monitoring(self):
"""Start continuous health monitoring."""
if self.monitor_thread and self.monitor_thread.is_alive():
self.logger.warning("Health monitoring already running")
return
self.monitoring_enabled = True
self.monitor_thread = threading.Thread(target=self._monitoring_loop, daemon=True)
self.monitor_thread.start()
self.logger.info("System health monitoring started")
def stop_monitoring(self):
"""Stop health monitoring."""
self.monitoring_enabled = False
if self.monitor_thread:
self.monitor_thread.join(timeout=5)
self.logger.info("System health monitoring stopped")
def _monitoring_loop(self):
"""Main monitoring loop."""
while self.monitoring_enabled:
try:
self.collect_all_metrics()
time.sleep(self.check_interval)
except Exception as e:
self.logger.error(f"Error in monitoring loop: {e}", exc_info=True)
time.sleep(self.check_interval)
def collect_all_metrics(self):
"""Collect all health metrics."""
metrics = []
# System metrics
metrics.extend(self.get_cpu_metrics())
metrics.extend(self.get_memory_metrics())
metrics.extend(self.get_disk_metrics())
metrics.extend(self.get_network_metrics())
# Application metrics
metrics.extend(self.get_process_metrics())
# Store metrics
with self._lock:
for metric in metrics:
if metric.name not in self.metrics_history:
self.metrics_history[metric.name] = []
self.metrics_history[metric.name].append(metric)
# Keep only last 24 hours of data
cutoff = datetime.now() - timedelta(hours=24)
self.metrics_history[metric.name] = [
m for m in self.metrics_history[metric.name]
if m.timestamp > cutoff
]
# Check for alerts
self._check_alert_conditions(metric)
def get_cpu_metrics(self) -> List[HealthMetric]:
"""Get CPU-related metrics."""
metrics = []
# CPU usage percentage
cpu_percent = psutil.cpu_percent(interval=1)
status = self._get_status_for_metric('cpu_percent', cpu_percent)
metrics.append(HealthMetric(
name='cpu_percent',
value=cpu_percent,
unit='%',
status=status,
threshold_warning=self.thresholds['cpu_percent']['warning'],
threshold_critical=self.thresholds['cpu_percent']['critical']
))
# CPU count
metrics.append(HealthMetric(
name='cpu_count',
value=psutil.cpu_count(),
unit='cores',
status='healthy'
))
# Load average (Unix-like systems only)
try:
load_avg = psutil.getloadavg()
metrics.append(HealthMetric(
name='load_average_1m',
value=load_avg[0],
unit='',
status='healthy'
))
except AttributeError:
pass # Not available on Windows
return metrics
def get_memory_metrics(self) -> List[HealthMetric]:
"""Get memory-related metrics."""
metrics = []
# Virtual memory
memory = psutil.virtual_memory()
status = self._get_status_for_metric('memory_percent', memory.percent)
metrics.append(HealthMetric(
name='memory_percent',
value=memory.percent,
unit='%',
status=status,
threshold_warning=self.thresholds['memory_percent']['warning'],
threshold_critical=self.thresholds['memory_percent']['critical']
))
metrics.append(HealthMetric(
name='memory_total_gb',
value=round(memory.total / (1024**3), 2),
unit='GB',
status='healthy'
))
metrics.append(HealthMetric(
name='memory_available_gb',
value=round(memory.available / (1024**3), 2),
unit='GB',
status='healthy'
))
# Swap memory
swap = psutil.swap_memory()
if swap.total > 0:
metrics.append(HealthMetric(
name='swap_percent',
value=swap.percent,
unit='%',
status='warning' if swap.percent > 50 else 'healthy'
))
return metrics
def get_disk_metrics(self) -> List[HealthMetric]:
"""Get disk-related metrics."""
metrics = []
# Check main disk partitions
partitions = psutil.disk_partitions()
for partition in partitions:
if 'cdrom' in partition.opts or partition.fstype == '':
continue
try:
usage = psutil.disk_usage(partition.mountpoint)
disk_percent = (usage.used / usage.total) * 100
free_gb = usage.free / (1024**3)
# Disk usage percentage
status_percent = self._get_status_for_metric('disk_percent', disk_percent)
device_name = partition.device.replace(":", "").replace("\\", "")
metrics.append(HealthMetric(
name=f'disk_percent_{device_name}',
value=round(disk_percent, 1),
unit='%',
status=status_percent,
threshold_warning=self.thresholds['disk_percent']['warning'],
threshold_critical=self.thresholds['disk_percent']['critical']
))
# Free space in GB
status_free = 'critical' if free_gb < self.thresholds['disk_free_gb']['critical'] \
else 'warning' if free_gb < self.thresholds['disk_free_gb']['warning'] \
else 'healthy'
metrics.append(HealthMetric(
name=f'disk_free_gb_{device_name}',
value=round(free_gb, 2),
unit='GB',
status=status_free,
threshold_warning=self.thresholds['disk_free_gb']['warning'],
threshold_critical=self.thresholds['disk_free_gb']['critical']
))
except PermissionError:
continue
# Disk I/O
try:
disk_io = psutil.disk_io_counters()
if disk_io:
metrics.append(HealthMetric(
name='disk_read_mb',
value=round(disk_io.read_bytes / (1024**2), 2),
unit='MB',
status='healthy'
))
metrics.append(HealthMetric(
name='disk_write_mb',
value=round(disk_io.write_bytes / (1024**2), 2),
unit='MB',
status='healthy'
))
except Exception:
pass
return metrics
def get_network_metrics(self) -> List[HealthMetric]:
"""Get network-related metrics."""
metrics = []
# Network I/O
try:
net_io = psutil.net_io_counters()
if net_io:
metrics.append(HealthMetric(
name='network_sent_mb',
value=round(net_io.bytes_sent / (1024**2), 2),
unit='MB',
status='healthy'
))
metrics.append(HealthMetric(
name='network_recv_mb',
value=round(net_io.bytes_recv / (1024**2), 2),
unit='MB',
status='healthy'
))
except Exception:
pass
# Network connectivity test
try:
start_time = time.time()
socket.create_connection(("8.8.8.8", 53), timeout=5)
latency = (time.time() - start_time) * 1000 # Convert to ms
status = self._get_status_for_metric('network_latency_ms', latency)
metrics.append(HealthMetric(
name='network_latency_ms',
value=round(latency, 2),
unit='ms',
status=status,
threshold_warning=self.thresholds['network_latency_ms']['warning'],
threshold_critical=self.thresholds['network_latency_ms']['critical']
))
except Exception:
metrics.append(HealthMetric(
name='network_latency_ms',
value=-1,
unit='ms',
status='critical'
))
return metrics
def get_process_metrics(self) -> List[HealthMetric]:
"""Get process-specific metrics."""
metrics = []
try:
# Current process metrics
process = psutil.Process()
# Process CPU usage
cpu_percent = process.cpu_percent()
metrics.append(HealthMetric(
name='process_cpu_percent',
value=cpu_percent,
unit='%',
status='warning' if cpu_percent > 50 else 'healthy'
))
# Process memory usage
memory_info = process.memory_info()
memory_mb = memory_info.rss / (1024**2)
metrics.append(HealthMetric(
name='process_memory_mb',
value=round(memory_mb, 2),
unit='MB',
status='warning' if memory_mb > 1024 else 'healthy' # Warning if > 1GB
))
# Process threads
threads = process.num_threads()
metrics.append(HealthMetric(
name='process_threads',
value=threads,
unit='',
status='warning' if threads > 50 else 'healthy'
))
# Process connections
try:
connections = len(process.connections())
metrics.append(HealthMetric(
name='process_connections',
value=connections,
unit='',
status='warning' if connections > 100 else 'healthy'
))
except psutil.AccessDenied:
pass
except Exception as e:
self.logger.error(f"Failed to get process metrics: {e}")
return metrics
def _get_status_for_metric(self, metric_name: str, value: float) -> str:
"""Determine status based on thresholds."""
if metric_name in self.thresholds:
thresholds = self.thresholds[metric_name]
if value >= thresholds['critical']:
return 'critical'
elif value >= thresholds['warning']:
return 'warning'
return 'healthy'
def _check_alert_conditions(self, metric: HealthMetric):
"""Check if metric triggers an alert."""
if metric.status in ['critical', 'warning']:
alert = {
'timestamp': metric.timestamp.isoformat(),
'metric_name': metric.name,
'value': metric.value,
'unit': metric.unit,
'status': metric.status,
'message': f"{metric.name} is {metric.status}: {metric.value}{metric.unit}"
}
with self._lock:
self.alerts.append(alert)
# Keep only last 100 alerts
if len(self.alerts) > 100:
self.alerts = self.alerts[-100:]
def get_current_health_status(self) -> Dict[str, Any]:
"""Get current system health status."""
with self._lock:
latest_metrics = {}
for name, history in self.metrics_history.items():
if history:
latest_metrics[name] = {
'value': history[-1].value,
'unit': history[-1].unit,
'status': history[-1].status,
'timestamp': history[-1].timestamp.isoformat()
}
# Calculate overall health status
statuses = [metric['status'] for metric in latest_metrics.values()]
if 'critical' in statuses:
overall_status = 'critical'
elif 'warning' in statuses:
overall_status = 'warning'
else:
overall_status = 'healthy'
return {
'overall_status': overall_status,
'metrics': latest_metrics,
'recent_alerts': self.alerts[-10:], # Last 10 alerts
'timestamp': datetime.now().isoformat()
}
def get_metric_history(self, metric_name: str, hours: int = 24) -> List[Dict]:
"""Get history for a specific metric."""
with self._lock:
if metric_name not in self.metrics_history:
return []
cutoff = datetime.now() - timedelta(hours=hours)
history = [
{
'value': m.value,
'status': m.status,
'timestamp': m.timestamp.isoformat()
}
for m in self.metrics_history[metric_name]
if m.timestamp > cutoff
]
return history
# Blueprint for health endpoints
health_bp = Blueprint('health', __name__)
# Global health monitor instance
health_monitor = SystemHealthMonitor()
@health_bp.route('/api/health/status')
@optional_auth
def get_health_status():
"""Get current system health status."""
try:
status = health_monitor.get_current_health_status()
return jsonify({
'status': 'success',
'data': status
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@health_bp.route('/api/health/metrics/<metric_name>')
@optional_auth
def get_metric_history(metric_name):
"""Get history for a specific metric."""
try:
hours = int(request.args.get('hours', 24))
history = health_monitor.get_metric_history(metric_name, hours)
return jsonify({
'status': 'success',
'data': {
'metric_name': metric_name,
'history': history
}
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@health_bp.route('/api/health/alerts')
@optional_auth
def get_health_alerts():
"""Get recent health alerts."""
try:
with health_monitor._lock:
alerts = health_monitor.alerts[-50:] # Last 50 alerts
return jsonify({
'status': 'success',
'data': {
'alerts': alerts,
'count': len(alerts)
}
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@health_bp.route('/api/health/start', methods=['POST'])
@require_auth
def start_health_monitoring():
"""Start health monitoring."""
try:
health_monitor.start_monitoring()
return jsonify({
'status': 'success',
'message': 'Health monitoring started'
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@health_bp.route('/api/health/stop', methods=['POST'])
@require_auth
def stop_health_monitoring():
"""Stop health monitoring."""
try:
health_monitor.stop_monitoring()
return jsonify({
'status': 'success',
'message': 'Health monitoring stopped'
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
def init_health_monitoring():
"""Initialize and start health monitoring."""
health_monitor.start_monitoring()
def cleanup_health_monitoring():
"""Clean up health monitoring resources."""
health_monitor.stop_monitoring()
# Export main components
__all__ = [
'SystemHealthMonitor',
'HealthMetric',
'health_bp',
'health_monitor',
'init_health_monitoring',
'cleanup_health_monitoring'
]
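
A minimal sketch of using the monitor directly, outside of Flask; the check interval and the threshold override are illustrative values.

# Hedged sketch: one synchronous collection pass, then the aggregated status.
monitor = SystemHealthMonitor(check_interval=30)
monitor.thresholds['cpu_percent']['warning'] = 70.0   # tighten the CPU warning level
monitor.collect_all_metrics()                         # single pass instead of the background thread
status = monitor.get_current_health_status()
print(status['overall_status'], len(status['metrics']), "metrics collected")
# For continuous monitoring use monitor.start_monitoring() / monitor.stop_monitoring().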

View File

@@ -1,303 +0,0 @@
from flask import Blueprint, render_template, request, jsonify
from web.controllers.auth_controller import optional_auth
import threading
import time
from datetime import datetime, timedelta
# Create blueprint for download queue management
download_queue_bp = Blueprint('download_queue', __name__)
# Global download queue state
download_queue_state = {
'active_downloads': [],
'pending_queue': [],
'completed_downloads': [],
'failed_downloads': [],
'queue_lock': threading.Lock(),
'statistics': {
'total_items': 0,
'completed_items': 0,
'failed_items': 0,
'estimated_time_remaining': None,
'current_speed': '0 MB/s',
'average_speed': '0 MB/s'
}
}
@download_queue_bp.route('/queue')
@optional_auth
def queue_page():
"""Download queue management page."""
return render_template('queue.html')
@download_queue_bp.route('/api/queue/status')
@optional_auth
def get_queue_status():
"""Get detailed download queue status."""
with download_queue_state['queue_lock']:
# Calculate ETA
eta = None
if download_queue_state['active_downloads']:
active_download = download_queue_state['active_downloads'][0]
if 'progress' in active_download and active_download['progress'].get('speed_mbps', 0) > 0:
remaining_items = len(download_queue_state['pending_queue'])
avg_speed = active_download['progress']['speed_mbps']
# Rough estimation: assume 500MB per episode
estimated_mb_remaining = remaining_items * 500
eta_seconds = estimated_mb_remaining / avg_speed if avg_speed > 0 else None
if eta_seconds:
eta = datetime.now() + timedelta(seconds=eta_seconds)
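# Example: 10 pending episodes * 500 MB / 5 MB/s = 1000 s, so the ETA lands roughly 17 minutes out.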
return jsonify({
'active_downloads': download_queue_state['active_downloads'],
'pending_queue': download_queue_state['pending_queue'],
'completed_downloads': download_queue_state['completed_downloads'][-10:], # Last 10
'failed_downloads': download_queue_state['failed_downloads'][-10:], # Last 10
'statistics': {
**download_queue_state['statistics'],
'eta': eta.isoformat() if eta else None
}
})
@download_queue_bp.route('/api/queue/clear', methods=['POST'])
@optional_auth
def clear_queue():
"""Clear completed and failed downloads from queue."""
try:
data = request.get_json() or {}
queue_type = data.get('type', 'completed') # 'completed', 'failed', or 'all'
with download_queue_state['queue_lock']:
if queue_type == 'completed' or queue_type == 'all':
download_queue_state['completed_downloads'].clear()
if queue_type == 'failed' or queue_type == 'all':
download_queue_state['failed_downloads'].clear()
return jsonify({
'status': 'success',
'message': f'Cleared {queue_type} downloads'
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@download_queue_bp.route('/api/queue/retry', methods=['POST'])
@optional_auth
def retry_failed_download():
"""Retry a failed download."""
try:
data = request.get_json()
download_id = data.get('id')
if not download_id:
return jsonify({
'status': 'error',
'message': 'Download ID is required'
}), 400
with download_queue_state['queue_lock']:
# Find failed download
failed_download = None
for i, download in enumerate(download_queue_state['failed_downloads']):
if download['id'] == download_id:
failed_download = download_queue_state['failed_downloads'].pop(i)
break
if not failed_download:
return jsonify({
'status': 'error',
'message': 'Failed download not found'
}), 404
# Reset download status and add back to queue
failed_download['status'] = 'queued'
failed_download['error'] = None
failed_download['retry_count'] = failed_download.get('retry_count', 0) + 1
download_queue_state['pending_queue'].append(failed_download)
return jsonify({
'status': 'success',
'message': 'Download added back to queue'
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@download_queue_bp.route('/api/queue/remove', methods=['POST'])
@optional_auth
def remove_from_queue():
"""Remove an item from the pending queue."""
try:
data = request.get_json()
download_id = data.get('id')
if not download_id:
return jsonify({
'status': 'error',
'message': 'Download ID is required'
}), 400
with download_queue_state['queue_lock']:
# Find and remove from pending queue
removed = False
for i, download in enumerate(download_queue_state['pending_queue']):
if download['id'] == download_id:
download_queue_state['pending_queue'].pop(i)
removed = True
break
if not removed:
return jsonify({
'status': 'error',
'message': 'Download not found in queue'
}), 404
return jsonify({
'status': 'success',
'message': 'Download removed from queue'
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
@download_queue_bp.route('/api/queue/reorder', methods=['POST'])
@optional_auth
def reorder_queue():
"""Reorder items in the pending queue."""
try:
data = request.get_json()
new_order = data.get('order') # Array of download IDs in new order
if not new_order or not isinstance(new_order, list):
return jsonify({
'status': 'error',
'message': 'Valid order array is required'
}), 400
with download_queue_state['queue_lock']:
# Create new queue based on the provided order
old_queue = download_queue_state['pending_queue'].copy()
new_queue = []
# Add items in the specified order
for download_id in new_order:
for download in old_queue:
if download['id'] == download_id:
new_queue.append(download)
break
# Add any remaining items that weren't in the new order
for download in old_queue:
if download not in new_queue:
new_queue.append(download)
download_queue_state['pending_queue'] = new_queue
return jsonify({
'status': 'success',
'message': 'Queue reordered successfully'
})
except Exception as e:
return jsonify({
'status': 'error',
'message': str(e)
}), 500
# Helper functions for queue management
def add_to_download_queue(serie_name, episode_info, priority='normal'):
"""Add a download to the queue."""
import uuid
download_item = {
'id': str(uuid.uuid4()),
'serie_name': serie_name,
'episode': episode_info,
'status': 'queued',
'priority': priority,
'added_at': datetime.now().isoformat(),
'started_at': None,
'completed_at': None,
'error': None,
'retry_count': 0,
'progress': {
'percent': 0,
'downloaded_mb': 0,
'total_mb': 0,
'speed_mbps': 0,
'eta_seconds': None
}
}
with download_queue_state['queue_lock']:
# Insert based on priority
if priority == 'high':
download_queue_state['pending_queue'].insert(0, download_item)
else:
download_queue_state['pending_queue'].append(download_item)
download_queue_state['statistics']['total_items'] += 1
return download_item['id']
def update_download_progress(download_id, progress_data):
"""Update progress for an active download."""
with download_queue_state['queue_lock']:
for download in download_queue_state['active_downloads']:
if download['id'] == download_id:
download['progress'].update(progress_data)
# Update global statistics
if 'speed_mbps' in progress_data:
download_queue_state['statistics']['current_speed'] = f"{progress_data['speed_mbps']:.1f} MB/s"
break
def move_download_to_completed(download_id, success=True, error=None):
"""Move download from active to completed/failed."""
with download_queue_state['queue_lock']:
download = None
for i, item in enumerate(download_queue_state['active_downloads']):
if item['id'] == download_id:
download = download_queue_state['active_downloads'].pop(i)
break
if download:
download['completed_at'] = datetime.now().isoformat()
if success:
download['status'] = 'completed'
download['progress']['percent'] = 100
download_queue_state['completed_downloads'].append(download)
download_queue_state['statistics']['completed_items'] += 1
else:
download['status'] = 'failed'
download['error'] = error
download_queue_state['failed_downloads'].append(download)
download_queue_state['statistics']['failed_items'] += 1
def start_next_download():
"""Move next queued download to active state."""
with download_queue_state['queue_lock']:
if download_queue_state['pending_queue'] and len(download_queue_state['active_downloads']) < 3: # Max 3 concurrent
download = download_queue_state['pending_queue'].pop(0)
download['status'] = 'downloading'
download['started_at'] = datetime.now().isoformat()
download_queue_state['active_downloads'].append(download)
return download
return None
def get_queue_statistics():
"""Get current queue statistics."""
with download_queue_state['queue_lock']:
return download_queue_state['statistics'].copy()
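# Usage sketch (illustrative only): how the queue helpers above are expected to
# compose for a single download. The serie name and episode payload are placeholders.
def _example_queue_roundtrip():
    download_id = add_to_download_queue("Example Serie", {"season": 1, "episode": 3}, priority="high")
    item = start_next_download()  # pending -> active (at most 3 concurrent)
    if item is not None:
        update_download_progress(download_id, {"percent": 42.0, "speed_mbps": 5.2})
        move_download_to_completed(download_id, success=True)
    return get_queue_statistics()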

View File

@@ -1,252 +0,0 @@
import threading
import time
import schedule
from datetime import datetime, timedelta
from typing import Optional, Callable, Dict, Any
import logging
from shared.utils.process_utils import (with_process_lock, RESCAN_LOCK,
ProcessLockError, is_process_running)
logger = logging.getLogger(__name__)
class ScheduledOperations:
"""Handle scheduled operations like automatic rescans and downloads."""
def __init__(self, config_manager, socketio=None):
self.config = config_manager
self.socketio = socketio
self.scheduler_thread = None
self.running = False
self.rescan_callback: Optional[Callable] = None
self.download_callback: Optional[Callable] = None
self.last_scheduled_rescan: Optional[datetime] = None
# Load scheduled rescan settings
self.scheduled_rescan_enabled = getattr(self.config, 'scheduled_rescan_enabled', False)
self.scheduled_rescan_time = getattr(self.config, 'scheduled_rescan_time', '03:00')
self.auto_download_after_rescan = getattr(self.config, 'auto_download_after_rescan', False)
def set_rescan_callback(self, callback: Callable):
"""Set callback function for performing rescan operations."""
self.rescan_callback = callback
def set_download_callback(self, callback: Callable):
"""Set callback function for performing download operations."""
self.download_callback = callback
def start_scheduler(self):
"""Start the background scheduler thread."""
if self.running:
logger.warning("Scheduler is already running")
return
self.running = True
self.scheduler_thread = threading.Thread(target=self._scheduler_loop, daemon=True)
self.scheduler_thread.start()
logger.info("Scheduled operations started")
def stop_scheduler(self):
"""Stop the background scheduler."""
self.running = False
schedule.clear()
if self.scheduler_thread and self.scheduler_thread.is_alive():
self.scheduler_thread.join(timeout=5)
logger.info("Scheduled operations stopped")
def _scheduler_loop(self):
"""Main scheduler loop that runs in background thread."""
self._setup_scheduled_jobs()
while self.running:
try:
schedule.run_pending()
time.sleep(60) # Check every minute
except Exception as e:
logger.error(f"Scheduler error: {e}")
time.sleep(60)
def _setup_scheduled_jobs(self):
"""Setup all scheduled jobs based on configuration."""
schedule.clear()
if self.scheduled_rescan_enabled and self.scheduled_rescan_time:
try:
schedule.every().day.at(self.scheduled_rescan_time).do(self._perform_scheduled_rescan)
logger.info(f"Scheduled daily rescan at {self.scheduled_rescan_time}")
except Exception as e:
logger.error(f"Error setting up scheduled rescan: {e}")
def _perform_scheduled_rescan(self):
"""Perform the scheduled rescan operation."""
try:
logger.info("Starting scheduled rescan...")
# Emit scheduled rescan started event
if self.socketio:
self.socketio.emit('scheduled_rescan_started')
# Check if rescan is already running
if is_process_running(RESCAN_LOCK):
logger.warning("Rescan is already running, skipping scheduled rescan")
if self.socketio:
self.socketio.emit('scheduled_rescan_skipped', {
'reason': 'Rescan already in progress'
})
return
# Perform the rescan using process lock
@with_process_lock(RESCAN_LOCK, timeout_minutes=180)
def perform_rescan():
self.last_scheduled_rescan = datetime.now()
if self.rescan_callback:
result = self.rescan_callback()
logger.info("Scheduled rescan completed successfully")
if self.socketio:
self.socketio.emit('scheduled_rescan_completed', {
'timestamp': self.last_scheduled_rescan.isoformat(),
'result': result
})
# Auto-start download if configured
if self.auto_download_after_rescan and self.download_callback:
logger.info("Starting auto-download after scheduled rescan")
threading.Thread(
target=self._perform_auto_download,
daemon=True
).start()
else:
logger.warning("No rescan callback configured")
perform_rescan(_locked_by='scheduled_operation')
except ProcessLockError:
logger.warning("Could not acquire rescan lock for scheduled operation")
if self.socketio:
self.socketio.emit('scheduled_rescan_error', {
'error': 'Could not acquire rescan lock'
})
except Exception as e:
logger.error(f"Scheduled rescan failed: {e}")
if self.socketio:
self.socketio.emit('scheduled_rescan_error', {
'error': str(e)
})
def _perform_auto_download(self):
"""Perform automatic download after scheduled rescan."""
try:
# Wait a bit after rescan to let UI update
time.sleep(10)
if self.download_callback:
# Find series with missing episodes and start download
logger.info("Starting auto-download of missing episodes")
result = self.download_callback()
if self.socketio:
self.socketio.emit('auto_download_started', {
'timestamp': datetime.now().isoformat(),
'result': result
})
else:
logger.warning("No download callback configured for auto-download")
except Exception as e:
logger.error(f"Auto-download after scheduled rescan failed: {e}")
if self.socketio:
self.socketio.emit('auto_download_error', {
'error': str(e)
})
def update_scheduled_rescan_config(self, enabled: bool, time_str: str, auto_download: bool = False):
"""Update scheduled rescan configuration."""
try:
# Validate time format
if enabled and time_str:
datetime.strptime(time_str, '%H:%M')
# Update configuration
self.scheduled_rescan_enabled = enabled
self.scheduled_rescan_time = time_str
self.auto_download_after_rescan = auto_download
# Save to config
self.config.scheduled_rescan_enabled = enabled
self.config.scheduled_rescan_time = time_str
self.config.auto_download_after_rescan = auto_download
self.config.save_config()
# Restart scheduler with new settings
if self.running:
self._setup_scheduled_jobs()
logger.info(f"Updated scheduled rescan config: enabled={enabled}, time={time_str}, auto_download={auto_download}")
return True
except ValueError as e:
logger.error(f"Invalid time format: {time_str}")
raise ValueError("Invalid time format. Use HH:MM format.") from e
except Exception as e:
logger.error(f"Error updating scheduled rescan config: {e}")
raise
def get_scheduled_rescan_config(self) -> Dict[str, Any]:
"""Get current scheduled rescan configuration."""
next_run = None
if self.scheduled_rescan_enabled and self.scheduled_rescan_time:
try:
# Calculate next run time
now = datetime.now()
today_run = datetime.strptime(f"{now.strftime('%Y-%m-%d')} {self.scheduled_rescan_time}", '%Y-%m-%d %H:%M')
if now > today_run:
# Next run is tomorrow
next_run = today_run + timedelta(days=1)
else:
# Next run is today
next_run = today_run
except Exception as e:
logger.error(f"Error calculating next run time: {e}")
return {
'enabled': self.scheduled_rescan_enabled,
'time': self.scheduled_rescan_time,
'auto_download_after_rescan': self.auto_download_after_rescan,
'next_run': next_run.isoformat() if next_run else None,
'last_run': self.last_scheduled_rescan.isoformat() if self.last_scheduled_rescan else None,
'is_running': self.running
}
def trigger_manual_scheduled_rescan(self):
"""Manually trigger a scheduled rescan (for testing purposes)."""
logger.info("Manually triggering scheduled rescan")
threading.Thread(target=self._perform_scheduled_rescan, daemon=True).start()
def get_next_scheduled_jobs(self) -> list:
"""Get list of all scheduled jobs with their next run times."""
jobs = []
for job in schedule.jobs:
jobs.append({
'job_func': job.job_func.__name__ if hasattr(job.job_func, '__name__') else str(job.job_func),
'next_run': job.next_run.isoformat() if job.next_run else None,
'interval': str(job.interval),
'unit': job.unit
})
return jobs
# Global scheduler instance
scheduled_operations = None
def init_scheduler(config_manager, socketio=None):
"""Initialize the global scheduler."""
global scheduled_operations
scheduled_operations = ScheduledOperations(config_manager, socketio)
return scheduled_operations
def get_scheduler():
"""Get the global scheduler instance."""
return scheduled_operations
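# Wiring sketch (illustrative): how this module is expected to be initialized from
# application startup code. `config_manager`, `socketio`, and the callbacks are assumed
# to be provided by the host application; the lambdas below are placeholders.
def _example_scheduler_setup(config_manager, socketio=None):
    scheduler = init_scheduler(config_manager, socketio)
    scheduler.set_rescan_callback(lambda: {"rescanned": True})          # placeholder callback
    scheduler.set_download_callback(lambda: {"downloads_started": 0})   # placeholder callback
    scheduler.update_scheduled_rescan_config(enabled=True, time_str="03:00", auto_download=True)
    scheduler.start_scheduler()
    return get_scheduler()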

File diff suppressed because it is too large

View File

@@ -1,10 +0,0 @@
"""
Configuration package for the Aniworld server.
This package provides configuration management and environment
variable handling for secure application deployment.
"""
from .env_config import EnvironmentConfig, env_config
__all__ = ['EnvironmentConfig', 'env_config']

View File

@@ -1,217 +0,0 @@
"""
Environment configuration for secure handling of sensitive data.
This module provides secure environment variable handling and configuration
management for the Aniworld server application.
"""
import os
import secrets
from typing import Optional, Dict, Any
from dotenv import load_dotenv
import logging
logger = logging.getLogger(__name__)
# Load environment variables from .env file
load_dotenv()
class EnvironmentConfig:
"""Manages environment variables and secure configuration."""
# Security
SECRET_KEY: str = os.getenv('SECRET_KEY', secrets.token_urlsafe(32))
JWT_SECRET_KEY: str = os.getenv('JWT_SECRET_KEY', secrets.token_urlsafe(32))
PASSWORD_SALT: str = os.getenv('PASSWORD_SALT', secrets.token_hex(32))
# Database
DATABASE_URL: str = os.getenv('DATABASE_URL', 'sqlite:///data/aniworld.db')
DATABASE_PASSWORD: Optional[str] = os.getenv('DATABASE_PASSWORD')
# Redis (for caching and sessions)
REDIS_URL: str = os.getenv('REDIS_URL', 'redis://localhost:6379/0')
REDIS_PASSWORD: Optional[str] = os.getenv('REDIS_PASSWORD')
# API Keys and External Services
ANIME_PROVIDER_API_KEY: Optional[str] = os.getenv('ANIME_PROVIDER_API_KEY')
TMDB_API_KEY: Optional[str] = os.getenv('TMDB_API_KEY')
# Email Configuration (for password reset)
SMTP_SERVER: str = os.getenv('SMTP_SERVER', 'localhost')
SMTP_PORT: int = int(os.getenv('SMTP_PORT', '587'))
SMTP_USERNAME: Optional[str] = os.getenv('SMTP_USERNAME')
SMTP_PASSWORD: Optional[str] = os.getenv('SMTP_PASSWORD')
SMTP_USE_TLS: bool = os.getenv('SMTP_USE_TLS', 'true').lower() == 'true'
FROM_EMAIL: str = os.getenv('FROM_EMAIL', 'noreply@aniworld.local')
# Security Settings
SESSION_TIMEOUT_HOURS: int = int(os.getenv('SESSION_TIMEOUT_HOURS', '24'))
MAX_FAILED_LOGIN_ATTEMPTS: int = int(os.getenv('MAX_FAILED_LOGIN_ATTEMPTS', '5'))
LOCKOUT_DURATION_MINUTES: int = int(os.getenv('LOCKOUT_DURATION_MINUTES', '30'))
# Rate Limiting
RATE_LIMIT_PER_MINUTE: int = int(os.getenv('RATE_LIMIT_PER_MINUTE', '60'))
API_RATE_LIMIT_PER_MINUTE: int = int(os.getenv('API_RATE_LIMIT_PER_MINUTE', '100'))
# Application Settings
DEBUG: bool = os.getenv('DEBUG', 'false').lower() == 'true'
HOST: str = os.getenv('HOST', '127.0.0.1')
PORT: int = int(os.getenv('PORT', '5000'))
# Anime Directory and Download Settings
ANIME_DIRECTORY: str = os.getenv('ANIME_DIRECTORY', './downloads')
MAX_CONCURRENT_DOWNLOADS: int = int(os.getenv('MAX_CONCURRENT_DOWNLOADS', '3'))
DOWNLOAD_SPEED_LIMIT: Optional[int] = int(os.getenv('DOWNLOAD_SPEED_LIMIT', '0')) or None
# Logging
LOG_LEVEL: str = os.getenv('LOG_LEVEL', 'INFO')
LOG_FILE: str = os.getenv('LOG_FILE', './logs/aniworld.log')
@classmethod
def get_database_config(cls) -> Dict[str, Any]:
"""Get database configuration."""
return {
'url': cls.DATABASE_URL,
'password': cls.DATABASE_PASSWORD,
'pool_size': int(os.getenv('DATABASE_POOL_SIZE', '10')),
'max_overflow': int(os.getenv('DATABASE_MAX_OVERFLOW', '20')),
'pool_timeout': int(os.getenv('DATABASE_POOL_TIMEOUT', '30')),
'pool_recycle': int(os.getenv('DATABASE_POOL_RECYCLE', '3600'))
}
@classmethod
def get_redis_config(cls) -> Dict[str, Any]:
"""Get Redis configuration."""
return {
'url': cls.REDIS_URL,
'password': cls.REDIS_PASSWORD,
'max_connections': int(os.getenv('REDIS_MAX_CONNECTIONS', '10')),
'retry_on_timeout': True,
'socket_timeout': int(os.getenv('REDIS_SOCKET_TIMEOUT', '5'))
}
@classmethod
def get_email_config(cls) -> Dict[str, Any]:
"""Get email configuration."""
return {
'server': cls.SMTP_SERVER,
'port': cls.SMTP_PORT,
'username': cls.SMTP_USERNAME,
'password': cls.SMTP_PASSWORD,
'use_tls': cls.SMTP_USE_TLS,
'from_email': cls.FROM_EMAIL
}
@classmethod
def get_security_config(cls) -> Dict[str, Any]:
"""Get security configuration."""
return {
'secret_key': cls.SECRET_KEY,
'jwt_secret_key': cls.JWT_SECRET_KEY,
'password_salt': cls.PASSWORD_SALT,
'session_timeout_hours': cls.SESSION_TIMEOUT_HOURS,
'max_failed_attempts': cls.MAX_FAILED_LOGIN_ATTEMPTS,
'lockout_duration_minutes': cls.LOCKOUT_DURATION_MINUTES,
'rate_limit_per_minute': cls.RATE_LIMIT_PER_MINUTE,
'api_rate_limit_per_minute': cls.API_RATE_LIMIT_PER_MINUTE
}
@classmethod
def validate_config(cls) -> bool:
"""Validate that required configuration is present."""
required_vars = [
'SECRET_KEY',
'JWT_SECRET_KEY',
'PASSWORD_SALT'
]
missing_vars = []
for var in required_vars:
if not getattr(cls, var):
missing_vars.append(var)
if missing_vars:
logger.error(f"Missing required environment variables: {missing_vars}")
return False
return True
@classmethod
def generate_env_template(cls, file_path: str = '.env.template') -> bool:
"""Generate a template .env file with all available configuration options."""
try:
template_content = """# Aniworld Server Environment Configuration
# Copy this file to .env and fill in your values
# Security (REQUIRED - Generate secure random values)
SECRET_KEY=your_secret_key_here
JWT_SECRET_KEY=your_jwt_secret_here
PASSWORD_SALT=your_password_salt_here
# Database Configuration
DATABASE_URL=sqlite:///data/aniworld.db
# DATABASE_PASSWORD=your_db_password_here
DATABASE_POOL_SIZE=10
DATABASE_MAX_OVERFLOW=20
DATABASE_POOL_TIMEOUT=30
DATABASE_POOL_RECYCLE=3600
# Redis Configuration (for caching and sessions)
REDIS_URL=redis://localhost:6379/0
# REDIS_PASSWORD=your_redis_password_here
REDIS_MAX_CONNECTIONS=10
REDIS_SOCKET_TIMEOUT=5
# Email Configuration (for password reset emails)
SMTP_SERVER=localhost
SMTP_PORT=587
# SMTP_USERNAME=your_smtp_username
# SMTP_PASSWORD=your_smtp_password
SMTP_USE_TLS=true
FROM_EMAIL=noreply@aniworld.local
# External API Keys
# ANIME_PROVIDER_API_KEY=your_anime_provider_api_key
# TMDB_API_KEY=your_tmdb_api_key
# Security Settings
SESSION_TIMEOUT_HOURS=24
MAX_FAILED_LOGIN_ATTEMPTS=5
LOCKOUT_DURATION_MINUTES=30
# Rate Limiting
RATE_LIMIT_PER_MINUTE=60
API_RATE_LIMIT_PER_MINUTE=100
# Application Settings
DEBUG=false
HOST=127.0.0.1
PORT=5000
# Anime and Download Settings
ANIME_DIRECTORY=./downloads
MAX_CONCURRENT_DOWNLOADS=3
# DOWNLOAD_SPEED_LIMIT=1000000 # bytes per second
# Logging
LOG_LEVEL=INFO
LOG_FILE=./logs/aniworld.log
"""
with open(file_path, 'w', encoding='utf-8') as f:
f.write(template_content)
logger.info(f"Environment template created at {file_path}")
return True
except Exception as e:
logger.error(f"Error creating environment template: {e}")
return False
# Create global instance
env_config = EnvironmentConfig()
# Validate configuration on import
if not env_config.validate_config():
logger.warning("Invalid environment configuration detected. Please check your .env file.")

View File

@@ -1,56 +1,56 @@
import os
import json
import logging
from .series import Serie
class SerieList:
def __init__(self, basePath: str):
self.directory = basePath
self.folderDict: dict[str, Serie] = {} # Proper initialization
self.load_series()
def add(self, serie: Serie):
if (not self.contains(serie.key)):
dataPath = os.path.join(self.directory, serie.folder, "data")
animePath = os.path.join(self.directory, serie.folder)
os.makedirs(animePath, exist_ok=True)
if not os.path.isfile(dataPath):
serie.save_to_file(dataPath)
self.folderDict[serie.folder] = serie;
def contains(self, key: str) -> bool:
for k, value in self.folderDict.items():
if value.key == key:
return True
return False
def load_series(self):
""" Scan folders and load data files """
logging.info(f"Scanning anime folders in: {self.directory}")
for anime_folder in os.listdir(self.directory):
anime_path = os.path.join(self.directory, anime_folder, "data")
if os.path.isfile(anime_path):
logging.debug(f"Found data file: {anime_path}")
self.load_data(anime_folder, anime_path)
else:
logging.warning(f"Skipping {anime_folder} - No data file found")
def load_data(self, anime_folder, data_path):
""" Load the serialized Serie data file for a series folder """
try:
self.folderDict[anime_folder] = Serie.load_from_file(data_path)
logging.debug(f"Successfully loaded {data_path} for {anime_folder}")
except Exception as e:
logging.error(f"Failed to load {data_path} in {anime_folder}: {e}")
def GetMissingEpisode(self):
"""Find all series with a non-empty episodeDict"""
return [serie for serie in self.folderDict.values() if len(serie.episodeDict) > 0]
def GetList(self):
"""Get all series in the list"""
return list(self.folderDict.values())
#k = AnimeList("\\\\sshfs.r\\ubuntu@192.168.178.43\\media\\serien\\Serien")
#bbabab = k.GetMissingEpisode()
import os
import json
import logging
from .series import Serie
class SerieList:
def __init__(self, basePath: str):
self.directory = basePath
self.folderDict: dict[str, Serie] = {} # Proper initialization
self.load_series()
def add(self, serie: Serie):
if (not self.contains(serie.key)):
dataPath = os.path.join(self.directory, serie.folder, "data")
animePath = os.path.join(self.directory, serie.folder)
os.makedirs(animePath, exist_ok=True)
if not os.path.isfile(dataPath):
serie.save_to_file(dataPath)
self.folderDict[serie.folder] = serie;
def contains(self, key: str) -> bool:
for k, value in self.folderDict.items():
if value.key == key:
return True
return False
def load_series(self):
""" Scan folders and load data files """
logging.info(f"Scanning anime folders in: {self.directory}")
for anime_folder in os.listdir(self.directory):
anime_path = os.path.join(self.directory, anime_folder, "data")
if os.path.isfile(anime_path):
logging.debug(f"Found data file: {anime_path}")
self.load_data(anime_folder, anime_path)
else:
logging.warning(f"Skipping {anime_folder} - No data file found")
def load_data(self, anime_folder, data_path):
""" Load the serialized Serie data file for a series folder """
try:
self.folderDict[anime_folder] = Serie.load_from_file(data_path)
logging.debug(f"Successfully loaded {data_path} for {anime_folder}")
except Exception as e:
logging.error(f"Failed to load {data_path} in {anime_folder}: {e}")
def GetMissingEpisode(self):
"""Find all series with a non-empty episodeDict"""
return [serie for serie in self.folderDict.values() if len(serie.episodeDict) > 0]
def GetList(self):
"""Get all series in the list"""
return list(self.folderDict.values())
#k = AnimeList("\\\\sshfs.r\\ubuntu@192.168.178.43\\media\\serien\\Serien")
#bbabab = k.GetMissingEpisode()
#print(bbabab)
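# Usage sketch (illustrative): loading an existing library folder and listing the series
# that still have missing episodes. The path below is a placeholder.
if __name__ == "__main__":
    series_list = SerieList("/path/to/Serien")
    for missing in series_list.GetMissingEpisode():
        print(missing.key, missing.episodeDict)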

View File

@@ -1,82 +1,82 @@
import json
class Serie:
def __init__(self, key: str, name: str, site: str, folder: str, episodeDict: dict[int, list[int]]):
self._key = key
self._name = name
self._site = site
self._folder = folder
self._episodeDict = episodeDict
def __str__(self):
"""String representation of Serie object"""
return f"Serie(key='{self.key}', name='{self.name}', site='{self.site}', folder='{self.folder}', episodeDict={self.episodeDict})"
@property
def key(self) -> str:
return self._key
@key.setter
def key(self, value: str):
self._key = value
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str):
self._name = value
@property
def site(self) -> str:
return self._site
@site.setter
def site(self, value: str):
self._site = value
@property
def folder(self) -> str:
return self._folder
@folder.setter
def folder(self, value: str):
self._folder = value
@property
def episodeDict(self) -> dict[int, list[int]]:
return self._episodeDict
@episodeDict.setter
def episodeDict(self, value: dict[int, list[int]]):
self._episodeDict = value
def to_dict(self):
"""Convert Serie object to dictionary for JSON serialization."""
return {
"key": self.key,
"name": self.name,
"site": self.site,
"folder": self.folder,
"episodeDict": {str(k): list(v) for k, v in self.episodeDict.items()}
}
@staticmethod
def from_dict(data: dict):
"""Create a Serie object from dictionary."""
episode_dict = {int(k): v for k, v in data["episodeDict"].items()} # Convert keys to int
return Serie(data["key"], data["name"], data["site"], data["folder"], episode_dict)
def save_to_file(self, filename: str):
"""Save Serie object to JSON file."""
with open(filename, "w") as file:
json.dump(self.to_dict(), file, indent=4)
@classmethod
def load_from_file(cls, filename: str) -> "Serie":
"""Load Serie object from JSON file."""
with open(filename, "r") as file:
data = json.load(file)
import json
class Serie:
def __init__(self, key: str, name: str, site: str, folder: str, episodeDict: dict[int, list[int]]):
self._key = key
self._name = name
self._site = site
self._folder = folder
self._episodeDict = episodeDict
def __str__(self):
"""String representation of Serie object"""
return f"Serie(key='{self.key}', name='{self.name}', site='{self.site}', folder='{self.folder}', episodeDict={self.episodeDict})"
@property
def key(self) -> str:
return self._key
@key.setter
def key(self, value: str):
self._key = value
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str):
self._name = value
@property
def site(self) -> str:
return self._site
@site.setter
def site(self, value: str):
self._site = value
@property
def folder(self) -> str:
return self._folder
@folder.setter
def folder(self, value: str):
self._folder = value
@property
def episodeDict(self) -> dict[int, list[int]]:
return self._episodeDict
@episodeDict.setter
def episodeDict(self, value: dict[int, list[int]]):
self._episodeDict = value
def to_dict(self):
"""Convert Serie object to dictionary for JSON serialization."""
return {
"key": self.key,
"name": self.name,
"site": self.site,
"folder": self.folder,
"episodeDict": {str(k): list(v) for k, v in self.episodeDict.items()}
}
@staticmethod
def from_dict(data: dict):
"""Create a Serie object from dictionary."""
episode_dict = {int(k): v for k, v in data["episodeDict"].items()} # Convert keys to int
return Serie(data["key"], data["name"], data["site"], data["folder"], episode_dict)
def save_to_file(self, filename: str):
"""Save Serie object to JSON file."""
with open(filename, "w") as file:
json.dump(self.to_dict(), file, indent=4)
@classmethod
def load_from_file(cls, filename: str) -> "Serie":
"""Load Serie object from JSON file."""
with open(filename, "r") as file:
data = json.load(file)
return cls.from_dict(data)
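# Roundtrip sketch (illustrative): serializing a Serie to JSON and loading it back.
# The key, name, and folder values are placeholders.
if __name__ == "__main__":
    serie = Serie("example-key", "Example Serie", "aniworld.to", "Example Serie (2020)", {1: [1, 2, 3]})
    serie.save_to_file("data")
    reloaded = Serie.load_from_file("data")
    print(reloaded)  # episodeDict keys are restored as ints by from_dict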

View File

@@ -1,7 +1,7 @@
class NoKeyFoundException(Exception):
"""Exception raised when an anime key cannot be found."""
pass
class MatchNotFoundError(Exception):
"""Exception raised when an anime key cannot be found."""
class NoKeyFoundException(Exception):
"""Exception raised when an anime key cannot be found."""
pass
class MatchNotFoundError(Exception):
"""Exception raised when an anime key cannot be found."""
pass

View File

@@ -1,11 +1,11 @@
from ..providers.streaming.Provider import Provider
from ..providers.streaming.voe import VOE
class Providers:
def __init__(self):
self.dict = {"VOE": VOE()}
def GetProvider(self, key: str) -> Provider:
return self.dict[key]
from ..providers.streaming.Provider import Provider
from ..providers.streaming.voe import VOE
class Providers:
def __init__(self):
self.dict = {"VOE": VOE()}
def GetProvider(self, key: str) -> Provider:
return self.dict[key]
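# Usage sketch (illustrative): resolving a streaming provider by name from the registry.
if __name__ == "__main__":
    providers = Providers()
    voe = providers.GetProvider("VOE")  # raises KeyError for unregistered provider names
    print(type(voe).__name__)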

View File

@@ -1,343 +1,343 @@
import os
import re
import logging
import json
import requests
import html
from urllib.parse import quote
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from .base_provider import Loader
from ..interfaces.providers import Providers
from yt_dlp import YoutubeDL
import shutil
# Read timeout from environment variable, default to 600 seconds (10 minutes)
timeout = int(os.getenv("DOWNLOAD_TIMEOUT", 600))
download_error_logger = logging.getLogger("DownloadErrors")
download_error_handler = logging.FileHandler("../../download_errors.log")
download_error_handler.setLevel(logging.ERROR)
download_error_logger.addHandler(download_error_handler)
noKeyFound_logger = logging.getLogger("NoKeyFound")
noKeyFound_handler = logging.FileHandler("../../NoKeyFound.log")
noKeyFound_handler.setLevel(logging.ERROR)
noKeyFound_logger.addHandler(noKeyFound_handler)
class AniworldLoader(Loader):
def __init__(self):
self.SUPPORTED_PROVIDERS = ["VOE", "Doodstream", "Vidmoly", "Vidoza", "SpeedFiles", "Streamtape", "Luluvdo"]
self.AniworldHeaders = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
"accept-encoding": "gzip, deflate, br, zstd",
"accept-language": "de,de-DE;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
"cache-control": "max-age=0",
"priority": "u=0, i",
"sec-ch-ua": '"Chromium";v="136", "Microsoft Edge";v="136", "Not.A/Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0"
}
self.INVALID_PATH_CHARS = ['<', '>', ':', '"', '/', '\\', '|', '?', '*', '&']
self.RANDOM_USER_AGENT = UserAgent().random
self.LULUVDO_USER_AGENT = "Mozilla/5.0 (Android 15; Mobile; rv:132.0) Gecko/132.0 Firefox/132.0"
self.PROVIDER_HEADERS = {
"Vidmoly": ['Referer: "https://vidmoly.to"'],
"Doodstream": ['Referer: "https://dood.li/"'],
"VOE": [f'User-Agent: {self.RANDOM_USER_AGENT}'],
"Luluvdo": [
f'User-Agent: {self.LULUVDO_USER_AGENT}',
'Accept-Language: de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
'Origin: "https://luluvdo.com"',
'Referer: "https://luluvdo.com/"'
]}
self.ANIWORLD_TO = "https://aniworld.to"
self.session = requests.Session()
# Configure retries with backoff
retries = Retry(
total=5, # Number of retries
backoff_factor=1, # Delay multiplier (1s, 2s, 4s, ...)
status_forcelist=[500, 502, 503, 504], # Retry for specific HTTP errors
allowed_methods=["GET"]
)
adapter = HTTPAdapter(max_retries=retries)
self.session.mount("https://", adapter)
self.DEFAULT_REQUEST_TIMEOUT = 30
self._KeyHTMLDict = {}
self._EpisodeHTMLDict = {}
self.Providers = Providers()
def ClearCache(self):
self._KeyHTMLDict = {}
self._EpisodeHTMLDict = {}
def RemoveFromCache(self):
self._EpisodeHTMLDict = {}
def Search(self, word: str) -> list:
search_url = f"{self.ANIWORLD_TO}/ajax/seriesSearch?keyword={quote(word)}"
anime_list = self.fetch_anime_list(search_url)
return anime_list
def fetch_anime_list(self, url: str) -> list:
response = self.session.get(url, timeout=self.DEFAULT_REQUEST_TIMEOUT)
response.raise_for_status()
clean_text = response.text.strip()
try:
decoded_data = json.loads(html.unescape(clean_text))
return decoded_data if isinstance(decoded_data, list) else []
except json.JSONDecodeError:
try:
# Remove BOM and problematic characters
clean_text = clean_text.encode('utf-8').decode('utf-8-sig')
# Remove problematic characters
clean_text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', clean_text)
# Parse the new text
decoded_data = json.loads(clean_text)
return decoded_data if isinstance(decoded_data, list) else []
except (requests.RequestException, json.JSONDecodeError) as exc:
raise ValueError("Could not get valid anime: ") from exc
def _GetLanguageKey(self, language: str) -> int:
languageCode = 0
if (language == "German Dub"):
languageCode = 1
if (language == "English Sub"):
languageCode = 2
if (language == "German Sub"):
languageCode = 3
return languageCode
def IsLanguage(self, season: int, episode: int, key: str, language: str = "German Dub") -> bool:
"""
Language Codes:
1: German Dub
2: English Sub
3: German Sub
"""
languageCode = self._GetLanguageKey(language)
episode_soup = BeautifulSoup(self._GetEpisodeHTML(season, episode, key).content, 'html.parser')
change_language_box_div = episode_soup.find(
'div', class_='changeLanguageBox')
languages = []
if change_language_box_div:
img_tags = change_language_box_div.find_all('img')
for img in img_tags:
lang_key = img.get('data-lang-key')
if lang_key and lang_key.isdigit():
languages.append(int(lang_key))
return languageCode in languages
def Download(self, baseDirectory: str, serieFolder: str, season: int, episode: int, key: str, language: str = "German Dub", progress_callback: callable = None) -> bool:
sanitized_anime_title = ''.join(
char for char in self.GetTitle(key) if char not in self.INVALID_PATH_CHARS
)
if season == 0:
output_file = (
f"{sanitized_anime_title} - "
f"Movie {episode:02} - "
f"({language}).mp4"
)
else:
output_file = (
f"{sanitized_anime_title} - "
f"S{season:02}E{episode:03} - "
f"({language}).mp4"
)
folderPath = os.path.join(os.path.join(baseDirectory, serieFolder), f"Season {season}")
output_path = os.path.join(folderPath, output_file)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
temp_dir = "./Temp/"
os.makedirs(os.path.dirname(temp_dir), exist_ok=True)
temp_Path = os.path.join(temp_dir, output_file)
for provider in self.SUPPORTED_PROVIDERS:
link, header = self._get_direct_link_from_provider(season, episode, key, language)
ydl_opts = {
'fragment_retries': float('inf'),
'outtmpl': temp_Path,
'quiet': True,
'no_warnings': True,
'progress_with_newline': False,
'nocheckcertificate': True,
}
if header:
ydl_opts['http_headers'] = header
if progress_callback:
ydl_opts['progress_hooks'] = [progress_callback]
with YoutubeDL(ydl_opts) as ydl:
ydl.download([link])
if (os.path.exists(temp_Path)):
shutil.copy(temp_Path, output_path)
os.remove(temp_Path)
break
self.ClearCache()
def GetSiteKey(self) -> str:
return "aniworld.to"
def GetTitle(self, key: str) -> str:
soup = BeautifulSoup(self._GetKeyHTML(key).content, 'html.parser')
title_div = soup.find('div', class_='series-title')
if title_div:
return title_div.find('h1').find('span').text
return ""
def _GetKeyHTML(self, key: str):
if key in self._KeyHTMLDict:
return self._KeyHTMLDict[key]
self._KeyHTMLDict[key] = self.session.get(
f"{self.ANIWORLD_TO}/anime/stream/{key}",
timeout=self.DEFAULT_REQUEST_TIMEOUT
)
return self._KeyHTMLDict[key]
def _GetEpisodeHTML(self, season: int, episode: int, key: str):
if (key, season, episode) in self._EpisodeHTMLDict:
return self._EpisodeHTMLDict[(key, season, episode)]
link = (
f"{self.ANIWORLD_TO}/anime/stream/{key}/"
f"staffel-{season}/episode-{episode}"
)
html = self.session.get(link, timeout=self.DEFAULT_REQUEST_TIMEOUT)
self._EpisodeHTMLDict[(key, season, episode)] = html
return self._EpisodeHTMLDict[(key, season, episode)]
def _get_provider_from_html(self, season: int, episode: int, key: str) -> dict:
"""
Parses the HTML content to extract streaming providers,
their language keys, and redirect links.
Returns a dictionary with provider names as keys
and language key-to-redirect URL mappings as values.
Example:
{
'VOE': {1: 'https://aniworld.to/redirect/1766412',
2: 'https://aniworld.to/redirect/1766405'},
'Doodstream': {1: 'https://aniworld.to/redirect/1987922',
2: 'https://aniworld.to/redirect/2700342'},
...
}
Access redirect link with:
print(self.provider["VOE"][2])
"""
soup = BeautifulSoup(self._GetEpisodeHTML(season, episode, key).content, 'html.parser')
providers = {}
episode_links = soup.find_all(
'li', class_=lambda x: x and x.startswith('episodeLink')
)
if not episode_links:
return providers
for link in episode_links:
provider_name_tag = link.find('h4')
provider_name = provider_name_tag.text.strip() if provider_name_tag else None
redirect_link_tag = link.find('a', class_='watchEpisode')
redirect_link = redirect_link_tag['href'] if redirect_link_tag else None
lang_key = link.get('data-lang-key')
lang_key = int(
lang_key) if lang_key and lang_key.isdigit() else None
if provider_name and redirect_link and lang_key:
if provider_name not in providers:
providers[provider_name] = {}
providers[provider_name][lang_key] = f"{self.ANIWORLD_TO}{redirect_link}"
return providers
def _get_redirect_link(self, season: int, episode: int, key: str, language: str = "German Dub"):
# Returns a (redirect_link, provider_name) tuple, or None if the language is unavailable.
languageCode = self._GetLanguageKey(language)
if (self.IsLanguage(season, episode, key, language)):
for provider_name, lang_dict in self._get_provider_from_html(season, episode, key).items():
if languageCode in lang_dict:
return (lang_dict[languageCode], provider_name)
return None
def _get_embeded_link(self, season: int, episode: int, key: str, language: str = "German Dub"):
redirect_link, provider_name = self._get_redirect_link(season, episode, key, language)
embeded_link = self.session.get(
redirect_link, timeout=self.DEFAULT_REQUEST_TIMEOUT,
headers={'User-Agent': self.RANDOM_USER_AGENT}).url
return embeded_link
def _get_direct_link_from_provider(self, season: int, episode: int, key: str, language: str = "German Dub") -> str:
"""
providers = {
"Vidmoly": get_direct_link_from_vidmoly,
"Vidoza": get_direct_link_from_vidoza,
"VOE": get_direct_link_from_voe,
"Doodstream": get_direct_link_from_doodstream,
"SpeedFiles": get_direct_link_from_speedfiles,
"Luluvdo": get_direct_link_from_luluvdo
}
"""
embeded_link = self._get_embeded_link(season, episode, key, language)
if embeded_link is None:
return None
return self.Providers.GetProvider("VOE").GetLink(embeded_link, self.DEFAULT_REQUEST_TIMEOUT)
def get_season_episode_count(self, slug : str) -> dict:
base_url = f"{self.ANIWORLD_TO}/anime/stream/{slug}/"
response = requests.get(base_url, timeout=self.DEFAULT_REQUEST_TIMEOUT)
soup = BeautifulSoup(response.content, 'html.parser')
season_meta = soup.find('meta', itemprop='numberOfSeasons')
number_of_seasons = int(season_meta['content']) if season_meta else 0
episode_counts = {}
for season in range(1, number_of_seasons + 1):
season_url = f"{base_url}staffel-{season}"
response = requests.get(season_url, timeout=self.DEFAULT_REQUEST_TIMEOUT)
soup = BeautifulSoup(response.content, 'html.parser')
episode_links = soup.find_all('a', href=True)
unique_links = set(
link['href']
for link in episode_links
if f"staffel-{season}/episode-" in link['href']
)
episode_counts[season] = len(unique_links)
return episode_counts
import os
import re
import logging
import json
import requests
import html
from urllib.parse import quote
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from .base_provider import Loader
from ..interfaces.providers import Providers
from yt_dlp import YoutubeDL
import shutil
# Read timeout from environment variable, default to 600 seconds (10 minutes)
timeout = int(os.getenv("DOWNLOAD_TIMEOUT", 600))
download_error_logger = logging.getLogger("DownloadErrors")
download_error_handler = logging.FileHandler("../../download_errors.log")
download_error_handler.setLevel(logging.ERROR)
download_error_logger.addHandler(download_error_handler)
noKeyFound_logger = logging.getLogger("NoKeyFound")
noKeyFound_handler = logging.FileHandler("../../NoKeyFound.log")
noKeyFound_handler.setLevel(logging.ERROR)
noKeyFound_logger.addHandler(noKeyFound_handler)
class AniworldLoader(Loader):
def __init__(self):
self.SUPPORTED_PROVIDERS = ["VOE", "Doodstream", "Vidmoly", "Vidoza", "SpeedFiles", "Streamtape", "Luluvdo"]
self.AniworldHeaders = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
"accept-encoding": "gzip, deflate, br, zstd",
"accept-language": "de,de-DE;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
"cache-control": "max-age=0",
"priority": "u=0, i",
"sec-ch-ua": '"Chromium";v="136", "Microsoft Edge";v="136", "Not.A/Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0"
}
self.INVALID_PATH_CHARS = ['<', '>', ':', '"', '/', '\\', '|', '?', '*', '&']
self.RANDOM_USER_AGENT = UserAgent().random
self.LULUVDO_USER_AGENT = "Mozilla/5.0 (Android 15; Mobile; rv:132.0) Gecko/132.0 Firefox/132.0"
self.PROVIDER_HEADERS = {
"Vidmoly": ['Referer: "https://vidmoly.to"'],
"Doodstream": ['Referer: "https://dood.li/"'],
"VOE": [f'User-Agent: {self.RANDOM_USER_AGENT}'],
"Luluvdo": [
f'User-Agent: {self.LULUVDO_USER_AGENT}',
'Accept-Language: de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
'Origin: "https://luluvdo.com"',
'Referer: "https://luluvdo.com/"'
]}
self.ANIWORLD_TO = "https://aniworld.to"
self.session = requests.Session()
# Configure retries with backoff
retries = Retry(
total=5, # Number of retries
backoff_factor=1, # Delay multiplier (1s, 2s, 4s, ...)
status_forcelist=[500, 502, 503, 504], # Retry for specific HTTP errors
allowed_methods=["GET"]
)
adapter = HTTPAdapter(max_retries=retries)
self.session.mount("https://", adapter)
self.DEFAULT_REQUEST_TIMEOUT = 30
self._KeyHTMLDict = {}
self._EpisodeHTMLDict = {}
self.Providers = Providers()
def ClearCache(self):
self._KeyHTMLDict = {}
self._EpisodeHTMLDict = {}
def RemoveFromCache(self):
self._EpisodeHTMLDict = {}
def Search(self, word: str) -> list:
search_url = f"{self.ANIWORLD_TO}/ajax/seriesSearch?keyword={quote(word)}"
anime_list = self.fetch_anime_list(search_url)
return anime_list
def fetch_anime_list(self, url: str) -> list:
response = self.session.get(url, timeout=self.DEFAULT_REQUEST_TIMEOUT)
response.raise_for_status()
clean_text = response.text.strip()
try:
decoded_data = json.loads(html.unescape(clean_text))
return decoded_data if isinstance(decoded_data, list) else []
except json.JSONDecodeError:
try:
# Remove BOM and problematic characters
clean_text = clean_text.encode('utf-8').decode('utf-8-sig')
# Remove problematic characters
clean_text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', clean_text)
# Parse the new text
decoded_data = json.loads(clean_text)
return decoded_data if isinstance(decoded_data, list) else []
except (requests.RequestException, json.JSONDecodeError) as exc:
raise ValueError("Could not get valid anime: ") from exc
def _GetLanguageKey(self, language: str) -> int:
languageCode = 0
if (language == "German Dub"):
languageCode = 1
if (language == "English Sub"):
languageCode = 2
if (language == "German Sub"):
languageCode = 3
return languageCode
def IsLanguage(self, season: int, episode: int, key: str, language: str = "German Dub") -> bool:
"""
Language Codes:
1: German Dub
2: English Sub
3: German Sub
"""
languageCode = self._GetLanguageKey(language)
episode_soup = BeautifulSoup(self._GetEpisodeHTML(season, episode, key).content, 'html.parser')
change_language_box_div = episode_soup.find(
'div', class_='changeLanguageBox')
languages = []
if change_language_box_div:
img_tags = change_language_box_div.find_all('img')
for img in img_tags:
lang_key = img.get('data-lang-key')
if lang_key and lang_key.isdigit():
languages.append(int(lang_key))
return languageCode in languages
def Download(self, baseDirectory: str, serieFolder: str, season: int, episode: int, key: str, language: str = "German Dub", progress_callback: callable = None) -> bool:
sanitized_anime_title = ''.join(
char for char in self.GetTitle(key) if char not in self.INVALID_PATH_CHARS
)
if season == 0:
output_file = (
f"{sanitized_anime_title} - "
f"Movie {episode:02} - "
f"({language}).mp4"
)
else:
output_file = (
f"{sanitized_anime_title} - "
f"S{season:02}E{episode:03} - "
f"({language}).mp4"
)
folderPath = os.path.join(os.path.join(baseDirectory, serieFolder), f"Season {season}")
output_path = os.path.join(folderPath, output_file)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
temp_dir = "./Temp/"
os.makedirs(os.path.dirname(temp_dir), exist_ok=True)
temp_Path = os.path.join(temp_dir, output_file)
for provider in self.SUPPORTED_PROVIDERS:
link, header = self._get_direct_link_from_provider(season, episode, key, language)
ydl_opts = {
'fragment_retries': float('inf'),
'outtmpl': temp_Path,
'quiet': True,
'no_warnings': True,
'progress_with_newline': False,
'nocheckcertificate': True,
}
if header:
ydl_opts['http_headers'] = header
if progress_callback:
ydl_opts['progress_hooks'] = [progress_callback]
with YoutubeDL(ydl_opts) as ydl:
ydl.download([link])
if (os.path.exists(temp_Path)):
shutil.copy(temp_Path, output_path)
os.remove(temp_Path)
break
self.ClearCache()
def GetSiteKey(self) -> str:
return "aniworld.to"
def GetTitle(self, key: str) -> str:
soup = BeautifulSoup(self._GetKeyHTML(key).content, 'html.parser')
title_div = soup.find('div', class_='series-title')
if title_div:
return title_div.find('h1').find('span').text
return ""
def _GetKeyHTML(self, key: str):
if key in self._KeyHTMLDict:
return self._KeyHTMLDict[key]
self._KeyHTMLDict[key] = self.session.get(
f"{self.ANIWORLD_TO}/anime/stream/{key}",
timeout=self.DEFAULT_REQUEST_TIMEOUT
)
return self._KeyHTMLDict[key]
def _GetEpisodeHTML(self, season: int, episode: int, key: str):
if (key, season, episode) in self._EpisodeHTMLDict:
return self._EpisodeHTMLDict[(key, season, episode)]
link = (
f"{self.ANIWORLD_TO}/anime/stream/{key}/"
f"staffel-{season}/episode-{episode}"
)
html = self.session.get(link, timeout=self.DEFAULT_REQUEST_TIMEOUT)
self._EpisodeHTMLDict[(key, season, episode)] = html
return self._EpisodeHTMLDict[(key, season, episode)]
def _get_provider_from_html(self, season: int, episode: int, key: str) -> dict:
"""
Parses the HTML content to extract streaming providers,
their language keys, and redirect links.
Returns a dictionary with provider names as keys
and language key-to-redirect URL mappings as values.
Example:
{
'VOE': {1: 'https://aniworld.to/redirect/1766412',
2: 'https://aniworld.to/redirect/1766405'},
'Doodstream': {1: 'https://aniworld.to/redirect/1987922',
2: 'https://aniworld.to/redirect/2700342'},
...
}
Access redirect link with:
print(self.provider["VOE"][2])
"""
soup = BeautifulSoup(self._GetEpisodeHTML(season, episode, key).content, 'html.parser')
providers = {}
episode_links = soup.find_all(
'li', class_=lambda x: x and x.startswith('episodeLink')
)
if not episode_links:
return providers
for link in episode_links:
provider_name_tag = link.find('h4')
provider_name = provider_name_tag.text.strip() if provider_name_tag else None
redirect_link_tag = link.find('a', class_='watchEpisode')
redirect_link = redirect_link_tag['href'] if redirect_link_tag else None
lang_key = link.get('data-lang-key')
lang_key = int(
lang_key) if lang_key and lang_key.isdigit() else None
if provider_name and redirect_link and lang_key:
if provider_name not in providers:
providers[provider_name] = {}
providers[provider_name][lang_key] = f"{self.ANIWORLD_TO}{redirect_link}"
return providers
def _get_redirect_link(self, season: int, episode: int, key: str, language: str = "German Dub"):
# Returns a (redirect_link, provider_name) tuple, or None if the language is unavailable.
languageCode = self._GetLanguageKey(language)
if (self.IsLanguage(season, episode, key, language)):
for provider_name, lang_dict in self._get_provider_from_html(season, episode, key).items():
if languageCode in lang_dict:
return (lang_dict[languageCode], provider_name)
return None
def _get_embeded_link(self, season: int, episode: int, key: str, language: str = "German Dub"):
redirect_link, provider_name = self._get_redirect_link(season, episode, key, language)
embeded_link = self.session.get(
redirect_link, timeout=self.DEFAULT_REQUEST_TIMEOUT,
headers={'User-Agent': self.RANDOM_USER_AGENT}).url
return embeded_link
def _get_direct_link_from_provider(self, season: int, episode: int, key: str, language: str = "German Dub") -> str:
"""
providers = {
"Vidmoly": get_direct_link_from_vidmoly,
"Vidoza": get_direct_link_from_vidoza,
"VOE": get_direct_link_from_voe,
"Doodstream": get_direct_link_from_doodstream,
"SpeedFiles": get_direct_link_from_speedfiles,
"Luluvdo": get_direct_link_from_luluvdo
}
"""
embeded_link = self._get_embeded_link(season, episode, key, language)
if embeded_link is None:
return None
return self.Providers.GetProvider("VOE").GetLink(embeded_link, self.DEFAULT_REQUEST_TIMEOUT)
def get_season_episode_count(self, slug : str) -> dict:
base_url = f"{self.ANIWORLD_TO}/anime/stream/{slug}/"
response = requests.get(base_url, timeout=self.DEFAULT_REQUEST_TIMEOUT)
soup = BeautifulSoup(response.content, 'html.parser')
season_meta = soup.find('meta', itemprop='numberOfSeasons')
number_of_seasons = int(season_meta['content']) if season_meta else 0
episode_counts = {}
for season in range(1, number_of_seasons + 1):
season_url = f"{base_url}staffel-{season}"
response = requests.get(season_url, timeout=self.DEFAULT_REQUEST_TIMEOUT)
soup = BeautifulSoup(response.content, 'html.parser')
episode_links = soup.find_all('a', href=True)
unique_links = set(
link['href']
for link in episode_links
if f"staffel-{season}/episode-" in link['href']
)
episode_counts[season] = len(unique_links)
return episode_counts
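# Usage sketch (illustrative): querying the provider for a series. The search term and
# slug below are placeholders; Search and get_season_episode_count hit aniworld.to,
# so network access is required.
if __name__ == "__main__":
    loader = AniworldLoader()
    results = loader.Search("example")
    print(results[:3] if results else "no results")
    print(loader.get_season_episode_count("example-slug"))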

View File

@@ -1,27 +1,27 @@
from abc import ABC, abstractmethod
class Loader(ABC):
@abstractmethod
def Search(self, word: str) -> list:
pass
@abstractmethod
def IsLanguage(self, season: int, episode: int, key: str, language: str = "German Dub") -> bool:
pass
@abstractmethod
def Download(self, baseDirectory: str, serieFolder: str, season: int, episode: int, key: str, progress_callback: callable = None) -> bool:
pass
@abstractmethod
def GetSiteKey(self) -> str:
pass
@abstractmethod
def GetTitle(self, key: str) -> str:
pass
@abstractmethod
def get_season_episode_count(self, slug: str) -> dict:
from abc import ABC, abstractmethod
class Loader(ABC):
@abstractmethod
def Search(self, word: str) -> list:
pass
@abstractmethod
def IsLanguage(self, season: int, episode: int, key: str, language: str = "German Dub") -> bool:
pass
@abstractmethod
def Download(self, baseDirectory: str, serieFolder: str, season: int, episode: int, key: str, progress_callback: callable = None) -> bool:
pass
@abstractmethod
def GetSiteKey(self) -> str:
pass
@abstractmethod
def GetTitle(self, key: str) -> str:
pass
@abstractmethod
def get_season_episode_count(self, slug: str) -> dict:
pass
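# Sketch (illustrative): a minimal in-memory Loader implementation, e.g. for tests.
# All returned values are dummies; a real provider would perform network lookups.
class DummyLoader(Loader):
    def Search(self, word: str) -> list:
        return [{"name": word, "link": "dummy-slug"}]
    def IsLanguage(self, season: int, episode: int, key: str, language: str = "German Dub") -> bool:
        return True
    def Download(self, baseDirectory: str, serieFolder: str, season: int, episode: int, key: str, progress_callback: callable = None) -> bool:
        return True
    def GetSiteKey(self) -> str:
        return "dummy"
    def GetTitle(self, key: str) -> str:
        return "Dummy Title"
    def get_season_episode_count(self, slug: str) -> dict:
        return {1: 12}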

File diff suppressed because it is too large

View File

@@ -1,10 +1,10 @@
from .aniworld_provider import AniworldLoader
from .base_provider import Loader
class Loaders:
def __init__(self):
self.dict = {"aniworld.to": AniworldLoader()}
def GetLoader(self, key: str) -> Loader:
return self.dict[key]
from .aniworld_provider import AniworldLoader
from .base_provider import Loader
class Loaders:
def __init__(self):
self.dict = {"aniworld.to": AniworldLoader()}
def GetLoader(self, key: str) -> Loader:
return self.dict[key]

View File

@@ -1,7 +1,7 @@
from abc import ABC, abstractmethod
class Provider(ABC):
@abstractmethod
def GetLink(self, embededLink: str, DEFAULT_REQUEST_TIMEOUT: int) -> tuple[str, list[str]]:
pass
from abc import ABC, abstractmethod
class Provider(ABC):
@abstractmethod
def GetLink(self, embededLink: str, DEFAULT_REQUEST_TIMEOUT: int) -> tuple[str, list[str]]:
pass

View File

@@ -1,59 +1,59 @@
import re
import random
import time
from fake_useragent import UserAgent
import requests
from .Provider import Provider
class Doodstream(Provider):
def __init__(self):
self.RANDOM_USER_AGENT = UserAgent().random
def GetLink(self, embededLink: str, DEFAULT_REQUEST_TIMEOUT: int) -> str:
headers = {
'User-Agent': self.RANDOM_USER_AGENT,
'Referer': 'https://dood.li/'
}
def extract_data(pattern, content):
match = re.search(pattern, content)
return match.group(1) if match else None
def generate_random_string(length=10):
characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.choice(characters) for _ in range(length))
response = requests.get(
embededLink,
headers=headers,
timeout=DEFAULT_REQUEST_TIMEOUT,
verify=False
)
response.raise_for_status()
pass_md5_pattern = r"\$\.get\('([^']*\/pass_md5\/[^']*)'"
pass_md5_url = extract_data(pass_md5_pattern, response.text)
if not pass_md5_url:
raise ValueError(
f'pass_md5 URL not found using {embededLink}.')
full_md5_url = f"https://dood.li{pass_md5_url}"
token_pattern = r"token=([a-zA-Z0-9]+)"
token = extract_data(token_pattern, response.text)
if not token:
raise ValueError(f'Token not found using {embededLink}.')
md5_response = requests.get(
full_md5_url, headers=headers, timeout=DEFAULT_REQUEST_TIMEOUT, verify=False)
md5_response.raise_for_status()
video_base_url = md5_response.text.strip()
random_string = generate_random_string(10)
expiry = int(time.time())
direct_link = f"{video_base_url}{random_string}?token={token}&expiry={expiry}"
# print(direct_link)
import re
import random
import time
from fake_useragent import UserAgent
import requests
from .Provider import Provider
class Doodstream(Provider):
def __init__(self):
self.RANDOM_USER_AGENT = UserAgent().random
def GetLink(self, embededLink: str, DEFAULT_REQUEST_TIMEOUT: int) -> str:
headers = {
'User-Agent': self.RANDOM_USER_AGENT,
'Referer': 'https://dood.li/'
}
def extract_data(pattern, content):
match = re.search(pattern, content)
return match.group(1) if match else None
def generate_random_string(length=10):
characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.choice(characters) for _ in range(length))
response = requests.get(
embededLink,
headers=headers,
timeout=DEFAULT_REQUEST_TIMEOUT,
verify=False
)
response.raise_for_status()
pass_md5_pattern = r"\$\.get\('([^']*\/pass_md5\/[^']*)'"
pass_md5_url = extract_data(pass_md5_pattern, response.text)
if not pass_md5_url:
raise ValueError(
f'pass_md5 URL not found using {embededLink}.')
full_md5_url = f"https://dood.li{pass_md5_url}"
token_pattern = r"token=([a-zA-Z0-9]+)"
token = extract_data(token_pattern, response.text)
if not token:
raise ValueError(f'Token not found using {embededLink}.')
md5_response = requests.get(
full_md5_url, headers=headers, timeout=DEFAULT_REQUEST_TIMEOUT, verify=False)
md5_response.raise_for_status()
video_base_url = md5_response.text.strip()
random_string = generate_random_string(10)
expiry = int(time.time())
direct_link = f"{video_base_url}{random_string}?token={token}&expiry={expiry}"
# print(direct_link)
return direct_link
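# Usage sketch (illustrative): resolving a direct link from an embedded Doodstream URL.
# The embed URL is a placeholder; the requests go to dood.li, so network access is required.
if __name__ == "__main__":
    provider = Doodstream()
    print(provider.GetLink("https://dood.li/e/placeholder", 30))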

View File

@@ -1,51 +1,51 @@
import re
import requests
import jsbeautifier.unpackers.packer as packer  # required below for packer.detect/packer.unpack
from aniworld import config
REDIRECT_REGEX = re.compile(
r'<iframe *(?:[^>]+ )?src=(?:\'([^\']+)\'|"([^"]+)")[^>]*>')
SCRIPT_REGEX = re.compile(
r'(?s)<script\s+[^>]*?data-cfasync=["\']?false["\']?[^>]*>(.+?)</script>')
VIDEO_URL_REGEX = re.compile(r'file:\s*"([^"]+\.m3u8[^"]*)"')
# TODO Implement this script fully
def get_direct_link_from_filemoon(embeded_filemoon_link: str):
session = requests.Session()
session.verify = False
headers = {
"User-Agent": config.RANDOM_USER_AGENT,
"Referer": embeded_filemoon_link,
}
response = session.get(embeded_filemoon_link, headers=headers)
source = response.text
match = REDIRECT_REGEX.search(source)
if match:
redirect_url = match.group(1) or match.group(2)
response = session.get(redirect_url, headers=headers)
source = response.text
for script_match in SCRIPT_REGEX.finditer(source):
script_content = script_match.group(1).strip()
if not script_content.startswith("eval("):
continue
if packer.detect(script_content):
unpacked = packer.unpack(script_content)
video_match = VIDEO_URL_REGEX.search(unpacked)
if video_match:
return video_match.group(1)
raise Exception("No Video link found!")
if __name__ == '__main__':
url = input("Enter Filemoon Link: ")
print(get_direct_link_from_filemoon(url))
import re
import requests
import jsbeautifier.unpackers.packer as packer  # required below for packer.detect/packer.unpack
from aniworld import config
REDIRECT_REGEX = re.compile(
r'<iframe *(?:[^>]+ )?src=(?:\'([^\']+)\'|"([^"]+)")[^>]*>')
SCRIPT_REGEX = re.compile(
r'(?s)<script\s+[^>]*?data-cfasync=["\']?false["\']?[^>]*>(.+?)</script>')
VIDEO_URL_REGEX = re.compile(r'file:\s*"([^"]+\.m3u8[^"]*)"')
# TODO Implement this script fully
def get_direct_link_from_filemoon(embeded_filemoon_link: str):
session = requests.Session()
session.verify = False
headers = {
"User-Agent": config.RANDOM_USER_AGENT,
"Referer": embeded_filemoon_link,
}
response = session.get(embeded_filemoon_link, headers=headers)
source = response.text
match = REDIRECT_REGEX.search(source)
if match:
redirect_url = match.group(1) or match.group(2)
response = session.get(redirect_url, headers=headers)
source = response.text
for script_match in SCRIPT_REGEX.finditer(source):
script_content = script_match.group(1).strip()
if not script_content.startswith("eval("):
continue
if packer.detect(script_content):
unpacked = packer.unpack(script_content)
video_match = VIDEO_URL_REGEX.search(unpacked)
if video_match:
return video_match.group(1)
raise Exception("No Video link found!")
if __name__ == '__main__':
url = input("Enter Filemoon Link: ")
print(get_direct_link_from_filemoon(url))

View File

@@ -1,90 +1,90 @@
import re
import json
import sys
import requests
from aniworld.config import DEFAULT_REQUEST_TIMEOUT
def fetch_page_content(url):
try:
response = requests.get(url, timeout=DEFAULT_REQUEST_TIMEOUT)
response.raise_for_status()
return response.text
except requests.exceptions.RequestException as e:
print(f"Failed to fetch the page content: {e}")
return None
def extract_video_data(page_content):
match = re.search(r'^.*videos_manifest.*$', page_content, re.MULTILINE)
if not match:
raise ValueError("Failed to extract video manifest from the response.")
json_str = match.group(0)[match.group(0).find(
'{'):match.group(0).rfind('}') + 1]
return json.loads(json_str)
def get_streams(url):
page_content = fetch_page_content(url)
data = extract_video_data(page_content)
video_info = data['state']['data']['video']
name = video_info['hentai_video']['name']
streams = video_info['videos_manifest']['servers'][0]['streams']
return {"name": name, "streams": streams}
def display_streams(streams):
if not streams:
print("No streams available.")
return
print("Available qualities:")
for i, stream in enumerate(streams, 1):
premium_tag = "(Premium)" if not stream['is_guest_allowed'] else ""
print(
f"{i}. {stream['width']}x{stream['height']}\t"
f"({stream['filesize_mbs']}MB) {premium_tag}")
def get_user_selection(streams):
try:
selected_index = int(input("Select a stream: ").strip()) - 1
if 0 <= selected_index < len(streams):
return selected_index
print("Invalid selection.")
return None
except ValueError:
print("Invalid input.")
return None
def get_direct_link_from_hanime(url=None):
try:
if url is None:
if len(sys.argv) > 1:
url = sys.argv[1]
else:
url = input("Please enter the hanime.tv video URL: ").strip()
try:
video_data = get_streams(url)
print(f"Video: {video_data['name']}")
print('*' * 40)
display_streams(video_data['streams'])
selected_index = None
while selected_index is None:
selected_index = get_user_selection(video_data['streams'])
print(f"M3U8 URL: {video_data['streams'][selected_index]['url']}")
except ValueError as e:
print(f"Error: {e}")
except KeyboardInterrupt:
pass
if __name__ == "__main__":
get_direct_link_from_hanime()

View File

@@ -1,35 +1,35 @@
import requests
import json
from urllib.parse import urlparse
# TODO: Downloading does not work yet and still has to be implemented.
def get_direct_link_from_loadx(embeded_loadx_link: str):
response = requests.head(
embeded_loadx_link, allow_redirects=True, verify=False)
parsed_url = urlparse(response.url)
path_parts = parsed_url.path.split("/")
if len(path_parts) < 3:
raise ValueError("Invalid path!")
id_hash = path_parts[2]
host = parsed_url.netloc
post_url = f"https://{host}/player/index.php?data={id_hash}&do=getVideo"
headers = {"X-Requested-With": "XMLHttpRequest"}
response = requests.post(post_url, headers=headers, verify=False)
data = json.loads(response.text)
print(data)
video_url = data.get("videoSource")
if not video_url:
raise ValueError("No Video link found!")
return video_url
if __name__ == '__main__':
url = input("Enter Loadx Link: ")
print(get_direct_link_from_loadx(url))
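
Both requests above pass verify=False, so urllib3 prints an InsecureRequestWarning on every call; callers that accept that trade-off can silence it explicitly (standard urllib3 usage, not something this module does on its own):

# Optional: silence the InsecureRequestWarning triggered by verify=False.
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)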

View File

@@ -1,39 +1,39 @@
import re
import requests
from aniworld import config
def get_direct_link_from_luluvdo(embeded_luluvdo_link, arguments=None):
luluvdo_id = embeded_luluvdo_link.split('/')[-1]
filelink = (
f"https://luluvdo.com/dl?op=embed&file_code={luluvdo_id}&embed=1&referer=luluvdo.com&adb=0"
)
# The User-Agent needs to be the same as the direct-link ones to work
headers = {
"Origin": "https://luluvdo.com",
"Referer": "https://luluvdo.com/",
"User-Agent": config.LULUVDO_USER_AGENT
}
if arguments.action == "Download":
headers["Accept-Language"] = "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7"
response = requests.get(filelink, headers=headers,
timeout=config.DEFAULT_REQUEST_TIMEOUT)
if response.status_code == 200:
pattern = r'file:\s*"([^"]+)"'
matches = re.findall(pattern, str(response.text))
if matches:
return matches[0]
raise ValueError("No match found")
if __name__ == '__main__':
url = input("Enter Luluvdo Link: ")
print(get_direct_link_from_luluvdo(url))
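
Because the function dereferences arguments.action for the download-specific Accept-Language header, callers have to hand in an object exposing that attribute; a SimpleNamespace stand-in is enough for a quick test (the embed link below is a placeholder):

# Illustrative call with a minimal stand-in for the CLI arguments object.
from types import SimpleNamespace

link = get_direct_link_from_luluvdo(
    "https://luluvdo.com/e/abc123",  # placeholder embed link
    arguments=SimpleNamespace(action="Download"),
)
print(link)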

View File

@@ -1,43 +1,43 @@
import re
import base64
import requests
from aniworld.config import DEFAULT_REQUEST_TIMEOUT, RANDOM_USER_AGENT
SPEEDFILES_PATTERN = re.compile(r'var _0x5opu234 = "(?P<encoded_data>.*?)";')
def get_direct_link_from_speedfiles(embeded_speedfiles_link):
response = requests.get(
embeded_speedfiles_link,
timeout=DEFAULT_REQUEST_TIMEOUT,
headers={'User-Agent': RANDOM_USER_AGENT}
)
if "<span class=\"inline-block\">Web server is down</span>" in response.text:
raise ValueError(
"The SpeedFiles server is currently down.\n"
"Please try again later or choose a different hoster."
)
match = SPEEDFILES_PATTERN.search(response.text)
if not match:
raise ValueError("Pattern not found in the response.")
encoded_data = match.group("encoded_data")
decoded = base64.b64decode(encoded_data).decode()
decoded = decoded.swapcase()[::-1]
decoded = base64.b64decode(decoded).decode()[::-1]
decoded_hex = ''.join(chr(int(decoded[i:i + 2], 16))
for i in range(0, len(decoded), 2))
shifted = ''.join(chr(ord(char) - 3) for char in decoded_hex)
result = base64.b64decode(shifted.swapcase()[::-1]).decode()
return result
if __name__ == '__main__':
speedfiles_link = input("Enter Speedfiles Link: ")
print(get_direct_link_from_speedfiles(
embeded_speedfiles_link=speedfiles_link))
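
The decode chain above is a fixed sequence of reversible steps (base64, swapcase, reverse, hex pairs, character shift). The sketch below is only a test aid derived from those steps: it rebuilds the inverse chain and round-trips a sample string through a copy of the decoder logic.

# Test aid: inverse of the decode chain above, plus a round-trip check.
import base64

def _encode_like_speedfiles(plaintext: str) -> str:
    step = base64.b64encode(plaintext.encode()).decode()
    shifted = step[::-1].swapcase()
    decoded_hex = ''.join(chr(ord(c) + 3) for c in shifted)
    hex_str = ''.join(format(ord(c), '02x') for c in decoded_hex)
    step = base64.b64encode(hex_str[::-1].encode()).decode()
    return base64.b64encode((step[::-1].swapcase()).encode()).decode()

def _decode_chain(encoded: str) -> str:
    # Same transformations used inside get_direct_link_from_speedfiles.
    decoded = base64.b64decode(encoded).decode()
    decoded = decoded.swapcase()[::-1]
    decoded = base64.b64decode(decoded).decode()[::-1]
    decoded_hex = ''.join(chr(int(decoded[i:i + 2], 16)) for i in range(0, len(decoded), 2))
    shifted = ''.join(chr(ord(char) - 3) for char in decoded_hex)
    return base64.b64decode(shifted.swapcase()[::-1]).decode()

assert _decode_chain(_encode_like_speedfiles("https://example.com/video.m3u8")) == "https://example.com/video.m3u8"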

View File

@@ -1,2 +1,2 @@
def get_direct_link_from_streamtape(embeded_streamtape_link: str) -> str:
    # TODO: Streamtape extraction is not implemented yet; this stub currently returns None.
    pass

View File

@@ -1,34 +1,34 @@
import re
import requests
from bs4 import BeautifulSoup
from aniworld.config import DEFAULT_REQUEST_TIMEOUT, RANDOM_USER_AGENT
def get_direct_link_from_vidmoly(embeded_vidmoly_link: str):
response = requests.get(
embeded_vidmoly_link,
headers={'User-Agent': RANDOM_USER_AGENT},
timeout=DEFAULT_REQUEST_TIMEOUT
)
html_content = response.text
soup = BeautifulSoup(html_content, 'html.parser')
scripts = soup.find_all('script')
file_link_pattern = r'file:\s*"(https?://.*?)"'
for script in scripts:
if script.string:
match = re.search(file_link_pattern, script.string)
if match:
file_link = match.group(1)
return file_link
raise ValueError("No direct link found.")
if __name__ == '__main__':
link = input("Enter Vidmoly Link: ")
print('Note: --referer "https://vidmoly.to"')
print(get_direct_link_from_vidmoly(embeded_vidmoly_link=link))
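
The __main__ hint about --referer exists because the host may reject manifest requests that lack a matching Referer; a downstream fetch of the returned link would therefore send it explicitly (a sketch; the embed link is a placeholder and the Referer value is taken from the hint above):

# Sketch: fetching the extracted manifest with the Referer the hoster expects.
manifest_url = get_direct_link_from_vidmoly("https://vidmoly.to/embed-xxxxx.html")  # placeholder
manifest = requests.get(
    manifest_url,
    headers={"User-Agent": RANDOM_USER_AGENT, "Referer": "https://vidmoly.to"},
    timeout=DEFAULT_REQUEST_TIMEOUT,
)
print(manifest.status_code)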

View File

@@ -1,29 +1,29 @@
import re
import requests
from bs4 import BeautifulSoup
from aniworld.config import DEFAULT_REQUEST_TIMEOUT, RANDOM_USER_AGENT
def get_direct_link_from_vidoza(embeded_vidoza_link: str) -> str:
response = requests.get(
embeded_vidoza_link,
headers={'User-Agent': RANDOM_USER_AGENT},
timeout=DEFAULT_REQUEST_TIMEOUT
)
soup = BeautifulSoup(response.content, "html.parser")
for tag in soup.find_all('script'):
if 'sourcesCode:' in tag.text:
match = re.search(r'src: "(.*?)"', tag.text)
if match:
return match.group(1)
raise ValueError("No direct link found.")
if __name__ == '__main__':
link = input("Enter Vidoza Link: ")
print(get_direct_link_from_vidoza(embeded_vidoza_link=link))

View File

@@ -1,113 +1,113 @@
import re
import base64
import json
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from .Provider import Provider
# Compile regex patterns once for better performance
REDIRECT_PATTERN = re.compile(r"https?://[^'\"<>]+")
B64_PATTERN = re.compile(r"var a168c='([^']+)'")
HLS_PATTERN = re.compile(r"'hls': '(?P<hls>[^']+)'")
class VOE(Provider):
def __init__(self):
self.RANDOM_USER_AGENT = UserAgent().random
self.Header = {
"User-Agent": self.RANDOM_USER_AGENT
}
    def GetLink(self, embededLink: str, DEFAULT_REQUEST_TIMEOUT: int = 30) -> tuple[str, dict]:
self.session = requests.Session()
# Configure retries with backoff
retries = Retry(
total=5, # Number of retries
backoff_factor=1, # Delay multiplier (1s, 2s, 4s, ...)
status_forcelist=[500, 502, 503, 504], # Retry for specific HTTP errors
allowed_methods=["GET"]
)
        adapter = HTTPAdapter(max_retries=retries)
        self.session.mount("https://", adapter)
response = self.session.get(
embededLink,
headers={'User-Agent': self.RANDOM_USER_AGENT},
timeout=DEFAULT_REQUEST_TIMEOUT
)
        redirect = REDIRECT_PATTERN.search(response.text)
if not redirect:
raise ValueError("No redirect found.")
redirect_url = redirect.group(0)
parts = redirect_url.strip().split("/")
self.Header["Referer"] = f"{parts[0]}//{parts[2]}/"
        response = self.session.get(redirect_url, headers={'User-Agent': self.RANDOM_USER_AGENT}, timeout=DEFAULT_REQUEST_TIMEOUT)
html = response.content
# Method 1: Extract from script tag
extracted = self.extract_voe_from_script(html)
if extracted:
return extracted, self.Header
# Method 2: Extract from base64 encoded variable
htmlText = html.decode('utf-8')
b64_match = B64_PATTERN.search(htmlText)
if b64_match:
decoded = base64.b64decode(b64_match.group(1)).decode()[::-1]
source = json.loads(decoded).get("source")
if source:
return source, self.Header
        # Method 3: Extract HLS source
        hls_match = HLS_PATTERN.search(htmlText)
        if hls_match:
            return base64.b64decode(hls_match.group("hls")).decode(), self.Header
        raise ValueError("No video source found.")
def shift_letters(self, input_str):
result = ''
for c in input_str:
code = ord(c)
if 65 <= code <= 90:
code = (code - 65 + 13) % 26 + 65
elif 97 <= code <= 122:
code = (code - 97 + 13) % 26 + 97
result += chr(code)
return result
def replace_junk(self, input_str):
junk_parts = ['@$', '^^', '~@', '%?', '*~', '!!', '#&']
for part in junk_parts:
input_str = re.sub(re.escape(part), '_', input_str)
return input_str
def shift_back(self, s, n):
return ''.join(chr(ord(c) - n) for c in s)
def decode_voe_string(self, encoded):
step1 = self.shift_letters(encoded)
step2 = self.replace_junk(step1).replace('_', '')
step3 = base64.b64decode(step2).decode()
step4 = self.shift_back(step3, 3)
step5 = base64.b64decode(step4[::-1]).decode()
return json.loads(step5)
    def extract_voe_from_script(self, html):
        soup = BeautifulSoup(html, "html.parser")
        script = soup.find("script", type="application/json")
        if script is None or not script.text:
            return None  # let GetLink fall back to the other extraction methods
        return self.decode_voe_string(script.text[2:-2])["source"]
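
A minimal usage sketch for the class above, assuming the Provider base class needs no extra setup; the embed URL is a placeholder:

# Illustrative usage only.
voe = VOE()
direct_link, headers = voe.GetLink("https://voe.sx/e/xxxxxxxx", DEFAULT_REQUEST_TIMEOUT=30)
print(direct_link)
print(headers["Referer"])  # requests for the stream must carry this Referer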