Remove streaming provider modules
This commit is contained in:
@@ -1,88 +0,0 @@
|
|||||||
"""Resolve Doodstream embed players into direct download URLs."""
|
|
||||||
|
|
||||||
import random
|
|
||||||
import re
|
|
||||||
import string
|
|
||||||
import time
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from fake_useragent import UserAgent
|
|
||||||
|
|
||||||
from .Provider import Provider
|
|
||||||
|
|
||||||
# Precompiled regex patterns to extract the ``pass_md5`` endpoint and the
# session token embedded in the obfuscated player script. Compiling once keeps
# repeated invocations fast and documents the parsing intent.
# Captures the relative URL passed to ``$.get('.../pass_md5/...')``.
PASS_MD5_PATTERN = re.compile(r"\$\.get\('([^']*/pass_md5/[^']*)'")
# Captures the alphanumeric session token from a ``token=...`` occurrence.
TOKEN_PATTERN = re.compile(r"token=([a-zA-Z0-9]+)")
|
|
||||||
|
|
||||||
|
|
||||||
class Doodstream(Provider):
    """Doodstream video provider implementation."""

    def __init__(self):
        # One random user agent is chosen per provider instance and reused
        # for every request so the session looks consistent to the host.
        self.RANDOM_USER_AGENT = UserAgent().random

    def get_link(
        self, embedded_link: str, timeout: int
    ) -> tuple[str, dict[str, Any]]:
        """
        Extract direct download link from Doodstream embedded player.

        Args:
            embedded_link: URL of the embedded Doodstream player
            timeout: Request timeout in seconds

        Returns:
            Tuple of (direct_link, headers)

        Raises:
            ValueError: If the pass_md5 URL or session token is missing.
        """
        headers = {
            "User-Agent": self.RANDOM_USER_AGENT,
            "Referer": "https://dood.li/",
        }

        player_page = requests.get(
            embedded_link,
            headers=headers,
            timeout=timeout,
            verify=True,
        )
        player_page.raise_for_status()

        # The player script embeds a relative /pass_md5/ endpoint that
        # returns the base of the final video URL.
        md5_match = PASS_MD5_PATTERN.search(player_page.text)
        if md5_match is None:
            raise ValueError(f"pass_md5 URL not found using {embedded_link}.")
        full_md5_url = f"https://dood.li{md5_match.group(1)}"

        token_match = TOKEN_PATTERN.search(player_page.text)
        if token_match is None:
            raise ValueError(f"Token not found using {embedded_link}.")
        token = token_match.group(1)

        md5_response = requests.get(
            full_md5_url, headers=headers, timeout=timeout, verify=True
        )
        md5_response.raise_for_status()
        video_base_url = md5_response.text.strip()

        # Doodstream expects a random 10-char alphanumeric suffix plus the
        # session token and a unix-time expiry appended to the base URL.
        suffix = "".join(
            random.choices(string.ascii_letters + string.digits, k=10)
        )
        expiry = int(time.time())

        direct_link = f"{video_base_url}{suffix}?token={token}&expiry={expiry}"
        return direct_link, headers
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
"""Resolve Filemoon embed pages into direct streaming asset URLs."""
|
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from aniworld import config
|
|
||||||
|
|
||||||
# import jsbeautifier.unpackers.packer as packer
|
|
||||||
|
|
||||||
|
|
||||||
# Match the embedded ``iframe`` pointing to the actual Filemoon player.
# Two alternative capture groups handle single- vs double-quoted src values.
REDIRECT_REGEX = re.compile(
    r'<iframe *(?:[^>]+ )?src=(?:\'([^\']+)\'|"([^"]+)")[^>]*>')
# The player HTML hides an ``eval`` wrapped script with ``data-cfasync``
# disabled; capture the entire script body for unpacking.
SCRIPT_REGEX = re.compile(
    r'(?s)<script\s+[^>]*?data-cfasync=["\']?false["\']?[^>]*>(.+?)</script>')
# Extract the direct ``file:"<m3u8>"`` URL once the script is unpacked.
VIDEO_URL_REGEX = re.compile(r'file:\s*"([^"]+\.m3u8[^"]*)"')
|
|
||||||
|
|
||||||
# TODO Implement this script fully
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_filemoon(embeded_filemoon_link: str):
    """Resolve a Filemoon embed page into the direct ``.m3u8`` stream URL.

    Args:
        embeded_filemoon_link: URL of the embedded Filemoon player.

    Returns:
        str: Direct ``.m3u8`` video URL.

    Raises:
        RuntimeError: If the ``jsbeautifier`` unpacker is not installed.
        Exception: If no video link could be extracted from the page.
    """
    # BUGFIX: ``packer`` was referenced below but its module-level import was
    # commented out, so every call died with a NameError. Import it lazily
    # here and fail with an actionable message instead.
    try:
        import jsbeautifier.unpackers.packer as packer
    except ImportError as err:
        raise RuntimeError(
            "jsbeautifier is required to unpack Filemoon player scripts"
        ) from err

    session = requests.Session()
    # WARNING: SSL verification disabled for Filemoon compatibility —
    # known limitation with this streaming provider, kept deliberately.
    session.verify = False

    headers = {
        "User-Agent": config.RANDOM_USER_AGENT,
        "Referer": embeded_filemoon_link,
    }

    response = session.get(embeded_filemoon_link, headers=headers)
    source = response.text

    # Follow the intermediate iframe redirect to the real player page.
    match = REDIRECT_REGEX.search(source)
    if match:
        redirect_url = match.group(1) or match.group(2)
        response = session.get(redirect_url, headers=headers)
        source = response.text

    for script_match in SCRIPT_REGEX.finditer(source):
        script_content = script_match.group(1).strip()

        # Only packed ``eval(...)`` scripts carry the stream configuration.
        if not script_content.startswith("eval("):
            continue

        if packer.detect(script_content):
            unpacked = packer.unpack(script_content)
            video_match = VIDEO_URL_REGEX.search(unpacked)
            if video_match:
                return video_match.group(1)

    raise Exception("No Video link found!")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test: resolve a user-supplied embed link and print it.
    embed_url = input("Enter Filemoon Link: ")
    print(get_direct_link_from_filemoon(embed_url))
|
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
"""Helpers for extracting direct stream URLs from hanime.tv pages."""
|
|
||||||
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from aniworld.config import DEFAULT_REQUEST_TIMEOUT
|
|
||||||
|
|
||||||
|
|
||||||
def fetch_page_content(url):
    """Return the HTML body of *url*, or ``None`` when the request fails.

    Failures are reported on stdout instead of raised so interactive
    callers keep running.
    """
    try:
        reply = requests.get(url, timeout=DEFAULT_REQUEST_TIMEOUT)
        reply.raise_for_status()
    except requests.exceptions.RequestException as exc:
        print(f"Failed to fetch the page content: {exc}")
        return None
    return reply.text
|
|
||||||
|
|
||||||
|
|
||||||
def extract_video_data(page_content):
    """Parse the ``videos_manifest`` JSON blob out of a hanime.tv page.

    Raises:
        ValueError: If no line of the page mentions ``videos_manifest``.
    """
    # ``videos_manifest`` lines embed a JSON blob with the stream metadata
    # inside a larger script tag; grab that entire line for further parsing.
    line_match = re.search(r'^.*videos_manifest.*$', page_content, re.MULTILINE)
    if line_match is None:
        raise ValueError("Failed to extract video manifest from the response.")

    manifest_line = line_match.group(0)
    # The JSON object spans from the first '{' to the last '}' on that line.
    start = manifest_line.find('{')
    end = manifest_line.rfind('}') + 1
    return json.loads(manifest_line[start:end])
|
|
||||||
|
|
||||||
|
|
||||||
def get_streams(url):
    """Return ``{"name": ..., "streams": [...]}`` for a hanime.tv video page.

    Args:
        url: Full hanime.tv video page URL.

    Raises:
        ValueError: If the page could not be fetched or parsed.
    """
    page_content = fetch_page_content(url)
    # BUGFIX: fetch_page_content() returns None on request errors; previously
    # that None reached re.search() and surfaced as an opaque TypeError.
    # Raise ValueError instead, which callers already handle.
    if page_content is None:
        raise ValueError(f"Failed to fetch the page content from {url}.")

    data = extract_video_data(page_content)
    video_info = data['state']['data']['video']
    name = video_info['hentai_video']['name']
    # Only the first server's stream list is used.
    streams = video_info['videos_manifest']['servers'][0]['streams']

    return {"name": name, "streams": streams}
|
|
||||||
|
|
||||||
|
|
||||||
def display_streams(streams):
    """Print a numbered quality listing for *streams* (message if empty)."""
    if not streams:
        print("No streams available.")
        return

    print("Available qualities:")
    for index, stream in enumerate(streams, 1):
        # Streams guests may not watch are flagged as premium-only.
        premium_tag = "" if stream['is_guest_allowed'] else "(Premium)"
        line = (
            f"{index}. {stream['width']}x{stream['height']}\t"
            f"({stream['filesize_mbs']}MB) {premium_tag}"
        )
        print(line)
|
|
||||||
|
|
||||||
|
|
||||||
def get_user_selection(streams):
    """Prompt for a 1-based stream choice; return its 0-based index or None."""
    try:
        choice = int(input("Select a stream: ").strip()) - 1
    except ValueError:
        # Non-numeric input.
        print("Invalid input.")
        return None

    if 0 <= choice < len(streams):
        return choice

    # Numeric but out of range.
    print("Invalid selection.")
    return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_hanime(url=None):
    """Interactive driver: resolve a hanime.tv page URL to an M3U8 stream URL.

    The URL is taken from the argument, else from ``sys.argv[1]``, else read
    from stdin. Results and errors are printed rather than returned.
    """
    try:
        if url is None:
            # Prefer a CLI argument; fall back to an interactive prompt.
            if len(sys.argv) > 1:
                url = sys.argv[1]
            else:
                url = input("Please enter the hanime.tv video URL: ").strip()

        try:
            video_data = get_streams(url)
            print(f"Video: {video_data['name']}")
            print('*' * 40)
            display_streams(video_data['streams'])

            # Re-prompt until the user picks a valid stream index.
            selected_index = None
            while selected_index is None:
                selected_index = get_user_selection(video_data['streams'])

            print(f"M3U8 URL: {video_data['streams'][selected_index]['url']}")
        except ValueError as e:
            # Fetch/parse failures from get_streams / extract_video_data.
            print(f"Error: {e}")
    except KeyboardInterrupt:
        print("\nOperation cancelled by user.")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
    # Run the interactive resolver when invoked as a script.
    get_direct_link_from_hanime()
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
import json
|
|
||||||
from urllib.parse import urlparse
|
|
||||||
|
|
||||||
import requests
|
|
||||||
|
|
||||||
# TODO Doesn't work on download yet and has to be implemented
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_loadx(embeded_loadx_link: str):
    """Extract direct download link from LoadX streaming provider.

    Args:
        embeded_loadx_link: Embedded LoadX link

    Returns:
        str: Direct video URL

    Raises:
        ValueError: If link extraction fails
    """
    # Default timeout for network requests
    timeout = 30

    # A HEAD request follows the embed redirect chain so we end up on the
    # canonical player host carrying the video id hash in its path.
    response = requests.head(
        embeded_loadx_link,
        allow_redirects=True,
        verify=True,
        timeout=timeout
    )

    parsed_url = urlparse(response.url)
    path_parts = parsed_url.path.split("/")
    # Expected path shape: /<player>/<id_hash>/... — anything shorter is
    # malformed and cannot be resolved.
    if len(path_parts) < 3:
        raise ValueError("Invalid path!")

    id_hash = path_parts[2]
    host = parsed_url.netloc

    # The player exposes an AJAX endpoint returning the stream metadata.
    post_url = f"https://{host}/player/index.php?data={id_hash}&do=getVideo"
    headers = {"X-Requested-With": "XMLHttpRequest"}
    response = requests.post(
        post_url,
        headers=headers,
        verify=True,
        timeout=timeout
    )

    # BUGFIX: dropped a leftover debug print of the full JSON payload and
    # parse via response.json() instead of json.loads(response.text).
    data = response.json()
    video_url = data.get("videoSource")
    if not video_url:
        raise ValueError("No Video link found!")

    return video_url
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test against a user-supplied embed link.
    embed_url = input("Enter Loadx Link: ")
    print(get_direct_link_from_loadx(embed_url))
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
import re
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from aniworld import config
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_luluvdo(embeded_luluvdo_link, arguments=None):
    """Resolve a Luluvdo embed link to the direct stream URL.

    Args:
        embeded_luluvdo_link: Embedded Luluvdo player URL; the file code is
            its last path segment.
        arguments: Optional parsed CLI namespace; when its ``action`` equals
            "Download" an Accept-Language header is added.

    Returns:
        str: Direct video URL.

    Raises:
        ValueError: If no stream URL could be found in the response.
    """
    luluvdo_id = embeded_luluvdo_link.split('/')[-1]
    filelink = (
        f"https://luluvdo.com/dl?op=embed&file_code={luluvdo_id}&embed=1&referer=luluvdo.com&adb=0"
    )

    # The User-Agent needs to be the same as the direct-link ones to work
    headers = {
        "Origin": "https://luluvdo.com",
        "Referer": "https://luluvdo.com/",
        "User-Agent": config.LULUVDO_USER_AGENT
    }

    # BUGFIX: ``arguments`` defaults to None, but ``arguments.action`` was
    # accessed unconditionally and crashed with AttributeError (e.g. when run
    # from this module's __main__ block). Guard the optional namespace.
    if arguments is not None and getattr(arguments, "action", None) == "Download":
        headers["Accept-Language"] = "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7"

    response = requests.get(filelink, headers=headers,
                            timeout=config.DEFAULT_REQUEST_TIMEOUT)

    if response.status_code == 200:
        # Capture the ``file:"<url>"`` assignment embedded in the player
        # configuration so we can return the stream URL.
        pattern = r'file:\s*"([^"]+)"'
        matches = re.findall(pattern, str(response.text))

        if matches:
            return matches[0]

    raise ValueError("No match found")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test against a user-supplied embed link.
    embed_url = input("Enter Luluvdo Link: ")
    print(get_direct_link_from_luluvdo(embed_url))
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
import base64
|
|
||||||
import re
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from aniworld.config import DEFAULT_REQUEST_TIMEOUT, RANDOM_USER_AGENT
|
|
||||||
|
|
||||||
# Capture the base64 payload hidden inside the obfuscated ``_0x5opu234``
# assignment. The named group lets us pull out the encoded blob directly.
# Non-greedy so the match stops at the first closing quote.
SPEEDFILES_PATTERN = re.compile(r'var _0x5opu234 = "(?P<encoded_data>.*?)";')
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_speedfiles(embeded_speedfiles_link):
    """Resolve a SpeedFiles embed link into the direct video URL.

    The page hides the target URL in ``_0x5opu234`` behind several stacked
    obfuscation layers; each transformation below undoes one layer, and the
    exact order of operations is load-bearing.

    Args:
        embeded_speedfiles_link: URL of the embedded SpeedFiles player.

    Returns:
        str: The decoded direct video URL.

    Raises:
        ValueError: If the server reports downtime or the payload is missing.
    """
    response = requests.get(
        embeded_speedfiles_link,
        timeout=DEFAULT_REQUEST_TIMEOUT,
        headers={'User-Agent': RANDOM_USER_AGENT}
    )

    # The host serves a branded outage page; detect it by its marker text.
    if "<span class=\"inline-block\">Web server is down</span>" in response.text:
        raise ValueError(
            "The SpeedFiles server is currently down.\n"
            "Please try again later or choose a different hoster."
        )

    match = SPEEDFILES_PATTERN.search(response.text)

    if not match:
        raise ValueError("Pattern not found in the response.")

    encoded_data = match.group("encoded_data")
    # Layer 1: plain base64 decode.
    decoded = base64.b64decode(encoded_data).decode()
    # Layer 2: swap letter case, then reverse the string.
    decoded = decoded.swapcase()[::-1]
    # Layer 3: base64 decode again, then reverse.
    decoded = base64.b64decode(decoded).decode()[::-1]
    # Layer 4: the string is hex — turn each 2-char pair into a character.
    decoded_hex = ''.join(chr(int(decoded[i:i + 2], 16))
                          for i in range(0, len(decoded), 2))
    # Layer 5: shift every character back by 3 code points.
    shifted = ''.join(chr(ord(char) - 3) for char in decoded_hex)
    # Layer 6: case-swap + reverse once more, then a final base64 decode.
    result = base64.b64decode(shifted.swapcase()[::-1]).decode()

    return result
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test against a user-supplied embed link.
    user_link = input("Enter Speedfiles Link: ")
    print(get_direct_link_from_speedfiles(embeded_speedfiles_link=user_link))
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
def get_direct_link_from_streamtape(embeded_streamtape_link: str) -> str:
    """Resolve a Streamtape embed link to a direct URL.

    Not implemented yet; currently always returns ``None`` despite the
    ``str`` annotation — NOTE(review): confirm callers tolerate None.
    """
    # TODO: implement Streamtape extraction.
    return None
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
import re
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from aniworld.config import DEFAULT_REQUEST_TIMEOUT, RANDOM_USER_AGENT
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_vidmoly(embeded_vidmoly_link: str):
    """Resolve a Vidmoly embed page to the direct video file URL.

    Scans every inline <script> on the player page for the ``file:"<url>"``
    assignment and returns the first URL found.

    Raises:
        ValueError: If no script contains a file URL.
    """
    page = requests.get(
        embeded_vidmoly_link,
        headers={'User-Agent': RANDOM_USER_AGENT},
        timeout=DEFAULT_REQUEST_TIMEOUT
    )
    soup = BeautifulSoup(page.text, 'html.parser')

    # Match the ``file:"<url>"`` assignment inside the obfuscated player
    # script so we can recover the direct MP4 source URL.
    file_link_pattern = r'file:\s*"(https?://.*?)"'

    for script_tag in soup.find_all('script'):
        body = script_tag.string
        if not body:
            continue
        found = re.search(file_link_pattern, body)
        if found:
            return found.group(1)

    raise ValueError("No direct link found.")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test; the printed note documents the referer needed
    # by downstream players.
    user_link = input("Enter Vidmoly Link: ")
    print('Note: --referer "https://vidmoly.to"')
    print(get_direct_link_from_vidmoly(embeded_vidmoly_link=user_link))
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
import re
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from aniworld.config import DEFAULT_REQUEST_TIMEOUT, RANDOM_USER_AGENT
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
|
|
||||||
|
|
||||||
def get_direct_link_from_vidoza(embeded_vidoza_link: str) -> str:
    """Resolve a Vidoza embed page to the direct video source URL.

    Raises:
        ValueError: If no ``sourcesCode`` script with a ``src`` URL exists.
    """
    page = requests.get(
        embeded_vidoza_link,
        headers={'User-Agent': RANDOM_USER_AGENT},
        timeout=DEFAULT_REQUEST_TIMEOUT
    )

    soup = BeautifulSoup(page.content, "html.parser")

    for script_tag in soup.find_all('script'):
        script_text = script_tag.text
        if 'sourcesCode:' not in script_text:
            continue
        # Script blocks contain a ``sourcesCode`` object with ``src``
        # assignments; extract the first URL between the quotes.
        src_match = re.search(r'src: "(.*?)"', script_text)
        if src_match:
            return src_match.group(1)

    raise ValueError("No direct link found.")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Manual smoke test against a user-supplied embed link.
    user_link = input("Enter Vidoza Link: ")
    print(get_direct_link_from_vidoza(embeded_vidoza_link=user_link))
|
|
||||||
Reference in New Issue
Block a user