Merge branch 'dev' into Config-Filenames
This commit is contained in:
@@ -1 +1 @@
|
||||
__version__ = "2.3.1"
|
||||
__version__ = "3.1.0"
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import atexit
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
import click
|
||||
import urllib3
|
||||
@@ -58,7 +59,7 @@ def main(version: bool, debug: bool) -> None:
|
||||
r" ▀▀▀ ▀▀ █▪ ▀▀▀▀ ▀▀▀ · ▀ ▀ ·▀▀▀ ·▀ ▀.▀▀▀ ▀▀▀ ",
|
||||
style="ascii.art",
|
||||
),
|
||||
f"v [repr.number]{__version__}[/] - © 2025 - github.com/unshackle-dl/unshackle",
|
||||
f"v [repr.number]{__version__}[/] - © 2025-{datetime.now().year} - github.com/unshackle-dl/unshackle",
|
||||
),
|
||||
(1, 11, 1, 10),
|
||||
expand=True,
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
"""API key tier management for remote services."""
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
log = logging.getLogger("api.keys")
|
||||
|
||||
|
||||
def get_api_key_from_request(request: web.Request) -> Optional[str]:
    """
    Pull the caller's API key out of the request headers.

    The dedicated ``X-API-Key`` header takes precedence; otherwise a
    ``Bearer`` token in the ``Authorization`` header is accepted.

    Args:
        request: Incoming aiohttp request.

    Returns:
        The API key string, or None when neither header carries one.
    """
    header_key = request.headers.get("X-API-Key")
    if header_key:
        return header_key

    bearer_prefix = "Bearer "
    authorization = request.headers.get("Authorization", "")
    if authorization.startswith(bearer_prefix):
        return authorization[len(bearer_prefix):]

    return None
|
||||
|
||||
|
||||
def get_api_key_config(app: web.Application, api_key: str) -> Optional[Dict[str, Any]]:
    """
    Look up the configuration entry for a specific API key.

    The new-style tiered ``api_keys`` list is searched first; the legacy
    ``users`` list is kept working by synthesizing a basic-tier entry.

    Args:
        app: aiohttp application holding the loaded config.
        api_key: API key to look up.

    Returns:
        The key's configuration dict, or None when the key is unknown.
    """
    config = app.get("config", {})

    # New-style tiered entries: dicts carrying at least a "key" field.
    matching = next(
        (
            entry
            for entry in config.get("api_keys", [])
            if isinstance(entry, dict) and entry.get("key") == api_key
        ),
        None,
    )
    if matching is not None:
        return matching

    # Legacy flat "users" list (backward compatibility): membership implies
    # a basic-tier key with no CDM access.
    if api_key in config.get("users", []):
        return {"key": api_key, "tier": "basic", "allowed_cdms": []}

    return None
|
||||
|
||||
|
||||
def is_premium_user(app: web.Application, api_key: str) -> bool:
    """
    Report whether an API key belongs to a premium-tier user.

    Premium users can use the server-side CDM for decryption.

    Args:
        app: aiohttp application holding the loaded config.
        api_key: API key to check.

    Returns:
        True for premium-tier keys, False otherwise (including unknown keys).
    """
    key_config = get_api_key_config(app, api_key)
    # Unknown key -> not premium; tier defaults to "basic" when unset.
    return bool(key_config) and key_config.get("tier", "basic") == "premium"
|
||||
|
||||
|
||||
def get_allowed_cdms(app: web.Application, api_key: str) -> List[str]:
    """
    Return the CDM names an API key is allowed to use.

    Args:
        app: aiohttp application holding the loaded config.
        api_key: API key to check.

    Returns:
        List of allowed CDM names; ``["*"]`` for wildcard access; empty list
        for unknown keys or malformed configuration.
    """
    key_config = get_api_key_config(app, api_key)
    if not key_config:
        return []

    allowed = key_config.get("allowed_cdms", [])

    # Wildcard may be configured as the bare string or a one-element list.
    if allowed in ("*", ["*"]):
        return ["*"]

    # Anything that is not a list (e.g. a stray string) is treated as none.
    return allowed if isinstance(allowed, list) else []
|
||||
|
||||
|
||||
def get_default_cdm(app: web.Application, api_key: str) -> Optional[str]:
    """
    Return the default CDM configured for an API key.

    Args:
        app: aiohttp application holding the loaded config.
        api_key: API key to check.

    Returns:
        The default CDM name, or None when the key is unknown or has none.
    """
    key_config = get_api_key_config(app, api_key)
    return key_config.get("default_cdm") if key_config else None
|
||||
|
||||
|
||||
def can_use_cdm(app: web.Application, api_key: str, cdm_name: str) -> bool:
    """
    Report whether an API key may use a specific CDM.

    Args:
        app: aiohttp application holding the loaded config.
        api_key: API key to check.
        cdm_name: CDM name to check access for.

    Returns:
        True if allowed (either via wildcard or an exact name match).
    """
    allowed = get_allowed_cdms(app, api_key)
    # Wildcard grants every CDM; otherwise require an exact name match.
    return "*" in allowed or cdm_name in allowed
|
||||
@@ -207,6 +207,7 @@ def serialize_drm(drm_list) -> Optional[List[Dict[str, Any]]]:
|
||||
|
||||
# Get PSSH - handle both Widevine and PlayReady
|
||||
if hasattr(drm, "_pssh") and drm._pssh:
|
||||
pssh_obj = None
|
||||
try:
|
||||
pssh_obj = drm._pssh
|
||||
# Try to get base64 representation
|
||||
@@ -225,8 +226,24 @@ def serialize_drm(drm_list) -> Optional[List[Dict[str, Any]]]:
|
||||
# Check if it's already base64-like or an object repr
|
||||
if not pssh_str.startswith("<"):
|
||||
drm_info["pssh"] = pssh_str
|
||||
except (ValueError, TypeError, KeyError):
|
||||
# Some PSSH implementations can fail to parse/serialize; log and continue.
|
||||
pssh_type = type(pssh_obj).__name__ if pssh_obj is not None else None
|
||||
log.warning(
|
||||
"Failed to extract/serialize PSSH for DRM type=%s pssh_type=%s",
|
||||
drm_class,
|
||||
pssh_type,
|
||||
exc_info=True,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
# Don't silently swallow unexpected failures; make them visible and propagate.
|
||||
pssh_type = type(pssh_obj).__name__ if pssh_obj is not None else None
|
||||
log.exception(
|
||||
"Unexpected error while extracting/serializing PSSH for DRM type=%s pssh_type=%s",
|
||||
drm_class,
|
||||
pssh_type,
|
||||
)
|
||||
raise
|
||||
|
||||
# Get KIDs
|
||||
if hasattr(drm, "kids") and drm.kids:
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -8,9 +8,6 @@ from unshackle.core import __version__
|
||||
from unshackle.core.api.errors import APIError, APIErrorCode, build_error_response, handle_api_exception
|
||||
from unshackle.core.api.handlers import (cancel_download_job_handler, download_handler, get_download_job_handler,
|
||||
list_download_jobs_handler, list_titles_handler, list_tracks_handler)
|
||||
from unshackle.core.api.remote_handlers import (remote_decrypt, remote_get_chapters, remote_get_license,
|
||||
remote_get_manifest, remote_get_titles, remote_get_tracks,
|
||||
remote_list_services, remote_search)
|
||||
from unshackle.core.services import Services
|
||||
from unshackle.core.update_checker import UpdateChecker
|
||||
|
||||
@@ -733,16 +730,6 @@ def setup_routes(app: web.Application) -> None:
|
||||
app.router.add_get("/api/download/jobs/{job_id}", download_job_detail)
|
||||
app.router.add_delete("/api/download/jobs/{job_id}", cancel_download_job)
|
||||
|
||||
# Remote service endpoints
|
||||
app.router.add_get("/api/remote/services", remote_list_services)
|
||||
app.router.add_post("/api/remote/{service}/search", remote_search)
|
||||
app.router.add_post("/api/remote/{service}/titles", remote_get_titles)
|
||||
app.router.add_post("/api/remote/{service}/tracks", remote_get_tracks)
|
||||
app.router.add_post("/api/remote/{service}/manifest", remote_get_manifest)
|
||||
app.router.add_post("/api/remote/{service}/chapters", remote_get_chapters)
|
||||
app.router.add_post("/api/remote/{service}/license", remote_get_license)
|
||||
app.router.add_post("/api/remote/{service}/decrypt", remote_decrypt)
|
||||
|
||||
|
||||
def setup_swagger(app: web.Application) -> None:
|
||||
"""Setup Swagger UI documentation."""
|
||||
@@ -767,14 +754,5 @@ def setup_swagger(app: web.Application) -> None:
|
||||
web.get("/api/download/jobs", download_jobs),
|
||||
web.get("/api/download/jobs/{job_id}", download_job_detail),
|
||||
web.delete("/api/download/jobs/{job_id}", cancel_download_job),
|
||||
# Remote service routes
|
||||
web.get("/api/remote/services", remote_list_services),
|
||||
web.post("/api/remote/{service}/search", remote_search),
|
||||
web.post("/api/remote/{service}/titles", remote_get_titles),
|
||||
web.post("/api/remote/{service}/tracks", remote_get_tracks),
|
||||
web.post("/api/remote/{service}/manifest", remote_get_manifest),
|
||||
web.post("/api/remote/{service}/chapters", remote_get_chapters),
|
||||
web.post("/api/remote/{service}/license", remote_get_license),
|
||||
web.post("/api/remote/{service}/decrypt", remote_decrypt),
|
||||
]
|
||||
)
|
||||
|
||||
@@ -1,236 +0,0 @@
|
||||
"""Session serialization helpers for remote services."""
|
||||
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
import requests
|
||||
|
||||
from unshackle.core.credential import Credential
|
||||
|
||||
|
||||
def serialize_session(session: requests.Session) -> Dict[str, Any]:
    """
    Convert a requests.Session into a JSON-serializable dictionary.

    Captures cookies, headers, and proxies so the session state can be
    shipped to a remote client for downloading.

    Args:
        session: The requests.Session to serialize.

    Returns:
        Dict with "cookies", "headers", and "proxies" entries.
    """
    cookies: Dict[str, Any] = {}
    if session.cookies:
        for cookie in session.cookies:
            cookies[cookie.name] = {
                "value": cookie.value,
                "domain": cookie.domain,
                "path": cookie.path,
                "secure": cookie.secure,
                "expires": cookie.expires,
            }

    headers: Dict[str, Any] = {}
    if session.headers:
        # Proxy credentials are server-specific; never ship them to clients.
        headers = {
            name: value
            for name, value in session.headers.items()
            if name.lower() != "proxy-authorization"
        }

    return {
        "cookies": cookies,
        "headers": headers,
        "proxies": session.proxies.copy() if session.proxies else {},
    }
|
||||
|
||||
|
||||
def deserialize_session(
    session_data: Dict[str, Any], target_session: Optional[requests.Session] = None
) -> requests.Session:
    """
    Apply serialized session data onto a requests.Session.

    Cookies and headers produced by `serialize_session` are restored onto
    the given session (or a freshly created one).

    Args:
        session_data: Dictionary containing serialized session data.
        target_session: Existing session to update; a new one is created
            when None.

    Returns:
        The session with cookies/headers applied.
    """
    sess = target_session if target_session is not None else requests.Session()

    for cookie_name, cookie_data in session_data.get("cookies", {}).items():
        sess.cookies.set(
            name=cookie_name,
            value=cookie_data["value"],
            domain=cookie_data.get("domain"),
            path=cookie_data.get("path", "/"),
            secure=cookie_data.get("secure", False),
            expires=cookie_data.get("expires"),
        )

    if "headers" in session_data:
        sess.headers.update(session_data["headers"])

    # Proxies are deliberately NOT applied: the local client should keep
    # using its own proxy configuration.

    return sess
|
||||
|
||||
|
||||
def extract_session_tokens(session: requests.Session) -> Dict[str, Any]:
    """
    Extract authentication tokens from a session's headers.

    Recognizes the ``Authorization`` header plus common API-key headers.

    Args:
        session: The requests.Session to extract tokens from.

    Returns:
        Dict of extracted tokens keyed by a snake_case header name
        (``Authorization`` is stored under ``"authorization"``).
    """
    tokens: Dict[str, Any] = {}

    if "Authorization" in session.headers:
        tokens["authorization"] = session.headers["Authorization"]

    for header in ("X-API-Key", "Api-Key", "X-Auth-Token"):
        if header in session.headers:
            tokens[header.lower().replace("-", "_")] = session.headers[header]

    return tokens
|
||||
|
||||
|
||||
def apply_session_tokens(tokens: Dict[str, Any], target_session: requests.Session) -> None:
    """
    Apply previously extracted authentication tokens onto a session.

    Args:
        tokens: Dictionary produced by `extract_session_tokens`.
        target_session: Session whose headers receive the tokens.
    """
    if "authorization" in tokens:
        target_session.headers["Authorization"] = tokens["authorization"]

    # Map snake_case token keys back to their canonical header names.
    for token_key, header_name in (
        ("x_api_key", "X-API-Key"),
        ("api_key", "Api-Key"),
        ("x_auth_token", "X-Auth-Token"),
    ):
        if token_key in tokens:
            target_session.headers[header_name] = tokens[token_key]
|
||||
|
||||
|
||||
def serialize_cookies(cookie_jar: Optional[CookieJar]) -> Dict[str, Any]:
    """
    Convert a CookieJar into a JSON-serializable dictionary.

    Args:
        cookie_jar: The CookieJar to serialize (None/empty yields ``{}``).

    Returns:
        Mapping of cookie name to its value/domain/path/secure/expires.
    """
    if not cookie_jar:
        return {}

    return {
        cookie.name: {
            "value": cookie.value,
            "domain": cookie.domain,
            "path": cookie.path,
            "secure": cookie.secure,
            "expires": cookie.expires,
        }
        for cookie in cookie_jar
    }
|
||||
|
||||
|
||||
def deserialize_cookies(cookies_data: Dict[str, Any]) -> CookieJar:
    """
    Rebuild a CookieJar from serialized cookie data.

    Args:
        cookies_data: Mapping produced by `serialize_cookies`.

    Returns:
        CookieJar populated with the described cookies.
    """
    import http.cookiejar

    jar = http.cookiejar.CookieJar()

    for cookie_name, cookie_data in cookies_data.items():
        domain = cookie_data.get("domain", "")
        jar.set_cookie(
            http.cookiejar.Cookie(
                version=0,
                name=cookie_name,
                value=cookie_data["value"],
                port=None,
                port_specified=False,
                domain=domain,
                domain_specified=bool(cookie_data.get("domain")),
                domain_initial_dot=domain.startswith("."),
                path=cookie_data.get("path", "/"),
                path_specified=True,
                secure=cookie_data.get("secure", False),
                expires=cookie_data.get("expires"),
                discard=False,
                comment=None,
                comment_url=None,
                rest={},
            )
        )

    return jar
|
||||
|
||||
|
||||
def serialize_credential(credential: Optional[Credential]) -> Optional[Dict[str, str]]:
    """
    Convert a Credential into a JSON-serializable dictionary.

    Args:
        credential: The Credential to serialize.

    Returns:
        Dict with "username" and "password", or None for no credential.
    """
    if not credential:
        return None
    return {
        "username": credential.username,
        "password": credential.password,
    }
|
||||
|
||||
|
||||
def deserialize_credential(credential_data: Optional[Dict[str, str]]) -> Optional[Credential]:
    """
    Rebuild a Credential object from serialized credential data.

    Args:
        credential_data: Dict with "username" and "password", or None.

    Returns:
        Credential object, or None when no data was given.
    """
    if not credential_data:
        return None
    return Credential(
        username=credential_data["username"],
        password=credential_data["password"],
    )
|
||||
@@ -1,5 +1,57 @@
|
||||
from .custom_remote_cdm import CustomRemoteCDM
|
||||
from .decrypt_labs_remote_cdm import DecryptLabsRemoteCDM
|
||||
from .monalisa import MonaLisaCDM
|
||||
"""
|
||||
CDM helpers and implementations.
|
||||
|
||||
__all__ = ["DecryptLabsRemoteCDM", "CustomRemoteCDM", "MonaLisaCDM"]
|
||||
Keep this module import-light: downstream code frequently imports helpers from
|
||||
`unshackle.core.cdm.detect`, which requires importing this package first.
|
||||
Some CDM implementations pull in optional/heavy dependencies, so we lazily
|
||||
import them via `__getattr__` (PEP 562).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
__all__ = [
|
||||
"DecryptLabsRemoteCDM",
|
||||
"CustomRemoteCDM",
|
||||
"MonaLisaCDM",
|
||||
"is_remote_cdm",
|
||||
"is_local_cdm",
|
||||
"cdm_location",
|
||||
"is_playready_cdm",
|
||||
"is_widevine_cdm",
|
||||
]
|
||||
|
||||
|
||||
def __getattr__(name: str) -> Any:
    """
    Lazily resolve the package's public names (PEP 562).

    CDM implementations pull in optional/heavy dependencies, so each class
    is imported only on first attribute access; the `detect` helpers are
    likewise resolved on demand.
    """
    if name == "DecryptLabsRemoteCDM":
        from .decrypt_labs_remote_cdm import DecryptLabsRemoteCDM as resolved
        return resolved
    if name == "CustomRemoteCDM":
        from .custom_remote_cdm import CustomRemoteCDM as resolved
        return resolved
    if name == "MonaLisaCDM":
        from .monalisa import MonaLisaCDM as resolved
        return resolved

    detect_helpers = (
        "is_remote_cdm",
        "is_local_cdm",
        "cdm_location",
        "is_playready_cdm",
        "is_widevine_cdm",
    )
    if name in detect_helpers:
        from . import detect
        return getattr(detect, name)

    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
||||
|
||||
187
unshackle/core/cdm/detect.py
Normal file
187
unshackle/core/cdm/detect.py
Normal file
@@ -0,0 +1,187 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def is_remote_cdm(cdm: Any) -> bool:
    """
    Return True if the CDM instance is backed by a remote/service CDM.

    Useful for service logic that needs to know whether the CDM runs
    locally (in-process) vs over HTTP/RPC (remote). Resolution order:
    an explicit ``is_remote_cdm`` attribute, isinstance checks against the
    optional pyplayready/pywidevine remote classes, then class module/name
    heuristics.
    """
    if cdm is None:
        return False

    # An explicit flag on the object wins when it can be read.
    if hasattr(cdm, "is_remote_cdm"):
        try:
            return bool(cdm.is_remote_cdm)
        except Exception:
            pass

    # Collect whichever optional remote CDM base classes are importable
    # (PlayReady first, then Widevine) and isinstance-check against them.
    remote_bases = []
    try:
        from pyplayready.remote.remotecdm import RemoteCdm as _PlayReadyRemote
        remote_bases.append(_PlayReadyRemote)
    except Exception:
        pass
    try:
        from pywidevine.remotecdm import RemoteCdm as _WidevineRemote
        remote_bases.append(_WidevineRemote)
    except Exception:
        pass

    for base in remote_bases:
        try:
            if isinstance(cdm, base):
                return True
        except Exception:
            pass

    # Fall back to class identity heuristics.
    cls = getattr(cdm, "__class__", None)
    module = getattr(cls, "__module__", "") or ""
    cls_name = getattr(cls, "__name__", "") or ""

    known_remote = {
        ("unshackle.core.cdm.decrypt_labs_remote_cdm", "DecryptLabsRemoteCDM"),
        ("unshackle.core.cdm.custom_remote_cdm", "CustomRemoteCDM"),
    }
    if (module, cls_name) in known_remote:
        return True

    if module.startswith(("pyplayready.remote", "pywidevine.remote")):
        return True

    lowered = cls_name.lower()
    if "remote" in module.lower() and lowered.endswith("cdm"):
        return True
    return lowered.endswith("remotecdm")
|
||||
|
||||
|
||||
def is_local_cdm(cdm: Any) -> bool:
    """
    Return True if the CDM instance is local/in-process.

    Unknown CDM types return False (use `cdm_location()` if you need the
    3-state answer).
    """
    if cdm is None or is_remote_cdm(cdm):
        return False

    # Local PlayReady/Widevine CDMs count as local.
    if is_playready_cdm(cdm) or is_widevine_cdm(cdm):
        return True

    # The bundled MonaLisa CDM runs in-process as well.
    cls = getattr(cdm, "__class__", None)
    identity = (
        getattr(cls, "__module__", "") or "",
        getattr(cls, "__name__", "") or "",
    )
    return identity == ("unshackle.core.cdm.monalisa.monalisa_cdm", "MonaLisaCDM")
|
||||
|
||||
|
||||
def cdm_location(cdm: Any) -> str:
    """
    Classify a CDM instance as "local", "remote", or "unknown".
    """
    if is_remote_cdm(cdm):
        return "remote"
    return "local" if is_local_cdm(cdm) else "unknown"
|
||||
|
||||
|
||||
def is_playready_cdm(cdm: Any) -> bool:
    """
    Return True if the given CDM should be treated as PlayReady.

    This intentionally supports both:
    - Local PlayReady CDMs (pyplayready.cdm.Cdm)
    - Remote/wrapper CDMs (e.g. DecryptLabsRemoteCDM) that expose `is_playready`

    A negative isinstance result no longer decides the answer: previously
    `return isinstance(cdm, PlayReadyCdm)` short-circuited, making the
    RemoteCdm isinstance check and the module-name fallback unreachable
    whenever pyplayready was importable. Now only a positive match returns
    early; otherwise we fall through to the next check.
    """
    if cdm is None:
        return False

    # Remote/wrapper CDMs expose an explicit flag; trust it when readable.
    if hasattr(cdm, "is_playready"):
        try:
            return bool(cdm.is_playready)
        except Exception:
            pass

    # isinstance checks against pyplayready's local and remote CDM classes
    # (both optional imports).
    for module_path, class_name in (
        ("pyplayready.cdm", "Cdm"),
        ("pyplayready.remote.remotecdm", "RemoteCdm"),
    ):
        try:
            module = __import__(module_path, fromlist=[class_name])
            candidate = getattr(module, class_name)
        except Exception:
            continue
        try:
            if isinstance(cdm, candidate):
                return True
        except Exception:
            pass

    # Last resort: anything defined inside the pyplayready package.
    mod = getattr(getattr(cdm, "__class__", None), "__module__", "") or ""
    return "pyplayready" in mod
|
||||
|
||||
|
||||
def is_widevine_cdm(cdm: Any) -> bool:
    """
    Return True if the given CDM should be treated as Widevine.

    Note: for remote/wrapper CDMs that expose `is_playready`, Widevine is
    treated as the logical opposite.

    A negative isinstance result no longer decides the answer: previously
    `return isinstance(cdm, WidevineCdm)` short-circuited, making the
    RemoteCdm isinstance check and the module-name fallback unreachable
    whenever pywidevine was importable. Now only a positive match returns
    early; otherwise we fall through to the next check.
    """
    if cdm is None:
        return False

    # Remote/wrapper CDMs expose `is_playready`; Widevine is its opposite.
    if hasattr(cdm, "is_playready"):
        try:
            return not bool(cdm.is_playready)
        except Exception:
            pass

    # isinstance checks against pywidevine's local and remote CDM classes
    # (both optional imports).
    for module_path, class_name in (
        ("pywidevine.cdm", "Cdm"),
        ("pywidevine.remotecdm", "RemoteCdm"),
    ):
        try:
            module = __import__(module_path, fromlist=[class_name])
            candidate = getattr(module, class_name)
        except Exception:
            continue
        try:
            if isinstance(cdm, candidate):
                return True
        except Exception:
            pass

    # Last resort: anything defined inside the pywidevine package.
    mod = getattr(getattr(cdm, "__class__", None), "__module__", "") or ""
    return "pywidevine" in mod
|
||||
@@ -7,8 +7,11 @@ a WebAssembly module that runs locally via wasmtime.
|
||||
|
||||
import base64
|
||||
import ctypes
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional, Union
|
||||
@@ -17,6 +20,8 @@ import wasmtime
|
||||
|
||||
from unshackle.core import binaries
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MonaLisaCDM:
|
||||
"""
|
||||
@@ -128,10 +133,27 @@ class MonaLisaCDM:
|
||||
}
|
||||
|
||||
self.exports["___wasm_call_ctors"](self.store)
|
||||
self.ctx = self.exports["_monalisa_context_alloc"](self.store)
|
||||
ctx = self.exports["_monalisa_context_alloc"](self.store)
|
||||
self.ctx = ctx
|
||||
|
||||
# _monalisa_context_alloc is expected to return a positive pointer/handle.
|
||||
# Treat 0/negative/non-int-like values as allocation failure.
|
||||
try:
|
||||
ctx_int = int(ctx)
|
||||
except Exception:
|
||||
ctx_int = None
|
||||
|
||||
if ctx_int is None or ctx_int <= 0:
|
||||
# Ensure we don't leave a partially-initialized instance around.
|
||||
self.close()
|
||||
raise RuntimeError(f"Failed to allocate MonaLisa context (ctx={ctx!r})")
|
||||
return 1
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to initialize session: {e}")
|
||||
# Clean up partial state (e.g., store/memory/instance) before propagating failure.
|
||||
self.close()
|
||||
if isinstance(e, RuntimeError):
|
||||
raise
|
||||
raise RuntimeError(f"Failed to initialize session: {e}") from e
|
||||
|
||||
def close(self, session_id: int = 1) -> None:
|
||||
"""
|
||||
@@ -188,7 +210,9 @@ class MonaLisaCDM:
|
||||
# Extract DCID from license to generate KID
|
||||
try:
|
||||
decoded = base64.b64decode(license_b64).decode("ascii", errors="ignore")
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
# Avoid logging raw license content; log only safe metadata.
|
||||
logger.exception("Failed to base64-decode MonaLisa license (len=%s): %s", len(license_b64), e)
|
||||
decoded = ""
|
||||
|
||||
m = re.search(
|
||||
@@ -198,7 +222,14 @@ class MonaLisaCDM:
|
||||
if m:
|
||||
kid_bytes = uuid.uuid5(uuid.NAMESPACE_DNS, m.group()).bytes
|
||||
else:
|
||||
kid_bytes = uuid.UUID(int=0).bytes
|
||||
# No DCID in the license: derive a deterministic per-license KID to avoid collisions.
|
||||
try:
|
||||
license_raw = base64.b64decode(license_b64)
|
||||
except Exception:
|
||||
license_raw = license_b64.encode("utf-8", errors="replace")
|
||||
|
||||
license_hash = hashlib.sha256(license_raw).hexdigest()
|
||||
kid_bytes = uuid.uuid5(uuid.NAMESPACE_DNS, f"monalisa:license:{license_hash}").bytes
|
||||
|
||||
return {"kid": kid_bytes.hex(), "key": key_bytes.hex(), "type": "CONTENT"}
|
||||
|
||||
@@ -221,21 +252,29 @@ class MonaLisaCDM:
|
||||
stack = 0
|
||||
converted_args = []
|
||||
|
||||
for arg in args:
|
||||
if isinstance(arg, str):
|
||||
if stack == 0:
|
||||
stack = self.exports["stackSave"](self.store)
|
||||
max_length = (len(arg) << 2) + 1
|
||||
ptr = self.exports["stackAlloc"](self.store, max_length)
|
||||
self._string_to_utf8(arg, ptr, max_length)
|
||||
converted_args.append(ptr)
|
||||
else:
|
||||
converted_args.append(arg)
|
||||
try:
|
||||
for arg in args:
|
||||
if isinstance(arg, str):
|
||||
if stack == 0:
|
||||
stack = self.exports["stackSave"](self.store)
|
||||
max_length = (len(arg) << 2) + 1
|
||||
ptr = self.exports["stackAlloc"](self.store, max_length)
|
||||
self._string_to_utf8(arg, ptr, max_length)
|
||||
converted_args.append(ptr)
|
||||
else:
|
||||
converted_args.append(arg)
|
||||
|
||||
result = self.exports[func_name](self.store, *converted_args)
|
||||
|
||||
if stack != 0:
|
||||
self.exports["stackRestore"](self.store, stack)
|
||||
result = self.exports[func_name](self.store, *converted_args)
|
||||
finally:
|
||||
# stackAlloc pointers live on the WASM stack; always restore even if the call throws.
|
||||
if stack != 0:
|
||||
exc = sys.exc_info()[1]
|
||||
try:
|
||||
self.exports["stackRestore"](self.store, stack)
|
||||
except Exception:
|
||||
# If we're already failing, don't mask the original exception.
|
||||
if exc is None:
|
||||
raise
|
||||
|
||||
if return_type is bool:
|
||||
return bool(result)
|
||||
@@ -243,6 +282,13 @@ class MonaLisaCDM:
|
||||
|
||||
def _write_i32(self, addr: int, value: int) -> None:
|
||||
"""Write a 32-bit integer to WASM memory."""
|
||||
if addr % 4 != 0:
|
||||
raise ValueError(f"Unaligned i32 write: addr={addr} (must be 4-byte aligned)")
|
||||
|
||||
data_len = self.memory.data_len(self.store)
|
||||
if addr < 0 or addr + 4 > data_len:
|
||||
raise IndexError(f"i32 write out of bounds: addr={addr}, mem_len={data_len}")
|
||||
|
||||
data = self.memory.data_ptr(self.store)
|
||||
mem_ptr = ctypes.cast(data, ctypes.POINTER(ctypes.c_int32))
|
||||
mem_ptr[addr >> 2] = value
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import textwrap
|
||||
@@ -54,11 +55,13 @@ class _Aria2Manager:
|
||||
"""Singleton manager to run one aria2c process and enqueue downloads via RPC."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._logger = logging.getLogger(__name__)
|
||||
self._proc: Optional[subprocess.Popen] = None
|
||||
self._rpc_port: Optional[int] = None
|
||||
self._rpc_secret: Optional[str] = None
|
||||
self._rpc_uri: Optional[str] = None
|
||||
self._session: Session = Session()
|
||||
self._max_workers: Optional[int] = None
|
||||
self._max_concurrent_downloads: int = 0
|
||||
self._max_connection_per_server: int = 1
|
||||
self._split_default: int = 5
|
||||
@@ -66,6 +69,47 @@ class _Aria2Manager:
|
||||
self._proxy: Optional[str] = None
|
||||
self._lock: threading.Lock = threading.Lock()
|
||||
|
||||
def _wait_for_rpc_ready(self, timeout_s: float = 8.0, interval_s: float = 0.1) -> None:
|
||||
assert self._proc is not None
|
||||
assert self._rpc_uri is not None
|
||||
assert self._rpc_secret is not None
|
||||
|
||||
deadline = time.monotonic() + timeout_s
|
||||
|
||||
payload = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": get_random_bytes(16).hex(),
|
||||
"method": "aria2.getVersion",
|
||||
"params": [f"token:{self._rpc_secret}"],
|
||||
}
|
||||
|
||||
while time.monotonic() < deadline:
|
||||
if self._proc.poll() is not None:
|
||||
raise RuntimeError(
|
||||
f"aria2c exited before RPC became ready (exit code {self._proc.returncode})"
|
||||
)
|
||||
try:
|
||||
res = self._session.post(self._rpc_uri, json=payload, timeout=0.25)
|
||||
data = res.json()
|
||||
if isinstance(data, dict) and data.get("result") is not None:
|
||||
return
|
||||
except (requests.exceptions.RequestException, ValueError):
|
||||
# Not ready yet (connection refused / bad response / etc.)
|
||||
pass
|
||||
time.sleep(interval_s)
|
||||
|
||||
# Timed out: ensure we don't leave a zombie/stray aria2c process behind.
|
||||
try:
|
||||
self._proc.terminate()
|
||||
self._proc.wait(timeout=2)
|
||||
except Exception:
|
||||
try:
|
||||
self._proc.kill()
|
||||
self._proc.wait(timeout=2)
|
||||
except Exception:
|
||||
pass
|
||||
raise TimeoutError(f"aria2c RPC did not become ready within {timeout_s:.1f}s")
|
||||
|
||||
def _build_args(self) -> list[str]:
|
||||
args = [
|
||||
"--continue=true",
|
||||
@@ -95,9 +139,6 @@ class _Aria2Manager:
|
||||
max_workers: Optional[int],
|
||||
) -> None:
|
||||
with self._lock:
|
||||
if self._proc and self._proc.poll() is None:
|
||||
return
|
||||
|
||||
if not binaries.Aria2:
|
||||
debug_logger = get_debug_logger()
|
||||
if debug_logger:
|
||||
@@ -109,27 +150,45 @@ class _Aria2Manager:
|
||||
)
|
||||
raise EnvironmentError("Aria2c executable not found...")
|
||||
|
||||
effective_proxy = proxy or None
|
||||
|
||||
if not max_workers:
|
||||
max_workers = min(32, (os.cpu_count() or 1) + 4)
|
||||
effective_max_workers = min(32, (os.cpu_count() or 1) + 4)
|
||||
elif not isinstance(max_workers, int):
|
||||
raise TypeError(f"Expected max_workers to be {int}, not {type(max_workers)}")
|
||||
else:
|
||||
effective_max_workers = max_workers
|
||||
|
||||
if self._proc and self._proc.poll() is None:
|
||||
if effective_proxy != self._proxy or effective_max_workers != self._max_workers:
|
||||
self._logger.warning(
|
||||
"aria2c process is already running; requested proxy=%r, max_workers=%r, "
|
||||
"but running process will continue with proxy=%r, max_workers=%r",
|
||||
effective_proxy,
|
||||
effective_max_workers,
|
||||
self._proxy,
|
||||
self._max_workers,
|
||||
)
|
||||
return
|
||||
|
||||
self._rpc_port = get_free_port()
|
||||
self._rpc_secret = get_random_bytes(16).hex()
|
||||
self._rpc_uri = f"http://127.0.0.1:{self._rpc_port}/jsonrpc"
|
||||
|
||||
self._max_concurrent_downloads = int(config.aria2c.get("max_concurrent_downloads", max_workers))
|
||||
self._max_workers = effective_max_workers
|
||||
self._max_concurrent_downloads = int(
|
||||
config.aria2c.get("max_concurrent_downloads", effective_max_workers)
|
||||
)
|
||||
self._max_connection_per_server = int(config.aria2c.get("max_connection_per_server", 1))
|
||||
self._split_default = int(config.aria2c.get("split", 5))
|
||||
self._file_allocation = config.aria2c.get("file_allocation", "prealloc")
|
||||
self._proxy = proxy or None
|
||||
self._proxy = effective_proxy
|
||||
|
||||
args = self._build_args()
|
||||
self._proc = subprocess.Popen(
|
||||
[binaries.Aria2, *args], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
|
||||
)
|
||||
# Give aria2c a moment to start up and bind to the RPC port
|
||||
time.sleep(0.5)
|
||||
self._wait_for_rpc_ready()
|
||||
|
||||
@property
|
||||
def rpc_uri(self) -> str:
|
||||
|
||||
@@ -192,8 +192,10 @@ def build_download_args(
|
||||
if ad_keyword:
|
||||
args["--ad-keyword"] = ad_keyword
|
||||
|
||||
key_args = []
|
||||
if content_keys:
|
||||
args["--key"] = next((f"{kid.hex}:{key.lower()}" for kid, key in content_keys.items()), None)
|
||||
for kid, key in content_keys.items():
|
||||
key_args.extend(["--key", f"{kid.hex}:{key.lower()}"])
|
||||
|
||||
decryption_config = config.decryption.lower()
|
||||
engine_name = DECRYPTION_ENGINE.get(decryption_config) or "SHAKA_PACKAGER"
|
||||
@@ -221,6 +223,9 @@ def build_download_args(
|
||||
elif value is not False and value is not None:
|
||||
command.extend([flag, str(value)])
|
||||
|
||||
# Append all content keys (multiple --key flags supported by N_m3u8DL-RE)
|
||||
command.extend(key_args)
|
||||
|
||||
if headers:
|
||||
for key, value in headers.items():
|
||||
if key.lower() not in ("accept-encoding", "cookie"):
|
||||
|
||||
@@ -260,11 +260,18 @@ def requests(
|
||||
},
|
||||
)
|
||||
|
||||
yield dict(total=len(urls))
|
||||
# If we're downloading more than one URL, treat them as "segments" for progress purposes.
|
||||
# For single-URL downloads we want per-chunk progress (and the inner `download()` will yield
|
||||
# a chunk-based `total`), so don't set a segment total of 1 here.
|
||||
segmented_batch = len(urls) > 1
|
||||
if segmented_batch:
|
||||
yield dict(total=len(urls))
|
||||
|
||||
try:
|
||||
with ThreadPoolExecutor(max_workers=max_workers) as pool:
|
||||
for future in as_completed(pool.submit(download, session=session, segmented=True, **url) for url in urls):
|
||||
for future in as_completed(
|
||||
pool.submit(download, session=session, segmented=segmented_batch, **url) for url in urls
|
||||
):
|
||||
try:
|
||||
yield from future.result()
|
||||
except KeyboardInterrupt:
|
||||
|
||||
@@ -7,6 +7,7 @@ segment decryption (ML-Worker binary + AES-ECB).
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
@@ -17,6 +18,8 @@ from uuid import UUID
|
||||
from Cryptodome.Cipher import AES
|
||||
from Cryptodome.Util.Padding import unpad
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MonaLisa:
|
||||
"""
|
||||
@@ -142,7 +145,16 @@ class MonaLisa:
|
||||
The raw PSSH value as a base64 string.
|
||||
"""
|
||||
if isinstance(self._ticket, bytes):
|
||||
return self._ticket.decode("utf-8")
|
||||
try:
|
||||
return self._ticket.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
# Tickets are typically base64, so ASCII is a reasonable fallback.
|
||||
try:
|
||||
return self._ticket.decode("ascii")
|
||||
except UnicodeDecodeError as e:
|
||||
raise ValueError(
|
||||
f"Ticket bytes must be UTF-8 text or ASCII base64; got undecodable bytes (len={len(self._ticket)})"
|
||||
) from e
|
||||
return self._ticket
|
||||
|
||||
@property
|
||||
@@ -222,19 +234,21 @@ class MonaLisa:
|
||||
raise MonaLisa.Exceptions.DecryptionFailed(f"Segment file does not exist: {segment_path}")
|
||||
|
||||
# Stage 1: ML-Worker decryption
|
||||
cmd = [str(worker_path), self._key, str(bbts_path), str(ents_path)]
|
||||
cmd = [str(worker_path), str(self._key), str(bbts_path), str(ents_path)]
|
||||
|
||||
startupinfo = None
|
||||
if sys.platform == "win32":
|
||||
startupinfo = subprocess.STARTUPINFO()
|
||||
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
|
||||
|
||||
worker_timeout_s = 60
|
||||
process = subprocess.run(
|
||||
cmd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
startupinfo=startupinfo,
|
||||
timeout=worker_timeout_s,
|
||||
)
|
||||
|
||||
if process.returncode != 0:
|
||||
@@ -260,6 +274,11 @@ class MonaLisa:
|
||||
|
||||
except MonaLisa.Exceptions.DecryptionFailed:
|
||||
raise
|
||||
except subprocess.TimeoutExpired as e:
|
||||
log.error("ML-Worker timed out after %ss for %s", worker_timeout_s, segment_path.name)
|
||||
raise MonaLisa.Exceptions.DecryptionFailed(
|
||||
f"ML-Worker timed out after {worker_timeout_s}s for {segment_path.name}"
|
||||
) from e
|
||||
except Exception as e:
|
||||
raise MonaLisa.Exceptions.DecryptionFailed(f"Failed to decrypt segment {segment_path.name}: {e}")
|
||||
finally:
|
||||
|
||||
@@ -19,12 +19,12 @@ import requests
|
||||
from curl_cffi.requests import Session as CurlSession
|
||||
from langcodes import Language, tag_is_valid
|
||||
from lxml.etree import Element, ElementTree
|
||||
from pyplayready.cdm import Cdm as PlayReadyCdm
|
||||
from pyplayready.system.pssh import PSSH as PR_PSSH
|
||||
from pywidevine.cdm import Cdm as WidevineCdm
|
||||
from pywidevine.pssh import PSSH
|
||||
from requests import Session
|
||||
|
||||
from unshackle.core.cdm.detect import is_playready_cdm
|
||||
from unshackle.core.constants import DOWNLOAD_CANCELLED, DOWNLOAD_LICENCE_ONLY, AnyTrack
|
||||
from unshackle.core.downloaders import requests as requests_downloader
|
||||
from unshackle.core.drm import DRM_T, PlayReady, Widevine
|
||||
@@ -477,7 +477,7 @@ class DASH:
|
||||
track.data["dash"]["segment_durations"] = segment_durations
|
||||
|
||||
if not track.drm and init_data and isinstance(track, (Video, Audio)):
|
||||
prefers_playready = isinstance(cdm, PlayReadyCdm) or (hasattr(cdm, "is_playready") and cdm.is_playready)
|
||||
prefers_playready = is_playready_cdm(cdm)
|
||||
if prefers_playready:
|
||||
try:
|
||||
track.drm = [PlayReady.from_init_data(init_data)]
|
||||
|
||||
@@ -28,6 +28,7 @@ from pywidevine.pssh import PSSH as WV_PSSH
|
||||
from requests import Session
|
||||
|
||||
from unshackle.core import binaries
|
||||
from unshackle.core.cdm.detect import is_playready_cdm, is_widevine_cdm
|
||||
from unshackle.core.constants import DOWNLOAD_CANCELLED, DOWNLOAD_LICENCE_ONLY, AnyTrack
|
||||
from unshackle.core.downloaders import requests as requests_downloader
|
||||
from unshackle.core.drm import DRM_T, ClearKey, MonaLisa, PlayReady, Widevine
|
||||
@@ -115,9 +116,14 @@ class HLS:
|
||||
|
||||
for playlist in self.manifest.playlists:
|
||||
audio_group = playlist.stream_info.audio
|
||||
if audio_group:
|
||||
audio_codec = Audio.Codec.from_codecs(playlist.stream_info.codecs)
|
||||
audio_codecs_by_group_id[audio_group] = audio_codec
|
||||
audio_codec: Optional[Audio.Codec] = None
|
||||
if audio_group and playlist.stream_info.codecs:
|
||||
try:
|
||||
audio_codec = Audio.Codec.from_codecs(playlist.stream_info.codecs)
|
||||
except ValueError:
|
||||
audio_codec = None
|
||||
if audio_codec:
|
||||
audio_codecs_by_group_id[audio_group] = audio_codec
|
||||
|
||||
try:
|
||||
# TODO: Any better way to figure out the primary track type?
|
||||
@@ -225,6 +231,39 @@ class HLS:
|
||||
|
||||
return tracks
|
||||
|
||||
@staticmethod
|
||||
def _finalize_n_m3u8dl_re_output(*, track: AnyTrack, save_dir: Path, save_path: Path) -> Path:
|
||||
"""
|
||||
Finalize output from N_m3u8DL-RE.
|
||||
|
||||
We call N_m3u8DL-RE with `--save-name track.id`, so the final file should be `{track.id}.*` under `save_dir`.
|
||||
This moves that output to `save_path` (preserving the real suffix) and, for subtitles, updates `track.codec`
|
||||
to match the produced file extension.
|
||||
"""
|
||||
matches = [p for p in save_dir.rglob(f"{track.id}.*") if p.is_file()]
|
||||
if not matches:
|
||||
raise FileNotFoundError(f"No output files produced by N_m3u8DL-RE for save-name={track.id} in: {save_dir}")
|
||||
|
||||
primary = max(matches, key=lambda p: p.stat().st_size)
|
||||
|
||||
final_save_path = save_path.with_suffix(primary.suffix) if primary.suffix else save_path
|
||||
|
||||
final_save_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if primary.absolute() != final_save_path.absolute():
|
||||
final_save_path.unlink(missing_ok=True)
|
||||
shutil.move(str(primary), str(final_save_path))
|
||||
|
||||
if isinstance(track, Subtitle):
|
||||
ext = final_save_path.suffix.lower().lstrip(".")
|
||||
try:
|
||||
track.codec = Subtitle.Codec.from_mime(ext)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
shutil.rmtree(save_dir, ignore_errors=True)
|
||||
|
||||
return final_save_path
|
||||
|
||||
@staticmethod
|
||||
def download_track(
|
||||
track: AnyTrack,
|
||||
@@ -255,7 +294,7 @@ class HLS:
|
||||
else:
|
||||
# Get the playlist text and handle both session types
|
||||
response = session.get(track.url)
|
||||
if isinstance(response, requests.Response):
|
||||
if isinstance(response, requests.Response) or isinstance(response, CurlResponse):
|
||||
if not response.ok:
|
||||
log.error(f"Failed to request the invariant M3U8 playlist: {response.status_code}")
|
||||
sys.exit(1)
|
||||
@@ -317,8 +356,16 @@ class HLS:
|
||||
raise
|
||||
|
||||
if not initial_drm_licensed and session_drm and isinstance(session_drm, MonaLisa):
|
||||
if license_widevine:
|
||||
try:
|
||||
if not license_widevine:
|
||||
raise ValueError("license_widevine func must be supplied to use DRM")
|
||||
progress(downloaded="LICENSING")
|
||||
license_widevine(session_drm)
|
||||
progress(downloaded="[yellow]LICENSED")
|
||||
except Exception: # noqa
|
||||
DOWNLOAD_CANCELLED.set() # skip pending track downloads
|
||||
progress(downloaded="[red]FAILED")
|
||||
raise
|
||||
|
||||
if DOWNLOAD_LICENCE_ONLY.is_set():
|
||||
progress(downloaded="[yellow]SKIPPED")
|
||||
@@ -420,222 +467,228 @@ class HLS:
|
||||
for control_file in segment_save_dir.glob("*.aria2__temp"):
|
||||
control_file.unlink()
|
||||
|
||||
if not skip_merge:
|
||||
progress(total=total_segments, completed=0, downloaded="Merging")
|
||||
if skip_merge:
|
||||
final_save_path = HLS._finalize_n_m3u8dl_re_output(track=track, save_dir=save_dir, save_path=save_path)
|
||||
progress(downloaded="Downloaded")
|
||||
track.path = final_save_path
|
||||
events.emit(events.Types.TRACK_DOWNLOADED, track=track)
|
||||
return
|
||||
|
||||
name_len = len(str(total_segments))
|
||||
discon_i = 0
|
||||
range_offset = 0
|
||||
map_data: Optional[tuple[m3u8.model.InitializationSection, bytes]] = None
|
||||
if session_drm:
|
||||
encryption_data: Optional[tuple[Optional[m3u8.Key], DRM_T]] = (initial_drm_key, session_drm)
|
||||
else:
|
||||
encryption_data: Optional[tuple[Optional[m3u8.Key], DRM_T]] = None
|
||||
progress(total=total_segments, completed=0, downloaded="Merging")
|
||||
|
||||
i = -1
|
||||
for real_i, segment in enumerate(master.segments):
|
||||
if segment not in unwanted_segments:
|
||||
i += 1
|
||||
name_len = len(str(total_segments))
|
||||
discon_i = 0
|
||||
range_offset = 0
|
||||
map_data: Optional[tuple[m3u8.model.InitializationSection, bytes]] = None
|
||||
if session_drm:
|
||||
encryption_data: Optional[tuple[Optional[m3u8.Key], DRM_T]] = (initial_drm_key, session_drm)
|
||||
else:
|
||||
encryption_data: Optional[tuple[Optional[m3u8.Key], DRM_T]] = None
|
||||
|
||||
is_last_segment = (real_i + 1) == len(master.segments)
|
||||
i = -1
|
||||
for real_i, segment in enumerate(master.segments):
|
||||
if segment not in unwanted_segments:
|
||||
i += 1
|
||||
|
||||
def merge(to: Path, via: list[Path], delete: bool = False, include_map_data: bool = False):
|
||||
"""
|
||||
Merge all files to a given path, optionally including map data.
|
||||
is_last_segment = (real_i + 1) == len(master.segments)
|
||||
|
||||
Parameters:
|
||||
to: The output file with all merged data.
|
||||
via: List of files to merge, in sequence.
|
||||
delete: Delete the file once it's been merged.
|
||||
include_map_data: Whether to include the init map data.
|
||||
"""
|
||||
with open(to, "wb") as x:
|
||||
if include_map_data and map_data and map_data[1]:
|
||||
x.write(map_data[1])
|
||||
for file in via:
|
||||
x.write(file.read_bytes())
|
||||
x.flush()
|
||||
if delete:
|
||||
file.unlink()
|
||||
def merge(to: Path, via: list[Path], delete: bool = False, include_map_data: bool = False):
|
||||
"""
|
||||
Merge all files to a given path, optionally including map data.
|
||||
|
||||
def decrypt(include_this_segment: bool) -> Path:
|
||||
"""
|
||||
Decrypt all segments that uses the currently set DRM.
|
||||
Parameters:
|
||||
to: The output file with all merged data.
|
||||
via: List of files to merge, in sequence.
|
||||
delete: Delete the file once it's been merged.
|
||||
include_map_data: Whether to include the init map data.
|
||||
"""
|
||||
with open(to, "wb") as x:
|
||||
if include_map_data and map_data and map_data[1]:
|
||||
x.write(map_data[1])
|
||||
for file in via:
|
||||
x.write(file.read_bytes())
|
||||
x.flush()
|
||||
if delete:
|
||||
file.unlink()
|
||||
|
||||
All segments that will be decrypted with this DRM will be merged together
|
||||
in sequence, prefixed with the init data (if any), and then deleted. Once
|
||||
merged they will be decrypted. The merged and decrypted file names state
|
||||
the range of segments that were used.
|
||||
def decrypt(include_this_segment: bool) -> Path:
|
||||
"""
|
||||
Decrypt all segments that uses the currently set DRM.
|
||||
|
||||
Parameters:
|
||||
include_this_segment: Whether to include the current segment in the
|
||||
list of segments to merge and decrypt. This should be False if
|
||||
decrypting on EXT-X-KEY changes, or True when decrypting on the
|
||||
last segment.
|
||||
All segments that will be decrypted with this DRM will be merged together
|
||||
in sequence, prefixed with the init data (if any), and then deleted. Once
|
||||
merged they will be decrypted. The merged and decrypted file names state
|
||||
the range of segments that were used.
|
||||
|
||||
Returns the decrypted path.
|
||||
"""
|
||||
drm = encryption_data[1]
|
||||
first_segment_i = next(
|
||||
int(file.stem) for file in sorted(segment_save_dir.iterdir()) if file.stem.isdigit()
|
||||
)
|
||||
last_segment_i = max(0, i - int(not include_this_segment))
|
||||
range_len = (last_segment_i - first_segment_i) + 1
|
||||
Parameters:
|
||||
include_this_segment: Whether to include the current segment in the
|
||||
list of segments to merge and decrypt. This should be False if
|
||||
decrypting on EXT-X-KEY changes, or True when decrypting on the
|
||||
last segment.
|
||||
|
||||
segment_range = f"{str(first_segment_i).zfill(name_len)}-{str(last_segment_i).zfill(name_len)}"
|
||||
merged_path = (
|
||||
segment_save_dir / f"{segment_range}{get_extension(master.segments[last_segment_i].uri)}"
|
||||
)
|
||||
decrypted_path = segment_save_dir / f"{merged_path.stem}_decrypted{merged_path.suffix}"
|
||||
Returns the decrypted path.
|
||||
"""
|
||||
drm = encryption_data[1]
|
||||
first_segment_i = next(
|
||||
int(file.stem) for file in sorted(segment_save_dir.iterdir()) if file.stem.isdigit()
|
||||
)
|
||||
last_segment_i = max(0, i - int(not include_this_segment))
|
||||
range_len = (last_segment_i - first_segment_i) + 1
|
||||
|
||||
files = [
|
||||
file
|
||||
for file in sorted(segment_save_dir.iterdir())
|
||||
if file.stem.isdigit() and first_segment_i <= int(file.stem) <= last_segment_i
|
||||
]
|
||||
if not files:
|
||||
raise ValueError(f"None of the segment files for {segment_range} exist...")
|
||||
elif len(files) != range_len:
|
||||
raise ValueError(f"Missing {range_len - len(files)} segment files for {segment_range}...")
|
||||
segment_range = f"{str(first_segment_i).zfill(name_len)}-{str(last_segment_i).zfill(name_len)}"
|
||||
merged_path = segment_save_dir / f"{segment_range}{get_extension(master.segments[last_segment_i].uri)}"
|
||||
decrypted_path = segment_save_dir / f"{merged_path.stem}_decrypted{merged_path.suffix}"
|
||||
|
||||
if isinstance(drm, (Widevine, PlayReady)):
|
||||
# with widevine we can merge all segments and decrypt once
|
||||
merge(to=merged_path, via=files, delete=True, include_map_data=True)
|
||||
drm.decrypt(merged_path)
|
||||
merged_path.rename(decrypted_path)
|
||||
else:
|
||||
# with other drm we must decrypt separately and then merge them
|
||||
# for aes this is because each segment likely has 16-byte padding
|
||||
for file in files:
|
||||
drm.decrypt(file)
|
||||
merge(to=merged_path, via=files, delete=True, include_map_data=True)
|
||||
files = [
|
||||
file
|
||||
for file in sorted(segment_save_dir.iterdir())
|
||||
if file.stem.isdigit() and first_segment_i <= int(file.stem) <= last_segment_i
|
||||
]
|
||||
if not files:
|
||||
raise ValueError(f"None of the segment files for {segment_range} exist...")
|
||||
elif len(files) != range_len:
|
||||
raise ValueError(f"Missing {range_len - len(files)} segment files for {segment_range}...")
|
||||
|
||||
events.emit(events.Types.TRACK_DECRYPTED, track=track, drm=drm, segment=decrypted_path)
|
||||
if isinstance(drm, (Widevine, PlayReady)):
|
||||
# with widevine we can merge all segments and decrypt once
|
||||
merge(to=merged_path, via=files, delete=True, include_map_data=True)
|
||||
drm.decrypt(merged_path)
|
||||
merged_path.rename(decrypted_path)
|
||||
else:
|
||||
# with other drm we must decrypt separately and then merge them
|
||||
# for aes this is because each segment likely has 16-byte padding
|
||||
for file in files:
|
||||
drm.decrypt(file)
|
||||
merge(to=merged_path, via=files, delete=True, include_map_data=True)
|
||||
|
||||
return decrypted_path
|
||||
events.emit(events.Types.TRACK_DECRYPTED, track=track, drm=drm, segment=decrypted_path)
|
||||
|
||||
def merge_discontinuity(include_this_segment: bool, include_map_data: bool = True):
|
||||
"""
|
||||
Merge all segments of the discontinuity.
|
||||
return decrypted_path
|
||||
|
||||
All segment files for this discontinuity must already be downloaded and
|
||||
already decrypted (if it needs to be decrypted).
|
||||
def merge_discontinuity(include_this_segment: bool, include_map_data: bool = True):
|
||||
"""
|
||||
Merge all segments of the discontinuity.
|
||||
|
||||
Parameters:
|
||||
include_this_segment: Whether to include the current segment in the
|
||||
list of segments to merge and decrypt. This should be False if
|
||||
decrypting on EXT-X-KEY changes, or True when decrypting on the
|
||||
last segment.
|
||||
include_map_data: Whether to prepend the init map data before the
|
||||
segment files when merging.
|
||||
"""
|
||||
last_segment_i = max(0, i - int(not include_this_segment))
|
||||
All segment files for this discontinuity must already be downloaded and
|
||||
already decrypted (if it needs to be decrypted).
|
||||
|
||||
files = [
|
||||
file
|
||||
for file in sorted(segment_save_dir.iterdir())
|
||||
if int(file.stem.replace("_decrypted", "").split("-")[-1]) <= last_segment_i
|
||||
]
|
||||
if files:
|
||||
to_dir = segment_save_dir.parent
|
||||
to_path = to_dir / f"{str(discon_i).zfill(name_len)}{files[-1].suffix}"
|
||||
merge(to=to_path, via=files, delete=True, include_map_data=include_map_data)
|
||||
Parameters:
|
||||
include_this_segment: Whether to include the current segment in the
|
||||
list of segments to merge and decrypt. This should be False if
|
||||
decrypting on EXT-X-KEY changes, or True when decrypting on the
|
||||
last segment.
|
||||
include_map_data: Whether to prepend the init map data before the
|
||||
segment files when merging.
|
||||
"""
|
||||
last_segment_i = max(0, i - int(not include_this_segment))
|
||||
|
||||
if segment not in unwanted_segments:
|
||||
if isinstance(track, Subtitle):
|
||||
segment_file_ext = get_extension(segment.uri)
|
||||
segment_file_path = segment_save_dir / f"{str(i).zfill(name_len)}{segment_file_ext}"
|
||||
segment_data = try_ensure_utf8(segment_file_path.read_bytes())
|
||||
if track.codec not in (Subtitle.Codec.fVTT, Subtitle.Codec.fTTML):
|
||||
segment_data = (
|
||||
segment_data.decode("utf8")
|
||||
.replace("‎", html.unescape("‎"))
|
||||
.replace("‏", html.unescape("‏"))
|
||||
.encode("utf8")
|
||||
)
|
||||
segment_file_path.write_bytes(segment_data)
|
||||
files = [
|
||||
file
|
||||
for file in sorted(segment_save_dir.iterdir())
|
||||
if int(file.stem.replace("_decrypted", "").split("-")[-1]) <= last_segment_i
|
||||
]
|
||||
if files:
|
||||
to_dir = segment_save_dir.parent
|
||||
to_path = to_dir / f"{str(discon_i).zfill(name_len)}{files[-1].suffix}"
|
||||
merge(to=to_path, via=files, delete=True, include_map_data=include_map_data)
|
||||
|
||||
if segment.discontinuity and i != 0:
|
||||
if encryption_data:
|
||||
decrypt(include_this_segment=False)
|
||||
merge_discontinuity(
|
||||
include_this_segment=False, include_map_data=not encryption_data or not encryption_data[1]
|
||||
if segment not in unwanted_segments:
|
||||
if isinstance(track, Subtitle):
|
||||
segment_file_ext = get_extension(segment.uri)
|
||||
segment_file_path = segment_save_dir / f"{str(i).zfill(name_len)}{segment_file_ext}"
|
||||
segment_data = try_ensure_utf8(segment_file_path.read_bytes())
|
||||
if track.codec not in (Subtitle.Codec.fVTT, Subtitle.Codec.fTTML):
|
||||
segment_data = (
|
||||
segment_data.decode("utf8")
|
||||
.replace("‎", html.unescape("‎"))
|
||||
.replace("‏", html.unescape("‏"))
|
||||
.encode("utf8")
|
||||
)
|
||||
segment_file_path.write_bytes(segment_data)
|
||||
|
||||
discon_i += 1
|
||||
range_offset = 0 # TODO: Should this be reset or not?
|
||||
map_data = None
|
||||
if encryption_data:
|
||||
encryption_data = (encryption_data[0], encryption_data[1])
|
||||
|
||||
if segment.init_section and (not map_data or segment.init_section != map_data[0]):
|
||||
if segment.init_section.byterange:
|
||||
init_byte_range = HLS.calculate_byte_range(segment.init_section.byterange, range_offset)
|
||||
range_offset = init_byte_range.split("-")[0]
|
||||
init_range_header = {"Range": f"bytes={init_byte_range}"}
|
||||
else:
|
||||
init_range_header = {}
|
||||
|
||||
# Handle both session types for init section request
|
||||
res = session.get(
|
||||
url=urljoin(segment.init_section.base_uri, segment.init_section.uri),
|
||||
headers=init_range_header,
|
||||
)
|
||||
|
||||
# Check response based on session type
|
||||
if isinstance(res, requests.Response):
|
||||
res.raise_for_status()
|
||||
init_content = res.content
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Expected response to be requests.Response or curl_cffi.Response, not {type(res)}"
|
||||
)
|
||||
|
||||
map_data = (segment.init_section, init_content)
|
||||
|
||||
segment_keys = getattr(segment, "keys", None)
|
||||
if segment_keys:
|
||||
if cdm:
|
||||
cdm_segment_keys = HLS.filter_keys_for_cdm(segment_keys, cdm)
|
||||
key = HLS.get_supported_key(cdm_segment_keys) if cdm_segment_keys else HLS.get_supported_key(segment_keys)
|
||||
else:
|
||||
key = HLS.get_supported_key(segment_keys)
|
||||
if encryption_data and encryption_data[0] != key and i != 0 and segment not in unwanted_segments:
|
||||
decrypt(include_this_segment=False)
|
||||
|
||||
if key is None:
|
||||
encryption_data = None
|
||||
elif not encryption_data or encryption_data[0] != key:
|
||||
drm = HLS.get_drm(key, session)
|
||||
if isinstance(drm, (Widevine, PlayReady)):
|
||||
try:
|
||||
if map_data:
|
||||
track_kid = track.get_key_id(map_data[1])
|
||||
else:
|
||||
track_kid = None
|
||||
if not track_kid:
|
||||
track_kid = drm.kid
|
||||
progress(downloaded="LICENSING")
|
||||
license_widevine(drm, track_kid=track_kid)
|
||||
progress(downloaded="[yellow]LICENSED")
|
||||
except Exception: # noqa
|
||||
DOWNLOAD_CANCELLED.set() # skip pending track downloads
|
||||
progress(downloaded="[red]FAILED")
|
||||
raise
|
||||
encryption_data = (key, drm)
|
||||
|
||||
if DOWNLOAD_LICENCE_ONLY.is_set():
|
||||
continue
|
||||
|
||||
if is_last_segment:
|
||||
# required as it won't end with EXT-X-DISCONTINUITY nor a new key
|
||||
if segment.discontinuity and i != 0:
|
||||
if encryption_data:
|
||||
decrypt(include_this_segment=True)
|
||||
decrypt(include_this_segment=False)
|
||||
merge_discontinuity(
|
||||
include_this_segment=True, include_map_data=not encryption_data or not encryption_data[1]
|
||||
include_this_segment=False, include_map_data=not encryption_data or not encryption_data[1]
|
||||
)
|
||||
|
||||
progress(advance=1)
|
||||
discon_i += 1
|
||||
range_offset = 0 # TODO: Should this be reset or not?
|
||||
map_data = None
|
||||
|
||||
if segment.init_section and (not map_data or segment.init_section != map_data[0]):
|
||||
if segment.init_section.byterange:
|
||||
init_byte_range = HLS.calculate_byte_range(segment.init_section.byterange, range_offset)
|
||||
range_offset = int(init_byte_range.split("-")[0])
|
||||
init_range_header = {"Range": f"bytes={init_byte_range}"}
|
||||
else:
|
||||
init_range_header = {}
|
||||
|
||||
# Handle both session types for init section request
|
||||
res = session.get(
|
||||
url=urljoin(segment.init_section.base_uri, segment.init_section.uri),
|
||||
headers=init_range_header,
|
||||
)
|
||||
|
||||
# Check response based on session type
|
||||
if isinstance(res, requests.Response) or isinstance(res, CurlResponse):
|
||||
res.raise_for_status()
|
||||
init_content = res.content
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Expected response to be requests.Response or curl_cffi.Response, not {type(res)}"
|
||||
)
|
||||
|
||||
map_data = (segment.init_section, init_content)
|
||||
|
||||
segment_keys = getattr(segment, "keys", None)
|
||||
if segment_keys:
|
||||
if cdm:
|
||||
cdm_segment_keys = HLS.filter_keys_for_cdm(segment_keys, cdm)
|
||||
key = (
|
||||
HLS.get_supported_key(cdm_segment_keys)
|
||||
if cdm_segment_keys
|
||||
else HLS.get_supported_key(segment_keys)
|
||||
)
|
||||
else:
|
||||
key = HLS.get_supported_key(segment_keys)
|
||||
if encryption_data and encryption_data[0] != key and i != 0 and segment not in unwanted_segments:
|
||||
decrypt(include_this_segment=False)
|
||||
|
||||
if key is None:
|
||||
encryption_data = None
|
||||
elif not encryption_data or encryption_data[0] != key:
|
||||
drm = HLS.get_drm(key, session)
|
||||
if isinstance(drm, (Widevine, PlayReady)):
|
||||
try:
|
||||
if map_data:
|
||||
track_kid = track.get_key_id(map_data[1])
|
||||
else:
|
||||
track_kid = None
|
||||
if not track_kid:
|
||||
track_kid = drm.kid
|
||||
progress(downloaded="LICENSING")
|
||||
license_widevine(drm, track_kid=track_kid)
|
||||
progress(downloaded="[yellow]LICENSED")
|
||||
except Exception: # noqa
|
||||
DOWNLOAD_CANCELLED.set() # skip pending track downloads
|
||||
progress(downloaded="[red]FAILED")
|
||||
raise
|
||||
encryption_data = (key, drm)
|
||||
|
||||
if DOWNLOAD_LICENCE_ONLY.is_set():
|
||||
continue
|
||||
|
||||
if is_last_segment:
|
||||
# required as it won't end with EXT-X-DISCONTINUITY nor a new key
|
||||
if encryption_data:
|
||||
decrypt(include_this_segment=True)
|
||||
merge_discontinuity(
|
||||
include_this_segment=True, include_map_data=not encryption_data or not encryption_data[1]
|
||||
)
|
||||
|
||||
progress(advance=1)
|
||||
|
||||
if DOWNLOAD_LICENCE_ONLY.is_set():
|
||||
return
|
||||
@@ -865,15 +918,10 @@ class HLS:
|
||||
"""
|
||||
playready_urn = f"urn:uuid:{PR_PSSH.SYSTEM_ID}"
|
||||
playready_keyformats = {playready_urn, "com.microsoft.playready"}
|
||||
if isinstance(cdm, WidevineCdm):
|
||||
if is_widevine_cdm(cdm):
|
||||
return [k for k in keys if k.keyformat and k.keyformat.lower() == WidevineCdm.urn]
|
||||
elif isinstance(cdm, PlayReadyCdm):
|
||||
elif is_playready_cdm(cdm):
|
||||
return [k for k in keys if k.keyformat and k.keyformat.lower() in playready_keyformats]
|
||||
elif hasattr(cdm, "is_playready"):
|
||||
if cdm.is_playready:
|
||||
return [k for k in keys if k.keyformat and k.keyformat.lower() in playready_keyformats]
|
||||
else:
|
||||
return [k for k in keys if k.keyformat and k.keyformat.lower() == WidevineCdm.urn]
|
||||
return keys
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -2,7 +2,9 @@ import atexit
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import stat
|
||||
import subprocess
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from typing import Optional
|
||||
@@ -750,7 +752,8 @@ class Gluetun(Proxy):
|
||||
|
||||
# Debug log environment variables (redact sensitive values)
|
||||
if debug_logger:
|
||||
safe_env = {k: ("***" if "KEY" in k or "PASSWORD" in k else v) for k, v in env_vars.items()}
|
||||
redact_markers = ("KEY", "PASSWORD", "PASS", "TOKEN", "SECRET", "USER")
|
||||
safe_env = {k: ("***" if any(m in k for m in redact_markers) else v) for k, v in env_vars.items()}
|
||||
debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="gluetun_env_vars",
|
||||
@@ -771,23 +774,62 @@ class Gluetun(Proxy):
|
||||
f"127.0.0.1:{port}:8888/tcp",
|
||||
]
|
||||
|
||||
# Add environment variables
|
||||
for key, value in env_vars.items():
|
||||
cmd.extend(["-e", f"{key}={value}"])
|
||||
|
||||
# Add Gluetun image
|
||||
cmd.append("qmcgaw/gluetun:latest")
|
||||
|
||||
# Execute docker run
|
||||
# Avoid exposing credentials in process listings by using --env-file instead of many "-e KEY=VALUE".
|
||||
env_file_path: str | None = None
|
||||
try:
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
encoding="utf-8",
|
||||
errors="replace",
|
||||
)
|
||||
fd, env_file_path = tempfile.mkstemp(prefix=f"unshackle-{container_name}-", suffix=".env")
|
||||
try:
|
||||
# Best-effort restrictive permissions.
|
||||
if os.name != "nt":
|
||||
if hasattr(os, "fchmod"):
|
||||
os.fchmod(fd, 0o600)
|
||||
else:
|
||||
os.chmod(env_file_path, 0o600)
|
||||
else:
|
||||
os.chmod(env_file_path, stat.S_IREAD | stat.S_IWRITE)
|
||||
|
||||
with os.fdopen(fd, "w", encoding="utf-8", newline="\n") as f:
|
||||
for key, value in env_vars.items():
|
||||
if "=" in key:
|
||||
raise ValueError(f"Invalid env var name for docker env-file: {key!r}")
|
||||
v = "" if value is None else str(value)
|
||||
if "\n" in v or "\r" in v:
|
||||
raise ValueError(f"Invalid env var value (contains newline) for {key!r}")
|
||||
f.write(f"{key}={v}\n")
|
||||
except Exception:
|
||||
# If we fail before fdopen closes the descriptor, make sure it's not leaked.
|
||||
try:
|
||||
os.close(fd)
|
||||
except Exception:
|
||||
pass
|
||||
raise
|
||||
|
||||
cmd.extend(["--env-file", env_file_path])
|
||||
|
||||
# Add Gluetun image
|
||||
cmd.append(gluetun_image)
|
||||
|
||||
# Execute docker run
|
||||
try:
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
encoding="utf-8",
|
||||
errors="replace",
|
||||
)
|
||||
except subprocess.TimeoutExpired:
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="gluetun_container_create_timeout",
|
||||
message=f"Docker run timed out for {container_name}",
|
||||
context={"container_name": container_name},
|
||||
success=False,
|
||||
duration_ms=(time.time() - start_time) * 1000,
|
||||
)
|
||||
raise RuntimeError("Docker run command timed out")
|
||||
|
||||
if result.returncode != 0:
|
||||
error_msg = result.stderr or "unknown error"
|
||||
@@ -826,29 +868,51 @@ class Gluetun(Proxy):
|
||||
success=True,
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="gluetun_container_create_timeout",
|
||||
message=f"Docker run timed out for {container_name}",
|
||||
context={"container_name": container_name},
|
||||
success=False,
|
||||
duration_ms=(time.time() - start_time) * 1000,
|
||||
)
|
||||
raise RuntimeError("Docker run command timed out")
|
||||
finally:
|
||||
if env_file_path:
|
||||
# Best-effort "secure delete": overwrite then unlink (not guaranteed on all filesystems).
|
||||
try:
|
||||
with open(env_file_path, "r+b") as f:
|
||||
try:
|
||||
f.seek(0, os.SEEK_END)
|
||||
length = f.tell()
|
||||
f.seek(0)
|
||||
if length > 0:
|
||||
f.write(b"\x00" * length)
|
||||
f.flush()
|
||||
os.fsync(f.fileno())
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
os.remove(env_file_path)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _is_container_running(self, container_name: str) -> bool:
|
||||
"""Check if a Docker container is running."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["docker", "ps", "--filter", f"name={container_name}", "--format", "{{.Names}}"],
|
||||
[
|
||||
"docker",
|
||||
"ps",
|
||||
"--filter",
|
||||
f"name=^{re.escape(container_name)}$",
|
||||
"--format",
|
||||
"{{.Names}}",
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5,
|
||||
)
|
||||
return result.returncode == 0 and container_name in result.stdout
|
||||
if result.returncode != 0:
|
||||
return False
|
||||
|
||||
names = [line.strip() for line in (result.stdout or "").splitlines() if line.strip()]
|
||||
return any(name == container_name for name in names)
|
||||
except (subprocess.TimeoutExpired, FileNotFoundError):
|
||||
return False
|
||||
|
||||
@@ -1132,98 +1196,104 @@ class Gluetun(Proxy):
|
||||
|
||||
# Create a session with the proxy configured
|
||||
session = requests.Session()
|
||||
session.proxies = {"http": proxy_url, "https": proxy_url}
|
||||
try:
|
||||
session.proxies = {"http": proxy_url, "https": proxy_url}
|
||||
|
||||
# Retry with exponential backoff
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
# Get external IP through the proxy using shared utility
|
||||
ip_info = get_ip_info(session)
|
||||
# Retry with exponential backoff
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
# Get external IP through the proxy using shared utility
|
||||
ip_info = get_ip_info(session)
|
||||
|
||||
if ip_info:
|
||||
actual_country = ip_info.get("country", "").upper()
|
||||
if ip_info:
|
||||
actual_country = ip_info.get("country", "").upper()
|
||||
|
||||
# Check if country matches (if we have an expected country)
|
||||
# ipinfo.io returns country codes (CA), but we may have full names (Canada)
|
||||
# Normalize both to country codes for comparison using shared utility
|
||||
if expected_country:
|
||||
# Convert expected country name to code if it's a full name
|
||||
expected_code = get_country_code(expected_country) or expected_country
|
||||
expected_code = expected_code.upper()
|
||||
# Check if country matches (if we have an expected country)
|
||||
# ipinfo.io returns country codes (CA), but we may have full names (Canada)
|
||||
# Normalize both to country codes for comparison using shared utility
|
||||
if expected_country:
|
||||
# Convert expected country name to code if it's a full name
|
||||
expected_code = get_country_code(expected_country) or expected_country
|
||||
expected_code = expected_code.upper()
|
||||
|
||||
if actual_country != expected_code:
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="gluetun_verify_mismatch",
|
||||
message=f"Region mismatch for {query_key}",
|
||||
context={
|
||||
"query_key": query_key,
|
||||
"expected_country": expected_code,
|
||||
"actual_country": actual_country,
|
||||
"ip": ip_info.get("ip"),
|
||||
"city": ip_info.get("city"),
|
||||
"org": ip_info.get("org"),
|
||||
},
|
||||
success=False,
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
raise RuntimeError(
|
||||
if actual_country != expected_code:
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="gluetun_verify_mismatch",
|
||||
message=f"Region mismatch for {query_key}",
|
||||
context={
|
||||
"query_key": query_key,
|
||||
"expected_country": expected_code,
|
||||
"actual_country": actual_country,
|
||||
"ip": ip_info.get("ip"),
|
||||
"city": ip_info.get("city"),
|
||||
"org": ip_info.get("org"),
|
||||
},
|
||||
success=False,
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
raise RuntimeError(
|
||||
f"Region mismatch for {container['provider']}:{container['region']}: "
|
||||
f"Expected '{expected_code}' but got '{actual_country}' "
|
||||
f"(IP: {ip_info.get('ip')}, City: {ip_info.get('city')})"
|
||||
)
|
||||
|
||||
# Verification successful - store IP info in container record
|
||||
if query_key in self.active_containers:
|
||||
self.active_containers[query_key]["public_ip"] = ip_info.get("ip")
|
||||
self.active_containers[query_key]["ip_country"] = actual_country
|
||||
self.active_containers[query_key]["ip_city"] = ip_info.get("city")
|
||||
self.active_containers[query_key]["ip_org"] = ip_info.get("org")
|
||||
# Verification successful - store IP info in container record
|
||||
if query_key in self.active_containers:
|
||||
self.active_containers[query_key]["public_ip"] = ip_info.get("ip")
|
||||
self.active_containers[query_key]["ip_country"] = actual_country
|
||||
self.active_containers[query_key]["ip_city"] = ip_info.get("city")
|
||||
self.active_containers[query_key]["ip_org"] = ip_info.get("org")
|
||||
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="INFO",
|
||||
operation="gluetun_verify_success",
|
||||
message=f"VPN IP verified for: {query_key}",
|
||||
context={
|
||||
"query_key": query_key,
|
||||
"ip": ip_info.get("ip"),
|
||||
"country": actual_country,
|
||||
"city": ip_info.get("city"),
|
||||
"org": ip_info.get("org"),
|
||||
"attempts": attempt + 1,
|
||||
},
|
||||
success=True,
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
return
|
||||
|
||||
# ip_info was None, retry
|
||||
last_error = "Failed to get IP info from ipinfo.io"
|
||||
|
||||
except RuntimeError:
|
||||
raise # Re-raise region mismatch errors immediately
|
||||
except Exception as e:
|
||||
last_error = str(e)
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="INFO",
|
||||
operation="gluetun_verify_success",
|
||||
message=f"VPN IP verified for: {query_key}",
|
||||
level="DEBUG",
|
||||
operation="gluetun_verify_retry",
|
||||
message=f"Verification attempt {attempt + 1} failed, retrying",
|
||||
context={
|
||||
"query_key": query_key,
|
||||
"ip": ip_info.get("ip"),
|
||||
"country": actual_country,
|
||||
"city": ip_info.get("city"),
|
||||
"org": ip_info.get("org"),
|
||||
"attempts": attempt + 1,
|
||||
"attempt": attempt + 1,
|
||||
"error": last_error,
|
||||
},
|
||||
success=True,
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
return
|
||||
|
||||
# ip_info was None, retry
|
||||
last_error = "Failed to get IP info from ipinfo.io"
|
||||
|
||||
except RuntimeError:
|
||||
raise # Re-raise region mismatch errors immediately
|
||||
except Exception as e:
|
||||
last_error = str(e)
|
||||
if debug_logger:
|
||||
debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="gluetun_verify_retry",
|
||||
message=f"Verification attempt {attempt + 1} failed, retrying",
|
||||
context={
|
||||
"query_key": query_key,
|
||||
"attempt": attempt + 1,
|
||||
"error": last_error,
|
||||
},
|
||||
)
|
||||
|
||||
# Wait before retry (exponential backoff)
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = 2**attempt # 1, 2, 4 seconds
|
||||
time.sleep(wait_time)
|
||||
# Wait before retry (exponential backoff)
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = 2**attempt # 1, 2, 4 seconds
|
||||
time.sleep(wait_time)
|
||||
finally:
|
||||
try:
|
||||
session.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# All retries exhausted
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
|
||||
@@ -142,12 +142,17 @@ class SurfsharkVPN(Proxy):
|
||||
)
|
||||
|
||||
# Get connection names from filtered servers
|
||||
connection_names = [x["connectionName"] for x in servers]
|
||||
if not servers:
|
||||
raise ValueError(f"Could not get random server for country '{country_id}': no servers found.")
|
||||
|
||||
try:
|
||||
return random.choice(connection_names)
|
||||
except (IndexError, KeyError):
|
||||
raise ValueError(f"Could not get random server for country '{country_id}'.")
|
||||
# Only include servers that actually have a connection name to avoid KeyError.
|
||||
connection_names = [x["connectionName"] for x in servers if "connectionName" in x]
|
||||
if not connection_names:
|
||||
raise ValueError(
|
||||
f"Could not get random server for country '{country_id}': no servers with connectionName found."
|
||||
)
|
||||
|
||||
return random.choice(connection_names)
|
||||
|
||||
@staticmethod
|
||||
def get_countries() -> list[dict]:
|
||||
|
||||
@@ -47,6 +47,7 @@ class WindscribeVPN(Proxy):
|
||||
|
||||
Supports:
|
||||
- Country code: "us", "ca", "gb"
|
||||
- Specific server: "sg007", "us150"
|
||||
- City selection: "us:seattle", "ca:toronto"
|
||||
"""
|
||||
query = query.lower()
|
||||
@@ -61,10 +62,20 @@ class WindscribeVPN(Proxy):
|
||||
server_map_key = f"{query}:{city}" if city else query
|
||||
if server_map_key in self.server_map:
|
||||
hostname = self.server_map[server_map_key]
|
||||
elif query in self.server_map and not city:
|
||||
elif query in self.server_map:
|
||||
hostname = self.server_map[query]
|
||||
else:
|
||||
if re.match(r"^[a-z]+$", query):
|
||||
server_match = re.match(r"^([a-z]{2})(\d+)$", query)
|
||||
if server_match:
|
||||
# Specific server selection, e.g., sg007, us150
|
||||
country_code, server_num = server_match.groups()
|
||||
hostname = self.get_specific_server(country_code, server_num)
|
||||
if not hostname:
|
||||
raise ValueError(
|
||||
f"No WindscribeVPN server found matching '{query}'. "
|
||||
f"Check the server number or use just '{country_code}' for a random server."
|
||||
)
|
||||
elif re.match(r"^[a-z]+$", query):
|
||||
hostname = self.get_random_server(query, city)
|
||||
else:
|
||||
raise ValueError(f"The query provided is unsupported and unrecognized: {query}")
|
||||
@@ -75,6 +86,38 @@ class WindscribeVPN(Proxy):
|
||||
hostname = hostname.split(':')[0]
|
||||
return f"https://{self.username}:{self.password}@{hostname}:443"
|
||||
|
||||
def get_specific_server(self, country_code: str, server_num: str) -> Optional[str]:
|
||||
"""
|
||||
Find a specific server by country code and server number.
|
||||
|
||||
Matches against hostnames like "sg-007.totallyacdn.com" for query "sg007".
|
||||
Tries both the raw number and zero-padded variants.
|
||||
|
||||
Args:
|
||||
country_code: Two-letter country code (e.g., "sg", "us")
|
||||
server_num: Server number as string (e.g., "007", "7", "150")
|
||||
|
||||
Returns:
|
||||
The matching hostname, or None if not found.
|
||||
"""
|
||||
num_stripped = server_num.lstrip("0") or "0"
|
||||
candidates = {
|
||||
f"{country_code}-{server_num}.",
|
||||
f"{country_code}-{num_stripped}.",
|
||||
f"{country_code}-{server_num.zfill(3)}.",
|
||||
}
|
||||
|
||||
for location in self.countries:
|
||||
if location.get("country_code", "").lower() != country_code:
|
||||
continue
|
||||
for group in location.get("groups", []):
|
||||
for host in group.get("hosts", []):
|
||||
hostname = host.get("hostname", "")
|
||||
if any(hostname.startswith(prefix) for prefix in candidates):
|
||||
return hostname
|
||||
|
||||
return None
|
||||
|
||||
def get_random_server(self, country_code: str, city: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Get a random server hostname for a country, optionally filtered by city.
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
import base64
|
||||
import logging
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from collections.abc import Generator
|
||||
from collections.abc import Callable, Generator
|
||||
from dataclasses import dataclass, field
|
||||
from http.cookiejar import CookieJar
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
from urllib.parse import urlparse
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
|
||||
import click
|
||||
import m3u8
|
||||
@@ -24,9 +25,65 @@ from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.title_cacher import TitleCacher, get_account_hash, get_region_from_proxy
|
||||
from unshackle.core.titles import Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapters, Tracks
|
||||
from unshackle.core.tracks.video import Video
|
||||
from unshackle.core.utilities import get_cached_ip_info, get_ip_info
|
||||
|
||||
|
||||
@dataclass
|
||||
class TrackRequest:
|
||||
"""Holds what the user requested for video codec and range selection.
|
||||
|
||||
Services read from this instead of ctx.parent.params for vcodec/range.
|
||||
|
||||
Attributes:
|
||||
codecs: Requested codecs from CLI. Empty list means no filter (accept any).
|
||||
ranges: Requested ranges from CLI. Defaults to [SDR].
|
||||
"""
|
||||
|
||||
codecs: list[Video.Codec] = field(default_factory=list)
|
||||
ranges: list[Video.Range] = field(default_factory=lambda: [Video.Range.SDR])
|
||||
best_available: bool = False
|
||||
|
||||
|
||||
def sanitize_proxy_for_log(uri: Optional[str]) -> Optional[str]:
|
||||
"""
|
||||
Sanitize a proxy URI for logs by redacting any embedded userinfo (username/password).
|
||||
|
||||
Examples:
|
||||
- http://user:pass@host:8080 -> http://REDACTED@host:8080
|
||||
- socks5h://user@host:1080 -> socks5h://REDACTED@host:1080
|
||||
"""
|
||||
if uri is None:
|
||||
return None
|
||||
if not isinstance(uri, str):
|
||||
return str(uri)
|
||||
if not uri:
|
||||
return uri
|
||||
|
||||
try:
|
||||
parsed = urlparse(uri)
|
||||
|
||||
# Handle schemeless proxies like "user:pass@host:port"
|
||||
if not parsed.scheme and not parsed.netloc and "@" in uri and "://" not in uri:
|
||||
# Parse as netloc using a dummy scheme, then strip scheme back out.
|
||||
dummy = urlparse(f"http://{uri}")
|
||||
netloc = dummy.netloc
|
||||
if "@" in netloc:
|
||||
netloc = f"REDACTED@{netloc.split('@', 1)[1]}"
|
||||
# urlparse("http://...") sets path to "" for typical netloc-only strings; keep it just in case.
|
||||
return f"{netloc}{dummy.path}"
|
||||
|
||||
netloc = parsed.netloc
|
||||
if "@" in netloc:
|
||||
netloc = f"REDACTED@{netloc.split('@', 1)[1]}"
|
||||
|
||||
return urlunparse(parsed._replace(netloc=netloc))
|
||||
except Exception:
|
||||
if "@" in uri:
|
||||
return f"REDACTED@{uri.split('@', 1)[1]}"
|
||||
return uri
|
||||
|
||||
|
||||
class Service(metaclass=ABCMeta):
|
||||
"""The Service Base Class."""
|
||||
|
||||
@@ -50,6 +107,16 @@ class Service(metaclass=ABCMeta):
|
||||
self.credential = None # Will be set in authenticate()
|
||||
self.current_region = None # Will be set based on proxy/geolocation
|
||||
|
||||
# Set track request from CLI params - services can read/override in their __init__
|
||||
vcodec = ctx.parent.params.get("vcodec") if ctx.parent else None
|
||||
range_ = ctx.parent.params.get("range_") if ctx.parent else None
|
||||
best_available = ctx.parent.params.get("best_available", False) if ctx.parent else False
|
||||
self.track_request = TrackRequest(
|
||||
codecs=list(vcodec) if vcodec else [],
|
||||
ranges=list(range_) if range_ else [Video.Range.SDR],
|
||||
best_available=bool(best_available),
|
||||
)
|
||||
|
||||
if not ctx.parent or not ctx.parent.params.get("no_proxy"):
|
||||
if ctx.parent:
|
||||
proxy = ctx.parent.params["proxy"]
|
||||
@@ -75,7 +142,9 @@ class Service(metaclass=ABCMeta):
|
||||
# Check if there's a mapping for this query
|
||||
mapped_value = proxy_map.get(full_proxy_key)
|
||||
if mapped_value:
|
||||
self.log.info(f"Found service-specific proxy mapping: {full_proxy_key} -> {mapped_value}")
|
||||
self.log.info(
|
||||
f"Found service-specific proxy mapping: {full_proxy_key} -> {sanitize_proxy_for_log(mapped_value)}"
|
||||
)
|
||||
# Query the proxy provider with the mapped value
|
||||
if proxy_provider_name:
|
||||
# Specific provider requested
|
||||
@@ -87,9 +156,13 @@ class Service(metaclass=ABCMeta):
|
||||
mapped_proxy_uri = proxy_provider.get_proxy(mapped_value)
|
||||
if mapped_proxy_uri:
|
||||
proxy = mapped_proxy_uri
|
||||
self.log.info(f"Using mapped proxy from {proxy_provider.__class__.__name__}: {proxy}")
|
||||
self.log.info(
|
||||
f"Using mapped proxy from {proxy_provider.__class__.__name__}: {sanitize_proxy_for_log(proxy)}"
|
||||
)
|
||||
else:
|
||||
self.log.warning(f"Failed to get proxy for mapped value '{mapped_value}', using default")
|
||||
self.log.warning(
|
||||
f"Failed to get proxy for mapped value '{sanitize_proxy_for_log(mapped_value)}', using default"
|
||||
)
|
||||
else:
|
||||
self.log.warning(f"Proxy provider '{proxy_provider_name}' not found, using default proxy")
|
||||
else:
|
||||
@@ -98,10 +171,14 @@ class Service(metaclass=ABCMeta):
|
||||
mapped_proxy_uri = proxy_provider.get_proxy(mapped_value)
|
||||
if mapped_proxy_uri:
|
||||
proxy = mapped_proxy_uri
|
||||
self.log.info(f"Using mapped proxy from {proxy_provider.__class__.__name__}: {proxy}")
|
||||
self.log.info(
|
||||
f"Using mapped proxy from {proxy_provider.__class__.__name__}: {sanitize_proxy_for_log(proxy)}"
|
||||
)
|
||||
break
|
||||
else:
|
||||
self.log.warning(f"No provider could resolve mapped value '{mapped_value}', using default")
|
||||
self.log.warning(
|
||||
f"No provider could resolve mapped value '{sanitize_proxy_for_log(mapped_value)}', using default"
|
||||
)
|
||||
|
||||
if not proxy:
|
||||
# don't override the explicit proxy set by the user, even if they may be geoblocked
|
||||
@@ -156,6 +233,76 @@ class Service(metaclass=ABCMeta):
|
||||
self.log.debug(f"Failed to get cached IP info: {e}")
|
||||
self.current_region = None
|
||||
|
||||
def _get_tracks_for_variants(
|
||||
self,
|
||||
title: Title_T,
|
||||
fetch_fn: Callable[..., Tracks],
|
||||
) -> Tracks:
|
||||
"""Call fetch_fn for each codec/range combo in track_request, merge results.
|
||||
|
||||
Services that need separate API calls per codec/range combo can use this
|
||||
helper from their get_tracks() implementation.
|
||||
|
||||
The fetch_fn signature should be: (title, codec, range_) -> Tracks
|
||||
|
||||
For HYBRID range, fetch_fn is called with HDR10 and DV separately and
|
||||
the DV video tracks are merged into the HDR10 result.
|
||||
|
||||
Args:
|
||||
title: The title being processed.
|
||||
fetch_fn: A callable that fetches tracks for a specific codec/range.
|
||||
"""
|
||||
all_tracks = Tracks()
|
||||
first = True
|
||||
|
||||
codecs = self.track_request.codecs or [None]
|
||||
ranges = self.track_request.ranges or [Video.Range.SDR]
|
||||
|
||||
for range_val in ranges:
|
||||
if range_val == Video.Range.HYBRID:
|
||||
# HYBRID: fetch HDR10 first (full tracks), then DV (video only)
|
||||
for codec_val in codecs:
|
||||
try:
|
||||
hdr_tracks = fetch_fn(title, codec=codec_val, range_=Video.Range.HDR10)
|
||||
except (ValueError, SystemExit) as e:
|
||||
if self.track_request.best_available:
|
||||
self.log.warning(f" - HDR10 not available for HYBRID, skipping ({e})")
|
||||
continue
|
||||
raise
|
||||
if first:
|
||||
all_tracks.add(hdr_tracks, warn_only=True)
|
||||
first = False
|
||||
else:
|
||||
for video in hdr_tracks.videos:
|
||||
all_tracks.add(video, warn_only=True)
|
||||
|
||||
try:
|
||||
dv_tracks = fetch_fn(title, codec=codec_val, range_=Video.Range.DV)
|
||||
for video in dv_tracks.videos:
|
||||
all_tracks.add(video, warn_only=True)
|
||||
except (ValueError, SystemExit):
|
||||
self.log.info(" - No DolbyVision manifest available for HYBRID")
|
||||
else:
|
||||
for codec_val in codecs:
|
||||
try:
|
||||
tracks = fetch_fn(title, codec=codec_val, range_=range_val)
|
||||
except (ValueError, SystemExit) as e:
|
||||
if self.track_request.best_available:
|
||||
codec_name = codec_val.name if codec_val else "default"
|
||||
self.log.warning(
|
||||
f" - {range_val.name}/{codec_name} not available, skipping ({e})"
|
||||
)
|
||||
continue
|
||||
raise
|
||||
if first:
|
||||
all_tracks.add(tracks, warn_only=True)
|
||||
first = False
|
||||
else:
|
||||
for video in tracks.videos:
|
||||
all_tracks.add(video, warn_only=True)
|
||||
|
||||
return all_tracks
|
||||
|
||||
# Optional Abstract functions
|
||||
# The following functions may be implemented by the Service.
|
||||
# Otherwise, the base service code (if any) of the function will be executed on call.
|
||||
@@ -173,7 +320,7 @@ class Service(metaclass=ABCMeta):
|
||||
session.mount(
|
||||
"https://",
|
||||
HTTPAdapter(
|
||||
max_retries=Retry(total=15, backoff_factor=0.2, status_forcelist=[429, 500, 502, 503, 504]),
|
||||
max_retries=Retry(total=5, backoff_factor=0.2, status_forcelist=[429, 500, 502, 503, 504]),
|
||||
pool_block=True,
|
||||
),
|
||||
)
|
||||
@@ -412,4 +559,4 @@ class Service(metaclass=ABCMeta):
|
||||
"""
|
||||
|
||||
|
||||
__all__ = ("Service",)
|
||||
__all__ = ("Service", "TrackRequest")
|
||||
|
||||
@@ -56,7 +56,7 @@ class MaxRetriesError(exceptions.RequestException):
|
||||
class CurlSession(Session):
|
||||
def __init__(
|
||||
self,
|
||||
max_retries: int = 10,
|
||||
max_retries: int = 5,
|
||||
backoff_factor: float = 0.2,
|
||||
max_backoff: float = 60.0,
|
||||
status_forcelist: list[int] | None = None,
|
||||
@@ -150,7 +150,7 @@ def session(
|
||||
browser: Browser to impersonate (e.g. "chrome124", "firefox", "safari") OR
|
||||
fingerprint preset name (e.g. "okhttp4").
|
||||
Uses the configured default from curl_impersonate.browser if not specified.
|
||||
Available presets: okhttp4
|
||||
Available presets: okhttp4, okhttp5
|
||||
See https://github.com/lexiforest/curl_cffi#sessions for browser options.
|
||||
ja3: Custom JA3 TLS fingerprint string (format: "SSLVersion,Ciphers,Extensions,Curves,PointFormats").
|
||||
When provided, curl_cffi will use this exact TLS fingerprint instead of the browser's default.
|
||||
@@ -172,7 +172,7 @@ def session(
|
||||
- cert: Client certificate (str or tuple)
|
||||
|
||||
Extra arguments for retry handler:
|
||||
- max_retries: Maximum number of retries (int, default 10)
|
||||
- max_retries: Maximum number of retries (int, default 5)
|
||||
- backoff_factor: Backoff factor (float, default 0.2)
|
||||
- max_backoff: Maximum backoff time (float, default 60.0)
|
||||
- status_forcelist: List of status codes to force retry (list, default [429, 500, 502, 503, 504])
|
||||
|
||||
@@ -286,4 +286,4 @@ class Series(SortedKeyList, ABC):
|
||||
return tree
|
||||
|
||||
|
||||
__all__ = ("Episode", "Series")
|
||||
__all__ = ("Episode", "Series")
|
||||
@@ -67,109 +67,122 @@ class Movie(Title):
|
||||
primary_audio_track = sorted_audio[0]
|
||||
unique_audio_languages = len({x.language.split("-")[0] for x in media_info.audio_tracks if x.language})
|
||||
|
||||
def _get_resolution_token(track: Any) -> str:
|
||||
if not track or not getattr(track, "height", None):
|
||||
return ""
|
||||
resolution = track.height
|
||||
try:
|
||||
dar = getattr(track, "other_display_aspect_ratio", None) or []
|
||||
if dar and dar[0]:
|
||||
aspect_ratio = [int(float(plane)) for plane in str(dar[0]).split(":")]
|
||||
if len(aspect_ratio) == 1:
|
||||
aspect_ratio.append(1)
|
||||
if aspect_ratio[0] / aspect_ratio[1] not in (16 / 9, 4 / 3):
|
||||
resolution = int(track.width * (9 / 16))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
scan_suffix = "p"
|
||||
scan_type = getattr(track, "scan_type", None)
|
||||
if scan_type and str(scan_type).lower() == "interlaced":
|
||||
scan_suffix = "i"
|
||||
return f"{resolution}{scan_suffix}"
|
||||
|
||||
# Name (Year)
|
||||
name = str(self).replace("$", "S") # e.g., Arli$$
|
||||
|
||||
if config.scene_naming:
|
||||
# Resolution
|
||||
if primary_video_track:
|
||||
resolution = primary_video_track.height
|
||||
aspect_ratio = [
|
||||
int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
|
||||
]
|
||||
if len(aspect_ratio) == 1:
|
||||
# e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
|
||||
aspect_ratio.append(1)
|
||||
if aspect_ratio[0] / aspect_ratio[1] not in (16 / 9, 4 / 3):
|
||||
# We want the resolution represented in a 4:3 or 16:9 canvas.
|
||||
# If it's not 4:3 or 16:9, calculate as if it's inside a 16:9 canvas,
|
||||
# otherwise the track's height value is fine.
|
||||
# We are assuming this title is some weird aspect ratio so most
|
||||
# likely a movie or HD source, so it's most likely widescreen so
|
||||
# 16:9 canvas makes the most sense.
|
||||
resolution = int(primary_video_track.width * (9 / 16))
|
||||
# Determine scan type suffix - default to "p", use "i" only if explicitly interlaced
|
||||
scan_suffix = "p"
|
||||
scan_type = getattr(primary_video_track, 'scan_type', None)
|
||||
if scan_type and str(scan_type).lower() == "interlaced":
|
||||
scan_suffix = "i"
|
||||
name += f" {resolution}{scan_suffix}"
|
||||
if getattr(config, "repack", False):
|
||||
name += " REPACK"
|
||||
|
||||
# Service (use track source if available)
|
||||
if show_service:
|
||||
source_name = None
|
||||
if self.tracks:
|
||||
first_track = next(iter(self.tracks), None)
|
||||
if first_track and hasattr(first_track, "source") and first_track.source:
|
||||
source_name = first_track.source
|
||||
name += f" {source_name or self.service.__name__}"
|
||||
if primary_video_track:
|
||||
resolution_token = _get_resolution_token(primary_video_track)
|
||||
if resolution_token:
|
||||
name += f" {resolution_token}"
|
||||
|
||||
# 'WEB-DL'
|
||||
name += " WEB-DL"
|
||||
# Service (use track source if available)
|
||||
if show_service:
|
||||
source_name = None
|
||||
if self.tracks:
|
||||
first_track = next(iter(self.tracks), None)
|
||||
if first_track and hasattr(first_track, "source") and first_track.source:
|
||||
source_name = first_track.source
|
||||
name += f" {source_name or self.service.__name__}"
|
||||
|
||||
# DUAL
|
||||
if unique_audio_languages == 2:
|
||||
name += " DUAL"
|
||||
# 'WEB-DL'
|
||||
name += " WEB-DL"
|
||||
|
||||
# MULTi
|
||||
if unique_audio_languages > 2:
|
||||
name += " MULTi"
|
||||
# DUAL
|
||||
if unique_audio_languages == 2:
|
||||
name += " DUAL"
|
||||
|
||||
# Audio Codec + Channels (+ feature)
|
||||
if primary_audio_track:
|
||||
codec = primary_audio_track.format
|
||||
channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
|
||||
if channel_layout:
|
||||
channels = float(
|
||||
sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
|
||||
)
|
||||
else:
|
||||
channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
|
||||
channels = float(channel_count)
|
||||
# MULTi
|
||||
if unique_audio_languages > 2:
|
||||
name += " MULTi"
|
||||
|
||||
features = primary_audio_track.format_additionalfeatures or ""
|
||||
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
|
||||
if "JOC" in features or primary_audio_track.joc:
|
||||
name += " Atmos"
|
||||
|
||||
# Video (dynamic range + hfr +) Codec
|
||||
if primary_video_track:
|
||||
codec = primary_video_track.format
|
||||
hdr_format = primary_video_track.hdr_format_commercial
|
||||
hdr_format_full = primary_video_track.hdr_format or ""
|
||||
trc = (
|
||||
primary_video_track.transfer_characteristics
|
||||
or primary_video_track.transfer_characteristics_original
|
||||
or ""
|
||||
# Audio Codec + Channels (+ feature)
|
||||
if primary_audio_track:
|
||||
codec = primary_audio_track.format
|
||||
channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
|
||||
if channel_layout:
|
||||
channels = float(
|
||||
sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
|
||||
)
|
||||
frame_rate = float(primary_video_track.frame_rate)
|
||||
else:
|
||||
channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
|
||||
channels = float(channel_count)
|
||||
|
||||
# Primary HDR format detection
|
||||
if hdr_format:
|
||||
if hdr_format_full.startswith("Dolby Vision"):
|
||||
name += " DV"
|
||||
if any(
|
||||
indicator in (hdr_format_full + " " + hdr_format)
|
||||
for indicator in ["HDR10", "SMPTE ST 2086"]
|
||||
):
|
||||
name += " HDR"
|
||||
else:
|
||||
name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
|
||||
elif "HLG" in trc or "Hybrid Log-Gamma" in trc or "ARIB STD-B67" in trc or "arib-std-b67" in trc.lower():
|
||||
name += " HLG"
|
||||
elif any(indicator in trc for indicator in ["PQ", "SMPTE ST 2084", "BT.2100"]) or "smpte2084" in trc.lower() or "bt.2020-10" in trc.lower():
|
||||
name += " HDR"
|
||||
if frame_rate > 30:
|
||||
name += " HFR"
|
||||
name += f" {VIDEO_CODEC_MAP.get(codec, codec)}"
|
||||
features = primary_audio_track.format_additionalfeatures or ""
|
||||
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
|
||||
if "JOC" in features or primary_audio_track.joc:
|
||||
name += " Atmos"
|
||||
|
||||
if config.tag:
|
||||
name += f"-{config.tag}"
|
||||
# Video (dynamic range + hfr +) Codec
|
||||
if primary_video_track:
|
||||
codec = primary_video_track.format
|
||||
hdr_format = primary_video_track.hdr_format_commercial
|
||||
hdr_format_full = primary_video_track.hdr_format or ""
|
||||
trc = (
|
||||
primary_video_track.transfer_characteristics
|
||||
or primary_video_track.transfer_characteristics_original
|
||||
or ""
|
||||
)
|
||||
frame_rate = float(primary_video_track.frame_rate)
|
||||
|
||||
return sanitize_filename(name)
|
||||
else:
|
||||
# Simple naming style without technical details - use spaces instead of dots
|
||||
return sanitize_filename(name, " ")
|
||||
def _append_token(current: str, token: Optional[str]) -> str:
|
||||
token = (token or "").strip()
|
||||
current = current.rstrip()
|
||||
if not token:
|
||||
return current
|
||||
if current.endswith(f" {token}"):
|
||||
return current
|
||||
return f"{current} {token}"
|
||||
|
||||
# Primary HDR format detection
|
||||
if hdr_format:
|
||||
if hdr_format_full.startswith("Dolby Vision"):
|
||||
name = _append_token(name, "DV")
|
||||
if any(
|
||||
indicator in (hdr_format_full + " " + hdr_format)
|
||||
for indicator in ["HDR10", "SMPTE ST 2086"]
|
||||
):
|
||||
name = _append_token(name, "HDR")
|
||||
elif "HDR Vivid" in hdr_format:
|
||||
name = _append_token(name, "HDR")
|
||||
else:
|
||||
dynamic_range = DYNAMIC_RANGE_MAP.get(hdr_format) or hdr_format or ""
|
||||
name = _append_token(name, dynamic_range)
|
||||
elif "HLG" in trc or "Hybrid Log-Gamma" in trc or "ARIB STD-B67" in trc or "arib-std-b67" in trc.lower():
|
||||
name += " HLG"
|
||||
elif any(indicator in trc for indicator in ["PQ", "SMPTE ST 2084", "BT.2100"]) or "smpte2084" in trc.lower() or "bt.2020-10" in trc.lower():
|
||||
name += " HDR"
|
||||
if frame_rate > 30:
|
||||
name += " HFR"
|
||||
name += f" {VIDEO_CODEC_MAP.get(codec, codec)}"
|
||||
|
||||
if config.tag:
|
||||
name += f"-{config.tag}"
|
||||
|
||||
return sanitize_filename(name, "." if config.scene_naming else " ")
|
||||
|
||||
|
||||
class Movies(SortedKeyList, ABC):
|
||||
|
||||
@@ -100,31 +100,30 @@ class Song(Title):
|
||||
# NN. Song Name
|
||||
name = str(self).split(" / ")[1]
|
||||
|
||||
if config.scene_naming:
|
||||
# Service (use track source if available)
|
||||
if show_service:
|
||||
source_name = None
|
||||
if self.tracks:
|
||||
first_track = next(iter(self.tracks), None)
|
||||
if first_track and hasattr(first_track, "source") and first_track.source:
|
||||
source_name = first_track.source
|
||||
name += f" {source_name or self.service.__name__}"
|
||||
if getattr(config, "repack", False):
|
||||
name += " REPACK"
|
||||
|
||||
# 'WEB-DL'
|
||||
name += " WEB-DL"
|
||||
# Service (use track source if available)
|
||||
if show_service:
|
||||
source_name = None
|
||||
if self.tracks:
|
||||
first_track = next(iter(self.tracks), None)
|
||||
if first_track and hasattr(first_track, "source") and first_track.source:
|
||||
source_name = first_track.source
|
||||
name += f" {source_name or self.service.__name__}"
|
||||
|
||||
# Audio Codec + Channels (+ feature)
|
||||
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
|
||||
if "JOC" in features or audio_track.joc:
|
||||
name += " Atmos"
|
||||
# 'WEB-DL'
|
||||
name += " WEB-DL"
|
||||
|
||||
if config.tag:
|
||||
name += f"-{config.tag}"
|
||||
# Audio Codec + Channels (+ feature)
|
||||
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
|
||||
if "JOC" in features or audio_track.joc:
|
||||
name += " Atmos"
|
||||
|
||||
return sanitize_filename(name, " ")
|
||||
else:
|
||||
# Simple naming style without technical details
|
||||
return sanitize_filename(name, " ")
|
||||
if config.tag:
|
||||
name += f"-{config.tag}"
|
||||
|
||||
return sanitize_filename(name, " ")
|
||||
|
||||
|
||||
class Album(SortedKeyList, ABC):
|
||||
|
||||
@@ -2,6 +2,7 @@ from __future__ import annotations
|
||||
|
||||
import mimetypes
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
from urllib.parse import urlparse
|
||||
@@ -10,6 +11,7 @@ from zlib import crc32
|
||||
import requests
|
||||
|
||||
from unshackle.core.config import config
|
||||
from unshackle.core.constants import DOWNLOAD_LICENCE_ONLY
|
||||
|
||||
|
||||
class Attachment:
|
||||
@@ -43,6 +45,8 @@ class Attachment:
|
||||
if path is None and url is None:
|
||||
raise ValueError("Either path or url must be provided.")
|
||||
|
||||
self.url = url
|
||||
|
||||
if url:
|
||||
if not isinstance(url, str):
|
||||
raise ValueError("The attachment URL must be a string.")
|
||||
@@ -53,45 +57,60 @@ class Attachment:
|
||||
|
||||
# Use provided name for the file if available
|
||||
if name:
|
||||
file_name = f"{name.replace(' ', '_')}{os.path.splitext(file_name)[1]}"
|
||||
safe_name = re.sub(r'[<>:"/\\|?*]', "", name).replace(" ", "_")
|
||||
file_name = f"{safe_name}{os.path.splitext(file_name)[1]}"
|
||||
|
||||
download_path = config.directories.temp / file_name
|
||||
|
||||
# Download the file
|
||||
try:
|
||||
session = session or requests.Session()
|
||||
response = session.get(url, stream=True)
|
||||
response.raise_for_status()
|
||||
config.directories.temp.mkdir(parents=True, exist_ok=True)
|
||||
download_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
# Download the file unless we're in license-only mode
|
||||
if DOWNLOAD_LICENCE_ONLY.is_set():
|
||||
path = None
|
||||
else:
|
||||
try:
|
||||
if session is None:
|
||||
with requests.Session() as session:
|
||||
response = session.get(url, stream=True)
|
||||
response.raise_for_status()
|
||||
else:
|
||||
response = session.get(url, stream=True)
|
||||
response.raise_for_status()
|
||||
config.directories.temp.mkdir(parents=True, exist_ok=True)
|
||||
download_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with open(download_path, "wb") as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
with open(download_path, "wb") as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
|
||||
path = download_path
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to download attachment from URL: {e}")
|
||||
path = download_path
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to download attachment from URL: {e}")
|
||||
|
||||
if not isinstance(path, (str, Path)):
|
||||
raise ValueError("The attachment path must be provided.")
|
||||
if path is not None and not isinstance(path, (str, Path)):
|
||||
raise ValueError(
|
||||
f"Invalid attachment path type: expected str or Path, got {type(path).__name__}."
|
||||
)
|
||||
|
||||
path = Path(path)
|
||||
if not path.exists():
|
||||
raise ValueError("The attachment file does not exist.")
|
||||
if path is not None:
|
||||
path = Path(path)
|
||||
if not path.exists():
|
||||
raise ValueError("The attachment file does not exist.")
|
||||
|
||||
name = (name or path.stem).strip()
|
||||
if path is not None:
|
||||
name = (name or path.stem).strip()
|
||||
else:
|
||||
name = (name or Path(file_name).stem).strip()
|
||||
mime_type = (mime_type or "").strip() or None
|
||||
description = (description or "").strip() or None
|
||||
|
||||
if not mime_type:
|
||||
suffix = path.suffix.lower() if path is not None else Path(file_name).suffix.lower()
|
||||
mime_type = {
|
||||
".ttf": "application/x-truetype-font",
|
||||
".otf": "application/vnd.ms-opentype",
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".png": "image/png",
|
||||
}.get(path.suffix.lower(), mimetypes.guess_type(path)[0])
|
||||
}.get(suffix, mimetypes.guess_type(file_name if path is None else path)[0])
|
||||
if not mime_type:
|
||||
raise ValueError("The attachment mime-type could not be automatically detected.")
|
||||
|
||||
@@ -111,13 +130,18 @@ class Attachment:
|
||||
@property
|
||||
def id(self) -> str:
|
||||
"""Compute an ID from the attachment data."""
|
||||
checksum = crc32(self.path.read_bytes())
|
||||
if self.path and self.path.exists():
|
||||
checksum = crc32(self.path.read_bytes())
|
||||
elif self.url:
|
||||
checksum = crc32(self.url.encode("utf8"))
|
||||
else:
|
||||
checksum = crc32(self.name.encode("utf8"))
|
||||
return hex(checksum)
|
||||
|
||||
def delete(self) -> None:
|
||||
if self.path:
|
||||
if self.path and self.path.exists():
|
||||
self.path.unlink()
|
||||
self.path = None
|
||||
self.path = None
|
||||
|
||||
@classmethod
|
||||
def from_url(
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
@@ -8,14 +10,16 @@ from pathlib import Path
|
||||
from rich.padding import Padding
|
||||
from rich.rule import Rule
|
||||
|
||||
from unshackle.core.binaries import FFMPEG, DoviTool, HDR10PlusTool
|
||||
from unshackle.core.binaries import FFMPEG, DoviTool, FFProbe, HDR10PlusTool
|
||||
from unshackle.core.config import config
|
||||
from unshackle.core.console import console
|
||||
from unshackle.core.utilities import get_debug_logger
|
||||
|
||||
|
||||
class Hybrid:
|
||||
def __init__(self, videos, source) -> None:
|
||||
self.log = logging.getLogger("hybrid")
|
||||
self.debug_logger = get_debug_logger()
|
||||
|
||||
"""
|
||||
Takes the Dolby Vision and HDR10(+) streams out of the VideoTracks.
|
||||
@@ -41,6 +45,19 @@ class Hybrid:
|
||||
|
||||
console.print(Padding(Rule(f"[rule.text]HDR10+DV Hybrid ({self.resolution})"), (1, 2)))
|
||||
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_init",
|
||||
message="Starting HDR10+DV hybrid processing",
|
||||
context={
|
||||
"source": source,
|
||||
"resolution": self.resolution,
|
||||
"video_count": len(videos),
|
||||
"video_ranges": [str(v.range) for v in videos],
|
||||
},
|
||||
)
|
||||
|
||||
for video in self.videos:
|
||||
if not video.path or not os.path.exists(video.path):
|
||||
raise ValueError(f"Video track {video.id} was not downloaded before injection.")
|
||||
@@ -50,18 +67,18 @@ class Hybrid:
|
||||
has_hdr10 = any(video.range == Video.Range.HDR10 for video in self.videos)
|
||||
has_hdr10p = any(video.range == Video.Range.HDR10P for video in self.videos)
|
||||
|
||||
if not has_hdr10:
|
||||
raise ValueError("No HDR10 track available for hybrid processing.")
|
||||
if not has_hdr10 and not has_hdr10p:
|
||||
raise ValueError("No HDR10 or HDR10+ track available for hybrid processing.")
|
||||
|
||||
# If we have HDR10+ but no DV, we can convert HDR10+ to DV
|
||||
if not has_dv and has_hdr10p:
|
||||
self.log.info("✓ No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
|
||||
console.status("No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
|
||||
self.hdr10plus_to_dv = True
|
||||
elif not has_dv:
|
||||
raise ValueError("No DV track available and no HDR10+ to convert.")
|
||||
|
||||
if os.path.isfile(config.directories.temp / self.hevc_file):
|
||||
self.log.info("✓ Already Injected")
|
||||
console.status("Already Injected")
|
||||
return
|
||||
|
||||
for video in videos:
|
||||
@@ -89,14 +106,34 @@ class Hybrid:
|
||||
self.extract_rpu(dv_video)
|
||||
if os.path.isfile(config.directories.temp / "RPU_UNT.bin"):
|
||||
self.rpu_file = "RPU_UNT.bin"
|
||||
self.level_6()
|
||||
# Mode 3 conversion already done during extraction when not untouched
|
||||
elif os.path.isfile(config.directories.temp / "RPU.bin"):
|
||||
# RPU already extracted with mode 3
|
||||
pass
|
||||
|
||||
# Edit L6 with actual luminance values from RPU, then L5 active area
|
||||
self.level_6()
|
||||
base_video = next(
|
||||
(v for v in videos if v.range in (Video.Range.HDR10, Video.Range.HDR10P)), None
|
||||
)
|
||||
if base_video and base_video.path:
|
||||
self.level_5(base_video.path)
|
||||
|
||||
self.injecting()
|
||||
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="INFO",
|
||||
operation="hybrid_complete",
|
||||
message="Injection Completed",
|
||||
context={
|
||||
"hdr_type": self.hdr_type,
|
||||
"resolution": self.resolution,
|
||||
"hdr10plus_to_dv": self.hdr10plus_to_dv,
|
||||
"rpu_file": self.rpu_file,
|
||||
"output_file": self.hevc_file,
|
||||
},
|
||||
)
|
||||
self.log.info("✓ Injection Completed")
|
||||
if self.source == ("itunes" or "appletvplus"):
|
||||
Path.unlink(config.directories.temp / "hdr10.mkv")
|
||||
@@ -104,6 +141,10 @@ class Hybrid:
|
||||
Path.unlink(config.directories.temp / "HDR10.hevc", missing_ok=True)
|
||||
Path.unlink(config.directories.temp / "DV.hevc", missing_ok=True)
|
||||
Path.unlink(config.directories.temp / f"{self.rpu_file}", missing_ok=True)
|
||||
Path.unlink(config.directories.temp / "RPU_L6.bin", missing_ok=True)
|
||||
Path.unlink(config.directories.temp / "RPU_L5.bin", missing_ok=True)
|
||||
Path.unlink(config.directories.temp / "L5.json", missing_ok=True)
|
||||
Path.unlink(config.directories.temp / "L6.json", missing_ok=True)
|
||||
|
||||
def ffmpeg_simple(self, save_path, output):
|
||||
"""Simple ffmpeg execution without progress tracking"""
|
||||
@@ -121,20 +162,41 @@ class Hybrid:
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
return p.returncode
|
||||
return p
|
||||
|
||||
def extract_stream(self, save_path, type_):
|
||||
output = Path(config.directories.temp / f"{type_}.hevc")
|
||||
|
||||
with console.status(f"Extracting {type_} stream...", spinner="dots"):
|
||||
returncode = self.ffmpeg_simple(save_path, output)
|
||||
result = self.ffmpeg_simple(save_path, output)
|
||||
|
||||
if returncode:
|
||||
if result.returncode:
|
||||
output.unlink(missing_ok=True)
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_extract_stream",
|
||||
message=f"Failed extracting {type_} stream",
|
||||
context={
|
||||
"type": type_,
|
||||
"input": str(save_path),
|
||||
"output": str(output),
|
||||
"returncode": result.returncode,
|
||||
"stderr": (result.stderr or b"").decode(errors="replace"),
|
||||
"stdout": (result.stdout or b"").decode(errors="replace"),
|
||||
},
|
||||
)
|
||||
self.log.error(f"x Failed extracting {type_} stream")
|
||||
sys.exit(1)
|
||||
|
||||
self.log.info(f"Extracted {type_} stream")
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_extract_stream",
|
||||
message=f"Extracted {type_} stream",
|
||||
context={"type": type_, "input": str(save_path), "output": str(output)},
|
||||
success=True,
|
||||
)
|
||||
|
||||
def extract_rpu(self, video, untouched=False):
|
||||
if os.path.isfile(config.directories.temp / "RPU.bin") or os.path.isfile(
|
||||
@@ -161,58 +223,326 @@ class Hybrid:
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
rpu_name = "RPU" if not untouched else "RPU_UNT"
|
||||
if rpu_extraction.returncode:
|
||||
Path.unlink(config.directories.temp / f"{'RPU' if not untouched else 'RPU_UNT'}.bin")
|
||||
Path.unlink(config.directories.temp / f"{rpu_name}.bin")
|
||||
stderr_text = rpu_extraction.stderr.decode(errors="replace") if rpu_extraction.stderr else ""
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_extract_rpu",
|
||||
message=f"Failed extracting{' untouched ' if untouched else ' '}RPU",
|
||||
context={
|
||||
"untouched": untouched,
|
||||
"returncode": rpu_extraction.returncode,
|
||||
"stderr": stderr_text,
|
||||
"args": [str(a) for a in extraction_args],
|
||||
},
|
||||
)
|
||||
if b"MAX_PQ_LUMINANCE" in rpu_extraction.stderr:
|
||||
self.extract_rpu(video, untouched=True)
|
||||
elif b"Invalid PPS index" in rpu_extraction.stderr:
|
||||
raise ValueError("Dolby Vision VideoTrack seems to be corrupt")
|
||||
else:
|
||||
raise ValueError(f"Failed extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")
|
||||
elif self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_extract_rpu",
|
||||
message=f"Extracted{' untouched ' if untouched else ' '}RPU from Dolby Vision stream",
|
||||
context={"untouched": untouched, "output": f"{rpu_name}.bin"},
|
||||
success=True,
|
||||
)
|
||||
|
||||
self.log.info(f"Extracted{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")
|
||||
def level_5(self, input_video):
|
||||
"""Generate Level 5 active area metadata via crop detection on the HDR10 stream.
|
||||
|
||||
def level_6(self):
|
||||
"""Edit RPU Level 6 values"""
|
||||
with open(config.directories.temp / "L6.json", "w+") as level6_file:
|
||||
level6 = {
|
||||
"cm_version": "V29",
|
||||
"length": 0,
|
||||
"level6": {
|
||||
"max_display_mastering_luminance": 1000,
|
||||
"min_display_mastering_luminance": 1,
|
||||
"max_content_light_level": 0,
|
||||
"max_frame_average_light_level": 0,
|
||||
},
|
||||
}
|
||||
This resolves mismatches where DV has no black bars but HDR10 does (or vice versa)
|
||||
by telling the display the correct active area.
|
||||
"""
|
||||
if os.path.isfile(config.directories.temp / "RPU_L5.bin"):
|
||||
return
|
||||
|
||||
json.dump(level6, level6_file, indent=3)
|
||||
ffprobe_bin = str(FFProbe) if FFProbe else "ffprobe"
|
||||
ffmpeg_bin = str(FFMPEG) if FFMPEG else "ffmpeg"
|
||||
|
||||
if not os.path.isfile(config.directories.temp / "RPU_L6.bin"):
|
||||
with console.status("Editing RPU Level 6 values...", spinner="dots"):
|
||||
level6 = subprocess.run(
|
||||
# Get video duration for random sampling
|
||||
with console.status("Detecting active area (crop detection)...", spinner="dots"):
|
||||
result_duration = subprocess.run(
|
||||
[ffprobe_bin, "-v", "error", "-show_entries", "format=duration", "-of", "json", str(input_video)],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result_duration.returncode != 0:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="WARNING",
|
||||
operation="hybrid_level5",
|
||||
message="Could not probe video duration",
|
||||
context={"returncode": result_duration.returncode, "stderr": (result_duration.stderr or "")},
|
||||
)
|
||||
self.log.warning("Could not probe video duration, skipping L5 crop detection")
|
||||
return
|
||||
|
||||
duration_info = json.loads(result_duration.stdout)
|
||||
duration = float(duration_info["format"]["duration"])
|
||||
|
||||
# Get video resolution for proper border calculation
|
||||
result_streams = subprocess.run(
|
||||
[
|
||||
ffprobe_bin,
|
||||
"-v",
|
||||
"error",
|
||||
"-select_streams",
|
||||
"v:0",
|
||||
"-show_entries",
|
||||
"stream=width,height",
|
||||
"-of",
|
||||
"json",
|
||||
str(input_video),
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result_streams.returncode != 0:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="WARNING",
|
||||
operation="hybrid_level5",
|
||||
message="Could not probe video resolution",
|
||||
context={"returncode": result_streams.returncode, "stderr": (result_streams.stderr or "")},
|
||||
)
|
||||
self.log.warning("Could not probe video resolution, skipping L5 crop detection")
|
||||
return
|
||||
|
||||
stream_info = json.loads(result_streams.stdout)
|
||||
original_width = int(stream_info["streams"][0]["width"])
|
||||
original_height = int(stream_info["streams"][0]["height"])
|
||||
|
||||
# Sample 10 random timestamps and run cropdetect on each
|
||||
random_times = sorted(random.uniform(0, duration) for _ in range(10))
|
||||
|
||||
crop_results = []
|
||||
for t in random_times:
|
||||
result_cropdetect = subprocess.run(
|
||||
[
|
||||
str(DoviTool),
|
||||
"editor",
|
||||
ffmpeg_bin,
|
||||
"-y",
|
||||
"-nostdin",
|
||||
"-loglevel",
|
||||
"info",
|
||||
"-ss",
|
||||
f"{t:.2f}",
|
||||
"-i",
|
||||
config.directories.temp / self.rpu_file,
|
||||
"-j",
|
||||
config.directories.temp / "L6.json",
|
||||
"-o",
|
||||
config.directories.temp / "RPU_L6.bin",
|
||||
str(input_video),
|
||||
"-vf",
|
||||
"cropdetect=round=2",
|
||||
"-vframes",
|
||||
"10",
|
||||
"-f",
|
||||
"null",
|
||||
"-",
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if level6.returncode:
|
||||
Path.unlink(config.directories.temp / "RPU_L6.bin")
|
||||
raise ValueError("Failed editing RPU Level 6 values")
|
||||
# cropdetect outputs crop=w:h:x:y
|
||||
crop_match = re.search(
|
||||
r"crop=(\d+):(\d+):(\d+):(\d+)",
|
||||
(result_cropdetect.stdout or "") + (result_cropdetect.stderr or ""),
|
||||
)
|
||||
if crop_match:
|
||||
w, h = int(crop_match.group(1)), int(crop_match.group(2))
|
||||
x, y = int(crop_match.group(3)), int(crop_match.group(4))
|
||||
# Calculate actual border sizes from crop geometry
|
||||
left = x
|
||||
top = y
|
||||
right = original_width - w - x
|
||||
bottom = original_height - h - y
|
||||
crop_results.append((left, top, right, bottom))
|
||||
|
||||
self.log.info("Edited RPU Level 6 values")
|
||||
if not crop_results:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="WARNING",
|
||||
operation="hybrid_level5",
|
||||
message="No crop data detected, skipping L5",
|
||||
context={"samples": len(random_times)},
|
||||
)
|
||||
self.log.warning("No crop data detected, skipping L5")
|
||||
return
|
||||
|
||||
# Update rpu_file to use the edited version
|
||||
self.rpu_file = "RPU_L6.bin"
|
||||
# Find the most common crop values
|
||||
crop_counts = {}
|
||||
for crop in crop_results:
|
||||
crop_counts[crop] = crop_counts.get(crop, 0) + 1
|
||||
most_common = max(crop_counts, key=crop_counts.get)
|
||||
left, top, right, bottom = most_common
|
||||
|
||||
# If all borders are 0 there's nothing to correct
|
||||
if left == 0 and top == 0 and right == 0 and bottom == 0:
|
||||
return
|
||||
|
||||
l5_json = {
|
||||
"active_area": {
|
||||
"crop": False,
|
||||
"presets": [{"id": 0, "left": left, "right": right, "top": top, "bottom": bottom}],
|
||||
"edits": {"all": 0},
|
||||
}
|
||||
}
|
||||
|
||||
l5_path = config.directories.temp / "L5.json"
|
||||
with open(l5_path, "w") as f:
|
||||
json.dump(l5_json, f, indent=4)
|
||||
|
||||
with console.status("Editing RPU Level 5 active area...", spinner="dots"):
|
||||
result = subprocess.run(
|
||||
[
|
||||
str(DoviTool),
|
||||
"editor",
|
||||
"-i",
|
||||
str(config.directories.temp / self.rpu_file),
|
||||
"-j",
|
||||
str(l5_path),
|
||||
"-o",
|
||||
str(config.directories.temp / "RPU_L5.bin"),
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
if result.returncode:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_level5",
|
||||
message="Failed editing RPU Level 5 values",
|
||||
context={"returncode": result.returncode, "stderr": (result.stderr or b"").decode(errors="replace")},
|
||||
)
|
||||
Path.unlink(config.directories.temp / "RPU_L5.bin", missing_ok=True)
|
||||
raise ValueError("Failed editing RPU Level 5 values")
|
||||
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_level5",
|
||||
message="Edited RPU Level 5 active area",
|
||||
context={"crop": {"left": left, "right": right, "top": top, "bottom": bottom}, "samples": len(crop_results)},
|
||||
success=True,
|
||||
)
|
||||
self.rpu_file = "RPU_L5.bin"
|
||||
|
||||
def level_6(self):
|
||||
"""Edit RPU Level 6 values using actual luminance data from the RPU."""
|
||||
if os.path.isfile(config.directories.temp / "RPU_L6.bin"):
|
||||
return
|
||||
|
||||
with console.status("Reading RPU luminance metadata...", spinner="dots"):
|
||||
result = subprocess.run(
|
||||
[str(DoviTool), "info", "-i", str(config.directories.temp / self.rpu_file), "-s"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_level6",
|
||||
message="Failed reading RPU metadata for Level 6 values",
|
||||
context={"returncode": result.returncode, "stderr": (result.stderr or "")},
|
||||
)
|
||||
raise ValueError("Failed reading RPU metadata for Level 6 values")
|
||||
|
||||
max_cll = None
|
||||
max_fall = None
|
||||
max_mdl = None
|
||||
min_mdl = None
|
||||
|
||||
for line in result.stdout.splitlines():
|
||||
if "RPU content light level (L1):" in line:
|
||||
parts = line.split("MaxCLL:")[1].split(",")
|
||||
max_cll = int(float(parts[0].strip().split()[0]))
|
||||
if len(parts) > 1 and "MaxFALL:" in parts[1]:
|
||||
max_fall = int(float(parts[1].split("MaxFALL:")[1].strip().split()[0]))
|
||||
elif "RPU mastering display:" in line:
|
||||
mastering = line.split(":", 1)[1].strip()
|
||||
min_lum, max_lum = mastering.split("/")[0], mastering.split("/")[1].split(" ")[0]
|
||||
min_mdl = int(float(min_lum) * 10000)
|
||||
max_mdl = int(float(max_lum))
|
||||
|
||||
if any(v is None for v in (max_cll, max_fall, max_mdl, min_mdl)):
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_level6",
|
||||
message="Could not extract Level 6 luminance data from RPU",
|
||||
context={"max_cll": max_cll, "max_fall": max_fall, "max_mdl": max_mdl, "min_mdl": min_mdl},
|
||||
)
|
||||
raise ValueError("Could not extract Level 6 luminance data from RPU")
|
||||
|
||||
level6_data = {
|
||||
"level6": {
|
||||
"remove_cmv4": False,
|
||||
"remove_mapping": False,
|
||||
"max_display_mastering_luminance": max_mdl,
|
||||
"min_display_mastering_luminance": min_mdl,
|
||||
"max_content_light_level": max_cll,
|
||||
"max_frame_average_light_level": max_fall,
|
||||
}
|
||||
}
|
||||
|
||||
l6_path = config.directories.temp / "L6.json"
|
||||
with open(l6_path, "w") as f:
|
||||
json.dump(level6_data, f, indent=4)
|
||||
|
||||
with console.status("Editing RPU Level 6 values...", spinner="dots"):
|
||||
result = subprocess.run(
|
||||
[
|
||||
str(DoviTool),
|
||||
"editor",
|
||||
"-i",
|
||||
str(config.directories.temp / self.rpu_file),
|
||||
"-j",
|
||||
str(l6_path),
|
||||
"-o",
|
||||
str(config.directories.temp / "RPU_L6.bin"),
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
if result.returncode:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_level6",
|
||||
message="Failed editing RPU Level 6 values",
|
||||
context={"returncode": result.returncode, "stderr": (result.stderr or b"").decode(errors="replace")},
|
||||
)
|
||||
Path.unlink(config.directories.temp / "RPU_L6.bin", missing_ok=True)
|
||||
raise ValueError("Failed editing RPU Level 6 values")
|
||||
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_level6",
|
||||
message="Edited RPU Level 6 luminance values",
|
||||
context={
|
||||
"max_cll": max_cll,
|
||||
"max_fall": max_fall,
|
||||
"max_mdl": max_mdl,
|
||||
"min_mdl": min_mdl,
|
||||
},
|
||||
success=True,
|
||||
)
|
||||
self.rpu_file = "RPU_L6.bin"
|
||||
|
||||
def injecting(self):
|
||||
if os.path.isfile(config.directories.temp / self.hevc_file):
|
||||
@@ -228,12 +558,6 @@ class Hybrid:
|
||||
config.directories.temp / self.rpu_file,
|
||||
]
|
||||
|
||||
# If we converted from HDR10+, optionally remove HDR10+ metadata during injection
|
||||
# Default to removing HDR10+ metadata since we're converting to DV
|
||||
if self.hdr10plus_to_dv:
|
||||
inject_cmd.append("--drop-hdr10plus")
|
||||
self.log.info(" - Removing HDR10+ metadata during injection")
|
||||
|
||||
inject_cmd.extend(["-o", config.directories.temp / self.hevc_file])
|
||||
|
||||
inject = subprocess.run(
|
||||
@@ -243,10 +567,29 @@ class Hybrid:
|
||||
)
|
||||
|
||||
if inject.returncode:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_inject_rpu",
|
||||
message="Failed injecting Dolby Vision metadata into HDR10 stream",
|
||||
context={
|
||||
"returncode": inject.returncode,
|
||||
"stderr": (inject.stderr or b"").decode(errors="replace"),
|
||||
"stdout": (inject.stdout or b"").decode(errors="replace"),
|
||||
"cmd": [str(a) for a in inject_cmd],
|
||||
},
|
||||
)
|
||||
Path.unlink(config.directories.temp / self.hevc_file)
|
||||
raise ValueError("Failed injecting Dolby Vision metadata into HDR10 stream")
|
||||
|
||||
self.log.info(f"Injected Dolby Vision metadata into {self.hdr_type} stream")
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_inject_rpu",
|
||||
message=f"Injected Dolby Vision metadata into {self.hdr_type} stream",
|
||||
context={"hdr_type": self.hdr_type, "rpu_file": self.rpu_file, "output": self.hevc_file, "drop_hdr10plus": self.hdr10plus_to_dv},
|
||||
success=True,
|
||||
)
|
||||
|
||||
def extract_hdr10plus(self, _video):
|
||||
"""Extract HDR10+ metadata from the video stream"""
|
||||
@@ -271,13 +614,39 @@ class Hybrid:
|
||||
)
|
||||
|
||||
if extraction.returncode:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_extract_hdr10plus",
|
||||
message="Failed extracting HDR10+ metadata",
|
||||
context={
|
||||
"returncode": extraction.returncode,
|
||||
"stderr": (extraction.stderr or b"").decode(errors="replace"),
|
||||
"stdout": (extraction.stdout or b"").decode(errors="replace"),
|
||||
},
|
||||
)
|
||||
raise ValueError("Failed extracting HDR10+ metadata")
|
||||
|
||||
# Check if the extracted file has content
|
||||
if os.path.getsize(config.directories.temp / self.hdr10plus_file) == 0:
|
||||
file_size = os.path.getsize(config.directories.temp / self.hdr10plus_file)
|
||||
if file_size == 0:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_extract_hdr10plus",
|
||||
message="No HDR10+ metadata found in the stream",
|
||||
context={"file_size": 0},
|
||||
)
|
||||
raise ValueError("No HDR10+ metadata found in the stream")
|
||||
|
||||
self.log.info("Extracted HDR10+ metadata")
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_extract_hdr10plus",
|
||||
message="Extracted HDR10+ metadata",
|
||||
context={"output": self.hdr10plus_file, "file_size": file_size},
|
||||
success=True,
|
||||
)
|
||||
|
||||
def convert_hdr10plus_to_dv(self):
|
||||
"""Convert HDR10+ metadata to Dolby Vision RPU"""
|
||||
@@ -317,10 +686,26 @@ class Hybrid:
|
||||
)
|
||||
|
||||
if conversion.returncode:
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="ERROR",
|
||||
operation="hybrid_convert_hdr10plus",
|
||||
message="Failed converting HDR10+ to Dolby Vision",
|
||||
context={
|
||||
"returncode": conversion.returncode,
|
||||
"stderr": (conversion.stderr or b"").decode(errors="replace"),
|
||||
"stdout": (conversion.stdout or b"").decode(errors="replace"),
|
||||
},
|
||||
)
|
||||
raise ValueError("Failed converting HDR10+ to Dolby Vision")
|
||||
|
||||
self.log.info("Converted HDR10+ metadata to Dolby Vision")
|
||||
self.log.info("✓ HDR10+ successfully converted to Dolby Vision Profile 8")
|
||||
if self.debug_logger:
|
||||
self.debug_logger.log(
|
||||
level="DEBUG",
|
||||
operation="hybrid_convert_hdr10plus",
|
||||
message="Converted HDR10+ metadata to Dolby Vision Profile 8",
|
||||
success=True,
|
||||
)
|
||||
|
||||
# Clean up temporary files
|
||||
Path.unlink(config.directories.temp / "extra.json")
|
||||
|
||||
@@ -15,17 +15,16 @@ from zlib import crc32
|
||||
|
||||
from curl_cffi.requests import Session as CurlSession
|
||||
from langcodes import Language
|
||||
from pyplayready.cdm import Cdm as PlayReadyCdm
|
||||
from pywidevine.cdm import Cdm as WidevineCdm
|
||||
from requests import Session
|
||||
|
||||
from unshackle.core import binaries
|
||||
from unshackle.core.cdm.detect import is_playready_cdm, is_widevine_cdm
|
||||
from unshackle.core.config import config
|
||||
from unshackle.core.constants import DOWNLOAD_CANCELLED, DOWNLOAD_LICENCE_ONLY
|
||||
from unshackle.core.downloaders import aria2c, curl_impersonate, n_m3u8dl_re, requests
|
||||
from unshackle.core.drm import DRM_T, PlayReady, Widevine
|
||||
from unshackle.core.events import events
|
||||
from unshackle.core.utilities import get_boxes, get_extension, try_ensure_utf8
|
||||
from unshackle.core.utilities import get_boxes, try_ensure_utf8
|
||||
from unshackle.core.utils.subprocess import ffprobe
|
||||
|
||||
|
||||
@@ -211,23 +210,12 @@ class Track:
|
||||
save_path = config.directories.temp / f"{track_type}_{self.id}.mp4"
|
||||
if track_type == "Subtitle":
|
||||
save_path = save_path.with_suffix(f".{self.codec.extension}")
|
||||
# n_m3u8dl_re doesn't support directly downloading subtitles from URLs
|
||||
# or when the subtitle has a direct file extension
|
||||
if self.downloader.__name__ == "n_m3u8dl_re" and (
|
||||
self.descriptor == self.Descriptor.URL
|
||||
or get_extension(self.url)
|
||||
in {
|
||||
".srt",
|
||||
".vtt",
|
||||
".ttml",
|
||||
".ssa",
|
||||
".ass",
|
||||
".stpp",
|
||||
".wvtt",
|
||||
".xml",
|
||||
}
|
||||
):
|
||||
self.downloader = requests
|
||||
|
||||
if self.downloader.__name__ == "n_m3u8dl_re" and (
|
||||
self.descriptor == self.Descriptor.URL
|
||||
or track_type in ("Subtitle", "Attachment")
|
||||
):
|
||||
self.downloader = requests
|
||||
|
||||
if self.descriptor != self.Descriptor.URL:
|
||||
save_dir = save_path.with_name(save_path.name + "_segments")
|
||||
@@ -297,7 +285,7 @@ class Track:
|
||||
if not self.drm and track_type in ("Video", "Audio"):
|
||||
# the service might not have explicitly defined the `drm` property
|
||||
# try find DRM information from the init data of URL based on CDM type
|
||||
if isinstance(cdm, PlayReadyCdm):
|
||||
if is_playready_cdm(cdm):
|
||||
try:
|
||||
self.drm = [PlayReady.from_track(self, session)]
|
||||
except PlayReady.Exceptions.PSSHNotFound:
|
||||
@@ -451,23 +439,14 @@ class Track:
|
||||
if not self.drm:
|
||||
return None
|
||||
|
||||
if isinstance(cdm, WidevineCdm):
|
||||
if is_widevine_cdm(cdm):
|
||||
for drm in self.drm:
|
||||
if isinstance(drm, Widevine):
|
||||
return drm
|
||||
elif isinstance(cdm, PlayReadyCdm):
|
||||
elif is_playready_cdm(cdm):
|
||||
for drm in self.drm:
|
||||
if isinstance(drm, PlayReady):
|
||||
return drm
|
||||
elif hasattr(cdm, "is_playready"):
|
||||
if cdm.is_playready:
|
||||
for drm in self.drm:
|
||||
if isinstance(drm, PlayReady):
|
||||
return drm
|
||||
else:
|
||||
for drm in self.drm:
|
||||
if isinstance(drm, Widevine):
|
||||
return drm
|
||||
|
||||
return self.drm[0]
|
||||
|
||||
|
||||
@@ -95,7 +95,7 @@ class Tracks:
|
||||
|
||||
return rep
|
||||
|
||||
def tree(self, add_progress: bool = False) -> tuple[Tree, list[partial]]:
|
||||
def tree(self, add_progress: bool = False) -> tuple[Tree, list[Callable[..., None]]]:
|
||||
all_tracks = [*list(self), *self.chapters, *self.attachments]
|
||||
|
||||
progress_callables = []
|
||||
@@ -121,7 +121,29 @@ class Tracks:
|
||||
speed_estimate_period=10,
|
||||
)
|
||||
task = progress.add_task("", downloaded="-")
|
||||
progress_callables.append(partial(progress.update, task_id=task))
|
||||
state = {"total": 100.0}
|
||||
|
||||
def update_track_progress(
|
||||
task_id: int = task,
|
||||
_state: dict[str, float] = state,
|
||||
_progress: Progress = progress,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure terminal status states render as a fully completed bar.
|
||||
|
||||
Some downloaders can report completed slightly below total
|
||||
before emitting the final "Downloaded" state.
|
||||
"""
|
||||
if "total" in kwargs and kwargs["total"] is not None:
|
||||
_state["total"] = kwargs["total"]
|
||||
|
||||
downloaded_state = kwargs.get("downloaded")
|
||||
if downloaded_state in {"Downloaded", "Decrypted", "[yellow]SKIPPED"}:
|
||||
kwargs["completed"] = _state["total"]
|
||||
_progress.update(task_id=task_id, **kwargs)
|
||||
|
||||
progress_callables.append(update_track_progress)
|
||||
track_table = Table.grid()
|
||||
track_table.add_row(str(track)[6:], style="text2")
|
||||
track_table.add_row(progress)
|
||||
@@ -199,13 +221,15 @@ class Tracks:
|
||||
self.videos.sort(key=lambda x: not is_close_match(language, [x.language]))
|
||||
|
||||
def sort_audio(self, by_language: Optional[Sequence[Union[str, Language]]] = None) -> None:
|
||||
"""Sort audio tracks by bitrate, descriptive, and optionally language."""
|
||||
"""Sort audio tracks by bitrate, Atmos, descriptive, and optionally language."""
|
||||
if not self.audio:
|
||||
return
|
||||
# descriptive
|
||||
self.audio.sort(key=lambda x: x.descriptive)
|
||||
# bitrate (within each descriptive group)
|
||||
# bitrate (highest first)
|
||||
self.audio.sort(key=lambda x: float(x.bitrate or 0.0), reverse=True)
|
||||
# Atmos tracks first (prioritize over higher bitrate non-Atmos)
|
||||
self.audio.sort(key=lambda x: not x.atmos)
|
||||
# descriptive tracks last
|
||||
self.audio.sort(key=lambda x: x.descriptive)
|
||||
# language
|
||||
for language in reversed(by_language or []):
|
||||
if str(language) in ("all", "best"):
|
||||
@@ -254,23 +278,30 @@ class Tracks:
|
||||
self.subtitles = list(filter(x, self.subtitles))
|
||||
|
||||
def select_hybrid(self, tracks, quality):
|
||||
hdr10_tracks = [
|
||||
v
|
||||
for v in tracks
|
||||
if v.range == Video.Range.HDR10 and (v.height in quality or int(v.width * 9 / 16) in quality)
|
||||
]
|
||||
hdr10 = []
|
||||
# Prefer HDR10+ over HDR10 as the base layer (preserves dynamic metadata)
|
||||
base_ranges = (Video.Range.HDR10P, Video.Range.HDR10)
|
||||
base_tracks = []
|
||||
for range_type in base_ranges:
|
||||
base_tracks = [
|
||||
v
|
||||
for v in tracks
|
||||
if v.range == range_type and (v.height in quality or int(v.width * 9 / 16) in quality)
|
||||
]
|
||||
if base_tracks:
|
||||
break
|
||||
|
||||
base_selected = []
|
||||
for res in quality:
|
||||
candidates = [v for v in hdr10_tracks if v.height == res or int(v.width * 9 / 16) == res]
|
||||
candidates = [v for v in base_tracks if v.height == res or int(v.width * 9 / 16) == res]
|
||||
if candidates:
|
||||
best = max(candidates, key=lambda v: v.bitrate) # assumes .bitrate exists
|
||||
hdr10.append(best)
|
||||
best = max(candidates, key=lambda v: v.bitrate)
|
||||
base_selected.append(best)
|
||||
|
||||
dv_tracks = [v for v in tracks if v.range == Video.Range.DV]
|
||||
lowest_dv = min(dv_tracks, key=lambda v: v.height) if dv_tracks else None
|
||||
|
||||
def select(x):
|
||||
if x in hdr10:
|
||||
if x in base_selected:
|
||||
return True
|
||||
if lowest_dv and x is lowest_dv:
|
||||
return True
|
||||
|
||||
@@ -44,6 +44,33 @@ class VideoCodecChoice(click.Choice):
|
||||
self.fail(f"'{value}' is not a valid video codec", param, ctx)
|
||||
|
||||
|
||||
class MultipleVideoCodecChoice(VideoCodecChoice):
|
||||
"""
|
||||
A multiple-value variant of VideoCodecChoice that accepts comma-separated codecs.
|
||||
|
||||
Accepts both enum names and values, e.g.: ``-v hevc,avc`` or ``-v H.264,H.265``
|
||||
"""
|
||||
|
||||
name = "multiple_video_codec_choice"
|
||||
|
||||
def convert(
|
||||
self, value: Any, param: Optional[click.Parameter] = None, ctx: Optional[click.Context] = None
|
||||
) -> list[Any]:
|
||||
if not value:
|
||||
return []
|
||||
if isinstance(value, list):
|
||||
values = value
|
||||
elif isinstance(value, str):
|
||||
values = value.split(",")
|
||||
else:
|
||||
self.fail(f"{value!r} is not a supported value.", param, ctx)
|
||||
|
||||
chosen_values: list[Any] = []
|
||||
for v in values:
|
||||
chosen_values.append(super().convert(v.strip(), param, ctx))
|
||||
return chosen_values
|
||||
|
||||
|
||||
class SubtitleCodecChoice(click.Choice):
|
||||
"""
|
||||
A custom Choice type for subtitle codecs that accepts both enum names, values, and common aliases.
|
||||
|
||||
310
unshackle/core/utils/selector.py
Normal file
310
unshackle/core/utils/selector.py
Normal file
@@ -0,0 +1,310 @@
|
||||
import sys
|
||||
|
||||
import click
|
||||
from rich.console import Group
|
||||
from rich.live import Live
|
||||
from rich.padding import Padding
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
|
||||
from unshackle.core.console import console
|
||||
|
||||
IS_WINDOWS = sys.platform == "win32"
|
||||
if IS_WINDOWS:
|
||||
import msvcrt
|
||||
|
||||
|
||||
class Selector:
|
||||
"""
|
||||
A custom interactive selector class using the Rich library.
|
||||
Allows for multi-selection of items with pagination.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
options: list[str],
|
||||
cursor_style: str = "pink",
|
||||
text_style: str = "text",
|
||||
page_size: int = 8,
|
||||
minimal_count: int = 0,
|
||||
dependencies: dict[int, list[int]] = None,
|
||||
prefixes: list[str] = None,
|
||||
):
|
||||
"""
|
||||
Initialize the Selector.
|
||||
|
||||
Args:
|
||||
options: List of strings to select from.
|
||||
cursor_style: Rich style for the highlighted cursor item.
|
||||
text_style: Rich style for normal items.
|
||||
page_size: Number of items to show per page.
|
||||
minimal_count: Minimum number of items that must be selected.
|
||||
dependencies: Dictionary mapping parent index to list of child indices.
|
||||
"""
|
||||
self.options = options
|
||||
self.cursor_style = cursor_style
|
||||
self.text_style = text_style
|
||||
self.page_size = page_size
|
||||
self.minimal_count = minimal_count
|
||||
self.dependencies = dependencies or {}
|
||||
|
||||
self.cursor_index = 0
|
||||
self.selected_indices = set()
|
||||
self.scroll_offset = 0
|
||||
|
||||
def get_renderable(self):
|
||||
"""
|
||||
Constructs and returns the renderable object (Table + Info) for the current state.
|
||||
"""
|
||||
table = Table(show_header=False, show_edge=False, box=None, pad_edge=False, padding=(0, 1, 0, 0))
|
||||
table.add_column("Indicator", justify="right", no_wrap=True)
|
||||
table.add_column("Option", overflow="ellipsis", no_wrap=True)
|
||||
|
||||
for i in range(self.page_size):
|
||||
idx = self.scroll_offset + i
|
||||
|
||||
if idx < len(self.options):
|
||||
option = self.options[idx]
|
||||
is_cursor = idx == self.cursor_index
|
||||
is_selected = idx in self.selected_indices
|
||||
|
||||
symbol = "[X]" if is_selected else "[ ]"
|
||||
style = self.cursor_style if is_cursor else self.text_style
|
||||
indicator_text = Text(f"{symbol}", style=style)
|
||||
|
||||
content_text = Text.from_markup(option)
|
||||
content_text.style = style
|
||||
|
||||
table.add_row(indicator_text, content_text)
|
||||
else:
|
||||
table.add_row(Text(" "), Text(" "))
|
||||
|
||||
total_pages = (len(self.options) + self.page_size - 1) // self.page_size
|
||||
current_page = (self.scroll_offset // self.page_size) + 1
|
||||
|
||||
info_text = Text(
|
||||
f"\n[Space]: Toggle [a]: All [←/→]: Page [Enter]: Confirm (Page {current_page}/{total_pages})",
|
||||
style="gray",
|
||||
)
|
||||
|
||||
return Padding(Group(table, info_text), (0, 5))
|
||||
|
||||
def move_cursor(self, delta: int):
|
||||
"""
|
||||
Moves the cursor up or down by the specified delta.
|
||||
Updates the scroll offset if the cursor moves out of the current view.
|
||||
"""
|
||||
self.cursor_index = (self.cursor_index + delta) % len(self.options)
|
||||
new_page_idx = self.cursor_index // self.page_size
|
||||
self.scroll_offset = new_page_idx * self.page_size
|
||||
|
||||
def change_page(self, delta: int):
|
||||
"""
|
||||
Changes the current page view by the specified delta (previous/next page).
|
||||
Also moves the cursor to the first item of the new page.
|
||||
"""
|
||||
current_page = self.scroll_offset // self.page_size
|
||||
total_pages = (len(self.options) + self.page_size - 1) // self.page_size
|
||||
new_page = current_page + delta
|
||||
|
||||
if 0 <= new_page < total_pages:
|
||||
self.scroll_offset = new_page * self.page_size
|
||||
first_idx_of_page = self.scroll_offset
|
||||
if first_idx_of_page < len(self.options):
|
||||
self.cursor_index = first_idx_of_page
|
||||
else:
|
||||
self.cursor_index = len(self.options) - 1
|
||||
|
||||
def toggle_selection(self):
|
||||
"""
|
||||
Toggles the selection state of the item currently under the cursor.
|
||||
Propagates selection to children if defined in dependencies.
|
||||
"""
|
||||
target_indices = {self.cursor_index}
|
||||
|
||||
if self.cursor_index in self.dependencies:
|
||||
target_indices.update(self.dependencies[self.cursor_index])
|
||||
|
||||
should_select = self.cursor_index not in self.selected_indices
|
||||
|
||||
if should_select:
|
||||
self.selected_indices.update(target_indices)
|
||||
else:
|
||||
self.selected_indices.difference_update(target_indices)
|
||||
|
||||
def toggle_all(self):
|
||||
"""
|
||||
Toggles the selection of all items.
|
||||
If all are selected, clears selection. Otherwise, selects all.
|
||||
"""
|
||||
if len(self.selected_indices) == len(self.options):
|
||||
self.selected_indices.clear()
|
||||
else:
|
||||
self.selected_indices = set(range(len(self.options)))
|
||||
|
||||
def get_input_windows(self):
|
||||
"""
|
||||
Captures and parses keyboard input on Windows systems using msvcrt.
|
||||
Returns command strings like 'UP', 'DOWN', 'ENTER', etc.
|
||||
"""
|
||||
key = msvcrt.getch()
|
||||
if key == b"\x03" or key == b"\x1b":
|
||||
return "CANCEL"
|
||||
if key == b"\xe0" or key == b"\x00":
|
||||
try:
|
||||
key = msvcrt.getch()
|
||||
if key == b"H":
|
||||
return "UP"
|
||||
if key == b"P":
|
||||
return "DOWN"
|
||||
if key == b"K":
|
||||
return "LEFT"
|
||||
if key == b"M":
|
||||
return "RIGHT"
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
char = key.decode("utf-8", errors="ignore")
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
if char in ("\r", "\n"):
|
||||
return "ENTER"
|
||||
if char == " ":
|
||||
return "SPACE"
|
||||
if char in ("q", "Q"):
|
||||
return "QUIT"
|
||||
if char in ("a", "A"):
|
||||
return "ALL"
|
||||
if char in ("w", "W", "k", "K"):
|
||||
return "UP"
|
||||
if char in ("s", "S", "j", "J"):
|
||||
return "DOWN"
|
||||
if char in ("h", "H"):
|
||||
return "LEFT"
|
||||
if char in ("d", "D", "l", "L"):
|
||||
return "RIGHT"
|
||||
return None
|
||||
|
||||
def get_input_unix(self):
|
||||
"""
|
||||
Captures and parses keyboard input on Unix/Linux systems using click.getchar().
|
||||
Returns command strings like 'UP', 'DOWN', 'ENTER', etc.
|
||||
"""
|
||||
char = click.getchar()
|
||||
if char == "\x03":
|
||||
return "CANCEL"
|
||||
mapping = {
|
||||
"\x1b[A": "UP",
|
||||
"\x1b[B": "DOWN",
|
||||
"\x1b[C": "RIGHT",
|
||||
"\x1b[D": "LEFT",
|
||||
}
|
||||
if char in mapping:
|
||||
return mapping[char]
|
||||
if char == "\x1b":
|
||||
try:
|
||||
next1 = click.getchar()
|
||||
if next1 in ("[", "O"):
|
||||
next2 = click.getchar()
|
||||
if next2 == "A":
|
||||
return "UP"
|
||||
if next2 == "B":
|
||||
return "DOWN"
|
||||
if next2 == "C":
|
||||
return "RIGHT"
|
||||
if next2 == "D":
|
||||
return "LEFT"
|
||||
return "CANCEL"
|
||||
except Exception:
|
||||
return "CANCEL"
|
||||
|
||||
if char in ("\r", "\n"):
|
||||
return "ENTER"
|
||||
if char == " ":
|
||||
return "SPACE"
|
||||
if char in ("q", "Q"):
|
||||
return "QUIT"
|
||||
if char in ("a", "A"):
|
||||
return "ALL"
|
||||
if char in ("w", "W", "k", "K"):
|
||||
return "UP"
|
||||
if char in ("s", "S", "j", "J"):
|
||||
return "DOWN"
|
||||
if char in ("h", "H"):
|
||||
return "LEFT"
|
||||
if char in ("d", "D", "l", "L"):
|
||||
return "RIGHT"
|
||||
return None
|
||||
|
||||
def run(self) -> list[int]:
|
||||
"""
|
||||
Starts the main event loop for the selector.
|
||||
Renders the UI and processes input until confirmed or cancelled.
|
||||
|
||||
Returns:
|
||||
list[int]: A sorted list of selected indices.
|
||||
"""
|
||||
try:
|
||||
with Live(self.get_renderable(), console=console, auto_refresh=False, transient=True) as live:
|
||||
while True:
|
||||
live.update(self.get_renderable(), refresh=True)
|
||||
if IS_WINDOWS:
|
||||
action = self.get_input_windows()
|
||||
else:
|
||||
action = self.get_input_unix()
|
||||
|
||||
if action == "UP":
|
||||
self.move_cursor(-1)
|
||||
elif action == "DOWN":
|
||||
self.move_cursor(1)
|
||||
elif action == "LEFT":
|
||||
self.change_page(-1)
|
||||
elif action == "RIGHT":
|
||||
self.change_page(1)
|
||||
elif action == "SPACE":
|
||||
self.toggle_selection()
|
||||
elif action == "ALL":
|
||||
self.toggle_all()
|
||||
elif action in ("ENTER", "QUIT"):
|
||||
if len(self.selected_indices) >= self.minimal_count:
|
||||
return sorted(list(self.selected_indices))
|
||||
elif action == "CANCEL":
|
||||
raise KeyboardInterrupt
|
||||
except KeyboardInterrupt:
|
||||
return []
|
||||
|
||||
|
||||
def select_multiple(
    options: list[str],
    minimal_count: int = 1,
    page_size: int = 8,
    return_indices: bool = True,
    cursor_style: str = "pink",
    **kwargs,
) -> list[int] | list[str]:
    """
    Drop-in replacement using custom Selector with global console.

    Args:
        options: List of options to display.
        minimal_count: Minimum number of selections required.
        page_size: Number of items per page.
        return_indices: If True, returns indices; otherwise returns the option strings.
        cursor_style: Style color for the cursor.
        **kwargs: Forwarded to the Selector constructor (e.g. dependencies, prefixes).

    Returns:
        Sorted selected indices when return_indices is True, otherwise the
        corresponding option strings. Empty list when the user cancels.
    """
    selector = Selector(
        options=options,
        cursor_style=cursor_style,
        text_style="text",
        page_size=page_size,
        minimal_count=minimal_count,
        **kwargs,
    )

    selected_indices = selector.run()

    if return_indices:
        return selected_indices
    return [options[i] for i in selected_indices]
|
||||
@@ -168,6 +168,16 @@ def merge_segmented_webvtt(vtt_raw: str, segment_durations: Optional[list[int]]
|
||||
duplicate_index: list[int] = []
|
||||
captions = vtt.get_captions(lang)
|
||||
|
||||
# Some providers can produce "segment_index" values that are
|
||||
# outside the provided segment_durations list after normalization/merge.
|
||||
# This used to crash with IndexError and abort the entire download.
|
||||
if segment_durations and captions:
|
||||
max_idx = max(getattr(c, "segment_index", 0) for c in captions)
|
||||
if max_idx >= len(segment_durations):
|
||||
# Pad with the last known duration (or 0 if empty) so indexing is safe.
|
||||
pad_val = segment_durations[-1] if segment_durations else 0
|
||||
segment_durations = segment_durations + [pad_val] * (max_idx - len(segment_durations) + 1)
|
||||
|
||||
if captions[0].segment_index == 0:
|
||||
first_segment_mpegts = captions[0].mpegts
|
||||
else:
|
||||
@@ -179,6 +189,9 @@ def merge_segmented_webvtt(vtt_raw: str, segment_durations: Optional[list[int]]
|
||||
# calculate the timestamp from SegmentTemplate/SegmentList duration.
|
||||
likely_dash = first_segment_mpegts == 0 and caption.mpegts == 0
|
||||
if likely_dash and segment_durations:
|
||||
# Defensive: segment_index can still be out of range if captions are malformed.
|
||||
if caption.segment_index < 0 or caption.segment_index >= len(segment_durations):
|
||||
continue
|
||||
duration = segment_durations[caption.segment_index]
|
||||
caption.mpegts = MPEG_TIMESCALE * (duration / timescale)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user