initial
This commit is contained in:
338
lib/ScreenShot.py
Normal file
338
lib/ScreenShot.py
Normal file
@@ -0,0 +1,338 @@
|
||||
import os
|
||||
import re
|
||||
import asyncio
|
||||
import subprocess
|
||||
import cv2
|
||||
import numpy as np
|
||||
import shutil
|
||||
import time
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
from urllib.parse import urljoin
|
||||
import aiohttp
|
||||
from dotenv import dotenv_values
|
||||
from datetime import timedelta
|
||||
|
||||
from lib.logging_data import logger
|
||||
import json
|
||||
|
||||
class ScreenShot:
    """Builds a thumbnail contact sheet for a video file and uploads it to imgbb."""

    # Font used for the burned-in timestamps and the metadata banner.
    FONT_PATH = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"

    def __init__(self, OUTPUT_DIR="temp_screenshots", console: logger = None):
        """Set up grid/layout defaults; video-specific fields are filled by run()."""
        # Paths resolved later by run().
        self.VIDEO_PATH = None
        self.OUTPUT_DIR = OUTPUT_DIR
        self.OUTPUT_IMAGE = None

        # Contact-sheet layout: 3 columns x 5 rows of thumbnails.
        self.GRID_COLS = 3
        self.GRID_ROWS = 5
        self.TOTAL_FRAMES = self.GRID_COLS * self.GRID_ROWS
        # Per-thumbnail dimensions, derived from the source in get_metadata().
        self.WIDTH = None
        self.HEIGHT = None
        self.HEADER_HEIGHT = 90

        # imgbb upload state, populated by login().
        self.base_url = "https://imgbb.ws"
        self.imgbb_token = None
        self.session = None

        self.console = console or logger(app_name="torrent_uploader", log_dir="./log")
|
||||
|
||||
def get_metadata(self):
|
||||
def ffprobe_entry(stream, entry):
|
||||
cmd = [
|
||||
"ffprobe", "-v", "error", "-select_streams", stream,
|
||||
"-show_entries", f"stream={entry}",
|
||||
"-of", "default=noprint_wrappers=1:nokey=1", self.VIDEO_PATH
|
||||
]
|
||||
# print(f"Running command: {' '.join(cmd)}")
|
||||
result = subprocess.run(cmd, stdout=subprocess.PIPE)
|
||||
return result.stdout.decode().strip()
|
||||
|
||||
def ffprobe_tag_languages(stream_type):
|
||||
cmd = [
|
||||
"ffprobe", "-v", "error", "-select_streams", stream_type,
|
||||
"-show_entries", "stream_tags=language",
|
||||
"-of", "csv=p=0", self.VIDEO_PATH
|
||||
]
|
||||
result = subprocess.run(cmd, stdout=subprocess.PIPE)
|
||||
langs = list(set([lang.strip() for lang in result.stdout.decode().splitlines() if lang.strip()]))
|
||||
return ",".join(langs) if langs else "und"
|
||||
|
||||
duration = float(self.get_duration(self.VIDEO_PATH))
|
||||
vcodec = ffprobe_entry("v:0", "codec_name").splitlines()[0] + " " + ffprobe_entry("v:0", "profile").splitlines()[0]
|
||||
acodec_profile = ffprobe_entry("a:0", "profile").splitlines()[0]
|
||||
acodec = ffprobe_entry("a:0", "codec_name").splitlines()[0]
|
||||
acodec += " DDP" if "Dolby Digital Plus" in acodec_profile else " DD" if "Dolby Digital" in acodec_profile else ""
|
||||
acodec += " Atmos" if "Atmos" in acodec_profile else ""
|
||||
audio_channels = ffprobe_entry("a:0", "channel_layout").splitlines()[0]
|
||||
resolution = f"{ffprobe_entry('v:0', 'width').splitlines()[0]}x{ffprobe_entry('v:0', 'height').splitlines()[0]}"
|
||||
self.WIDTH, self.HEIGHT = map(int, resolution.split('x'))
|
||||
self.WIDTH, self.HEIGHT=self.WIDTH/self.GRID_COLS, self.HEIGHT/self.GRID_COLS
|
||||
size_mb = os.path.getsize(self.VIDEO_PATH) / (1024 * 1024)
|
||||
audio_lang = ffprobe_tag_languages("a").upper()
|
||||
subtitle_lang = ffprobe_tag_languages("s").upper()
|
||||
|
||||
return duration, vcodec.upper(), acodec.upper(), audio_channels, resolution, size_mb, audio_lang, subtitle_lang
|
||||
|
||||
# def get_metadata(self):
|
||||
# def get_mkv_json():
|
||||
# cmd = ["mkvmerge", "-J", self.VIDEO_PATH]
|
||||
# result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
# return json.loads(result.stdout.decode())
|
||||
|
||||
# data = get_mkv_json()
|
||||
|
||||
# video_track = next(t for t in data["tracks"] if t["type"] == "video")
|
||||
# audio_track = next(t for t in data["tracks"] if t["type"] == "audio")
|
||||
|
||||
# # duration (ns -> sec)
|
||||
# duration = data["container"]["properties"]["duration"] / 1_000_000_000
|
||||
|
||||
# # video codec
|
||||
# vcodec = video_track["codec"]
|
||||
# profile = video_track.get("properties", {}).get("profile")
|
||||
# if profile:
|
||||
# vcodec = f"{vcodec} {profile}"
|
||||
|
||||
# # audio codec
|
||||
# acodec = audio_track["codec"]
|
||||
# acodec_profile = audio_track.get("properties", {}).get("profile", "")
|
||||
|
||||
# if "Dolby Digital Plus" in acodec_profile:
|
||||
# acodec += " DDP"
|
||||
# elif "Dolby Digital" in acodec_profile:
|
||||
# acodec += " DD"
|
||||
|
||||
# if "Atmos" in acodec_profile:
|
||||
# acodec += " Atmos"
|
||||
|
||||
# # channels
|
||||
# audio_channels = audio_track.get("properties", {}).get("audio_channels")
|
||||
|
||||
# # resolution
|
||||
# width = video_track["properties"]["pixel_dimensions"].split("x")[0]
|
||||
# height = video_track["properties"]["pixel_dimensions"].split("x")[1]
|
||||
|
||||
# resolution = f"{width}x{height}"
|
||||
|
||||
# self.WIDTH, self.HEIGHT = int(width), int(height)
|
||||
# self.WIDTH, self.HEIGHT = self.WIDTH/self.GRID_COLS, self.HEIGHT/self.GRID_COLS
|
||||
|
||||
# # size
|
||||
# size_mb = os.path.getsize(self.VIDEO_PATH) / (1024 * 1024)
|
||||
|
||||
# # audio languages
|
||||
# audio_langs = {
|
||||
# t.get("properties", {}).get("language", "und")
|
||||
# for t in data["tracks"] if t["type"] == "audio"
|
||||
# }
|
||||
|
||||
# subtitle_langs = {
|
||||
# t.get("properties", {}).get("language", "und")
|
||||
# for t in data["tracks"] if t["type"] == "subtitles"
|
||||
# }
|
||||
|
||||
# audio_lang = ",".join(sorted(audio_langs)).upper()
|
||||
# subtitle_lang = ",".join(sorted(subtitle_langs)).upper()
|
||||
|
||||
# return (
|
||||
# duration,
|
||||
# vcodec.upper(),
|
||||
# acodec.upper(),
|
||||
# audio_channels,
|
||||
# resolution,
|
||||
# size_mb,
|
||||
# audio_lang,
|
||||
# subtitle_lang
|
||||
# )
|
||||
@staticmethod
|
||||
def get_duration(filename):
|
||||
result = subprocess.run(
|
||||
["ffprobe", "-v", "error", "-show_entries", "format=duration",
|
||||
"-of", "default=noprint_wrappers=1:nokey=1", filename],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
|
||||
)
|
||||
return float(result.stdout)
|
||||
|
||||
async def extract_screenshots(self, duration):
|
||||
return await asyncio.to_thread(self._extract_screenshots_blocking, duration)
|
||||
|
||||
def _extract_screenshots_blocking(self, duration):
|
||||
os.makedirs(self.OUTPUT_DIR, exist_ok=True)
|
||||
interval =duration / self.TOTAL_FRAMES
|
||||
timestamps = []
|
||||
|
||||
for i in range(self.TOTAL_FRAMES):
|
||||
timestamp = int(i * interval)
|
||||
if timestamp ==0:
|
||||
timestamp = 5
|
||||
output_file = os.path.join(self.OUTPUT_DIR, f"shot_{i:02d}.jpg")
|
||||
|
||||
# drawtext = (
|
||||
# f"drawtext=fontfile={self.FONT_PATH}:"
|
||||
# f"text='%{{pts\\:hms}}':"
|
||||
# f"x=10:y=10:fontsize=18:fontcolor=white:borderw=2"
|
||||
# )
|
||||
|
||||
cmd = [
|
||||
"ffmpeg", "-ss", str(timestamp), "-i", self.VIDEO_PATH,
|
||||
"-frames:v", "1", "-q:v", "2",
|
||||
"-vf",
|
||||
f"scale={self.WIDTH}:{self.HEIGHT}",
|
||||
output_file, "-y"
|
||||
]
|
||||
result = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
||||
self.console.debug("result : ",result)
|
||||
# print(" ".join(cmd))
|
||||
# print(result)
|
||||
# Draw timestamp with Pillow
|
||||
timestamp_str = str(timedelta(seconds=timestamp)).split(".")[0]
|
||||
img = Image.open(output_file)
|
||||
draw = ImageDraw.Draw(img)
|
||||
font = ImageFont.truetype(self.FONT_PATH, 32)
|
||||
draw.text((10, 10), timestamp_str, font=font, fill="white", stroke_width=2, stroke_fill="black")
|
||||
img.save(output_file)
|
||||
|
||||
timestamps.append(timestamp)
|
||||
|
||||
return timestamps
|
||||
|
||||
|
||||
def stitch_images(self, metadata_text, timestamps):
|
||||
images = []
|
||||
for i in range(self.TOTAL_FRAMES):
|
||||
img_path = os.path.join(self.OUTPUT_DIR, f"shot_{i:02d}.jpg")
|
||||
img = cv2.imread(img_path)
|
||||
if img is not None:
|
||||
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
||||
images.append(img_rgb)
|
||||
|
||||
if len(images) != self.TOTAL_FRAMES:
|
||||
self.console.error("Not enough images. Exiting.")
|
||||
return
|
||||
|
||||
rows = [np.hstack(images[i*self.GRID_COLS:(i+1)*self.GRID_COLS]) for i in range(self.GRID_ROWS)]
|
||||
sheet = np.vstack(rows)
|
||||
|
||||
sheet_img = Image.fromarray(sheet)
|
||||
banner = Image.new("RGB", (sheet_img.width, self.HEADER_HEIGHT), color=(40, 40, 40))
|
||||
draw = ImageDraw.Draw(banner)
|
||||
font = ImageFont.truetype(self.FONT_PATH, 20)
|
||||
draw.text((10, 10), metadata_text, font=font, fill="white")
|
||||
|
||||
final_image = Image.new("RGB", (sheet_img.width, sheet_img.height + self.HEADER_HEIGHT))
|
||||
final_image.paste(banner, (0, 0))
|
||||
final_image.paste(sheet_img, (0, self.HEADER_HEIGHT))
|
||||
final_image.save(self.OUTPUT_IMAGE, quality=95, subsampling=0)
|
||||
|
||||
self.console.log(f"📷 Saved to {self.OUTPUT_IMAGE}")
|
||||
self.cleanup_screenshots()
|
||||
|
||||
def cleanup_screenshots(self):
|
||||
if os.path.exists(self.OUTPUT_DIR):
|
||||
shutil.rmtree(self.OUTPUT_DIR)
|
||||
self.console.log("✅ All extracted screenshots deleted.")
|
||||
|
||||
async def run(self, VIDEO_PATH, is_movie=False):
|
||||
self.VIDEO_PATH = VIDEO_PATH
|
||||
base = os.path.dirname(self.VIDEO_PATH)
|
||||
parent = os.path.dirname(base)
|
||||
filename = os.path.basename(self.VIDEO_PATH).replace("#", "") + "_screenshot.jpg"
|
||||
self.OUTPUT_IMAGE = os.path.join(base if is_movie else parent, filename)
|
||||
|
||||
duration, vcodec, acodec, audio_channels, resolution, size_mb, audio_lang, subtitle_lang = self.get_metadata()
|
||||
metadata_text = (
|
||||
f"{os.path.basename(self.VIDEO_PATH)}\n"
|
||||
f"{vcodec} | {acodec} {audio_channels} \n"
|
||||
f"{resolution} | {size_mb:.2f} MB | {duration/60:.2f} min | Audio: {audio_lang} | Subtitles: {subtitle_lang}"
|
||||
)
|
||||
self.console.log(f"🎬 Metadata: {metadata_text}")
|
||||
|
||||
timestamps = await self.extract_screenshots(duration)
|
||||
self.stitch_images(metadata_text, timestamps)
|
||||
return self.OUTPUT_IMAGE
|
||||
|
||||
async def upload_to_imgbb(self, image_path):
|
||||
|
||||
timestamp = str(int(time.time() * 1000))
|
||||
retry = 0
|
||||
|
||||
while retry < 5:
|
||||
form = aiohttp.FormData()
|
||||
form.add_field('source', open(image_path, 'rb'), filename=os.path.basename(image_path), content_type='image/jpeg')
|
||||
form.add_field('type', 'file')
|
||||
form.add_field('action', 'upload')
|
||||
form.add_field('timestamp', timestamp)
|
||||
form.add_field('auth_token', self.imgbb_token)
|
||||
form.add_field('nsfw', '0')
|
||||
form.add_field('mimetype', 'image/jpeg')
|
||||
|
||||
async with self.session.post(urljoin(self.base_url, '/json'), data=form) as response:
|
||||
await response.text() # drain the response
|
||||
if 200 <= response.status < 300:
|
||||
self.console.log("✅ Upload successful")
|
||||
await asyncio.sleep(5)
|
||||
data = await response.json()
|
||||
os.remove(image_path)
|
||||
return data['image']['url']
|
||||
else:
|
||||
self.console.warn(f"❌ Upload failed ({response.status})")
|
||||
retry += 1
|
||||
await asyncio.sleep(10)
|
||||
|
||||
self.console.error("❌ Max retries reached")
|
||||
return None
|
||||
|
||||
    async def login(self):
        """Create the aiohttp session, scrape the imgbb auth token and log in.

        Credentials are read from the local .env file (imgbb_id /
        imgbb_password). NOTE(review): on connection failure this calls
        exit(1) from library code — consider raising instead.
        """
        if not self.session:
            # Browser-like headers so the site accepts the client.
            self.session = aiohttp.ClientSession(headers={
                'accept': 'application/json',
                'accept-language': 'en-US,en;q=0.9,th;q=0.8',
                'cache-control': 'no-cache',
                'origin': 'https://imgbb.ws',
                'pragma': 'no-cache',
                'referer': 'https://imgbb.ws/',
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/145.0.0.0 Safari/537.36 Edg/145.0.0.0',
            })

        async with self.session.get(self.base_url) as response:
            html = await response.text()
            if response.status != 200:
                self.console.error(f"❌ Failed to connect to {self.base_url}: {response.status}")
                exit(1)

        # The auth token is a 40-char hex string embedded in the landing page.
        match = re.search(r'auth_token\s*=\s*"([a-f0-9]{40})"', html)
        if match:
            self.imgbb_token = match.group(1)
            self.console.log("Auth token:", self.imgbb_token)
        else:
            self.console.error("Auth token not found.")

        creds = dotenv_values(".env")
        data = {
            'login-subject': creds.get("imgbb_id"),
            'password': creds.get("imgbb_password"),
            'auth_token': self.imgbb_token,
        }
        async with self.session.post(urljoin(self.base_url, '/login'), data=data) as response:
            self.console.log(f"Login status: {response.status}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Ad-hoc manual test: dump a file's metadata (via TorrentCreator) to
    # output.json and its BBCode rendering to output_bb.txt.
    from lib.torrent_creator import TorrentCreator,ffprobe_streams_to_bbcode

    torrent = TorrentCreator()
    # VIDEO_PATH='/Entertainment_1/Anime/Series/365 Days to the Wedding (2024) [tvdbid-433584]/S01/365.Days.to.the.Wedding.2024.S01E01.Why.Dont.We.Get.Married.CR.WEBDL-1080P.X264.AAC.[SeFree].mkv'
    VIDEO_PATH='/Entertainment_1/Anime/Movie/KPop Demon Hunters (2025) [tmdbid-803796]/KPop.Demon.Hunters.2025.NF.WEBDL-1080P.AV1.EAC3.ATMOS.[SeFree].mkv'

    async def main():
        duration, video_codec, audio_codec, audio_channels, resolution, size_mb, audio_lang, subtitle_lang, json_metadata = torrent.get_metadata(VIDEO_PATH)
        with open("output.json","w") as f:
            json.dump(json_metadata,f,indent=4,ensure_ascii=True)

        bb=ffprobe_streams_to_bbcode(json_metadata, os.path.basename(VIDEO_PATH))
        with open("output_bb.txt","w") as f:
            # json.dump(bb,f,indent=4,ensure_ascii=True)
            f.write(bb)
    asyncio.run(main())
|
||||
0
lib/__init__.py
Normal file
0
lib/__init__.py
Normal file
247
lib/check_track_detail.py
Normal file
247
lib/check_track_detail.py
Normal file
@@ -0,0 +1,247 @@
|
||||
import re
|
||||
from langcodes import Language, tag_is_valid
|
||||
|
||||
def first_valid_bcp47(parts):
    """
    Return the first token in parts that is a valid BCP 47 tag,
    or None if none are valid.
    """
    for raw in parts:
        token = raw.strip()
        # Bracketed markers such as [Original] are not language tags.
        if token.startswith("[") and token.endswith("]"):
            continue
        # tag_is_valid copes with the usual casing conventions, so the
        # token is passed through unchanged.
        if tag_is_valid(token):
            return token
    return None
|
||||
|
||||
def extract_langs(text):
    """Scan a download log and collect audio / subtitle language tags.

    Returns (audio_langs, subtitle_langs), each a de-duplicated list in
    first-seen order (e.g. ['ja-JP', 'th-TH']).
    """
    LANG = r'([a-z]{2}(?:-[A-Z]{2})?)'
    audio_re = re.compile(rf'\[(AAC|DD\+?|AC-4|OPUS|VORB|DTS|ALAC|FLAC)\]\s*\|\s*{LANG}')
    sub_re = re.compile(rf'\[(SRT|SSA|ASS|VTT|TTML|SMI|SUB|MPL2|TMP|STPP|WVTT)\]\s*\|\s*{LANG}')

    audio, subs = [], []
    for line in text.splitlines():
        hit = audio_re.search(line)
        if hit and hit.group(2) not in audio:
            audio.append(hit.group(2))

        hit = sub_re.search(line)
        if hit and hit.group(2) not in subs:
            subs.append(hit.group(2))

    return audio, subs
|
||||
|
||||
def check_langs_with_langcodes(stderr_text: str, audio_lang_cfg: list[str], sub_lang_cfg: list[str]):
    """Compare the languages found in a log against the configured ones.

    For both audio and subtitles, reports the configured codes, the raw
    tags found, the normalized primary languages, and whether every
    configured language is present.
    """
    audio_tags, sub_tags = extract_langs(stderr_text)

    def primaries(tags):
        # Reduce full BCP 47 tags (e.g. 'ja-JP') to primary subtags ('ja').
        return {Language.get(t).language for t in tags}

    audio_langs_found = primaries(audio_tags)
    sub_langs_found = primaries(sub_tags)

    def report(configured, tags, found):
        return {
            "configured": configured,
            "found_tags": tags,
            "found_langs": sorted(found),
            "exists_all": all(Language.get(c).language in found for c in configured),
        }

    return {
        "audio": report(audio_lang_cfg, audio_tags, audio_langs_found),
        "subtitle": report(sub_lang_cfg, sub_tags, sub_langs_found),
    }
|
||||
|
||||
def video_details(stderr_text: str):
    """
    Parses the 'All Tracks' part (stopping at 'Selected Tracks') using a single regex.
    Returns a list of dicts with codec, range, resolution [w,h], bitrate (int kb/s),
    framerate (float or None if unknown), and size (e.g., '376.04 MiB').
    """
    # Anchored to 'VID | [ ... ]' so it never matches the log-level [I].
    vid_pattern = re.compile(r"""
        VID\s*\|\s*\[\s*(?P<codec>[^,\]]+)\s*(?:,\s*(?P<range>[^\]]+))?\]\s*\|\s*
        (?P<width>\d{3,4})x(?P<height>\d{3,4})\s*@\s*(?P<kbps>[\d,]+)\s*kb/s
        (?:\s*\((?P<size>[^()]*?(?:MiB|GiB)[^()]*)\))?\s*,\s*(?P<fps>\d+(?:\.\d+)?)\s*FPS
    """, re.VERBOSE)

    # Restrict to the inventory before 'Selected Tracks' when present.
    if "Selected Tracks" in stderr_text:
        inventory = stderr_text.split("Selected Tracks", 1)[0]
    else:
        inventory = stderr_text

    tracks = []
    for match in vid_pattern.finditer(inventory):
        fps = None
        if match.group("fps"):
            try:
                fps = float(match.group("fps"))
            except ValueError:
                fps = None  # fallback if numeric parse fails

        codec = match.group("codec")
        dynamic_range = match.group("range")
        size = match.group("size")
        tracks.append({
            "codec": codec.strip() if codec else None,
            "range": dynamic_range.strip() if dynamic_range else None,
            "resolution": [match.group("width"), match.group("height")],
            "bitrate": int(match.group("kbps").replace(",", "")),
            "framerate": fps,  # None when FPS is unknown
            "size": size.strip() if size else None,
        })

    return tracks
|
||||
|
||||
def extract_chapters(stderr_text: str):
    """
    Parse chapter lines from vinetrimmer-like logs.
    Returns: list of dicts: {'index': '01', 'time': '00:04:21.762', 'name': 'intro'}
    Stops parsing at 'Selected Tracks' to prefer the 'All Tracks' inventory if present.
    """
    # Matches lines like: "CHP | [01] | 00:04:21.762 | intro"
    chapter_pattern = re.compile(
        r"""
        ^.*?\bCHP\b\s*\|\s*\[(?P<index>\d{1,3})\]\s*\|\s*
        (?P<time>\d{2}:\d{2}:\d{2}(?:\.\d{1,4})?)\s*\|\s*
        (?P<name>.+?)\s*$
        """,
        re.IGNORECASE | re.MULTILINE | re.VERBOSE
    )
    # Prefer the part before 'Selected Tracks:' to capture the full menu.
    inventory = stderr_text.split("Selected Tracks:", 1)[0]
    return [
        {"index": m.group("index"), "time": m.group("time"), "name": m.group("name")}
        for m in chapter_pattern.finditer(inventory)
    ]
|
||||
|
||||
def extract_title(stderr_text: str) -> str | None:
|
||||
TITLE_RE = re.compile(r"Getting tracks for\s+(?P<title>.+?)\s*\[", re.IGNORECASE)
|
||||
|
||||
m = TITLE_RE.search(stderr_text)
|
||||
return m.group("title").strip() if m else None
|
||||
|
||||
def extract_file_path(stderr: str) -> str | None:
|
||||
import re
|
||||
m = re.search(r"File path -\s*\n([\s\S]*?)\n\s*\n", stderr)
|
||||
if not m:
|
||||
return None
|
||||
return "".join(line.strip() for line in m.group(1).splitlines())
|
||||
|
||||
def main():
    """Manual smoke test: run the parsers above against a captured sample log."""
    # Example usage
    stderr_example = """
 ▄• ▄▌ ▐ ▄ .▄▄ · ▄ .▄ ▄▄▄· ▄▄· ▄ •▄ ▄▄▌ ▄▄▄ .
 █▪██▌•█▌▐█▐█ ▀. ██▪▐█▐█ ▀█ ▐█ ▌▪█▌▄▌▪██• ▀▄.▀·
 █▌▐█▌▐█▐▐▌▄▀▀▀█▄██▀▐█▄█▀▀█ ██ ▄▄▐▀▀▄·██▪ ▐▀▀▪▄
 ▐█▄█▌██▐█▌▐█▄▪▐███▌▐▀▐█ ▪▐▌▐███▌▐█.█▌▐█▌▐▌▐█▄▄▌
 ▀▀▀ ▀▀ █▪ ▀▀▀▀ ▀▀▀ · ▀ ▀ ·▀▀▀ ·▀ ▀.▀▀▀ ▀▀▀
 v 4.0.0 - © 2025-2026 - github.com/unshackle-dl/unshackle

Service Config loaded
Loaded 1/1 Vaults
Loaded Widevine CDM: 8159 (L3)

─────────────────────────────── Service: CR ────────────────────────────────

Authenticated with Service

─ Series: Noble Reincarnation: Born Blessed, So I’ll Obtain Ultimate Powe… ─

1 seasons, S1(12)

─ Noble Reincarnation: Born Blessed, So I’ll Obtain Ultimate Power 2026 S… ─

1 Video
└── [H.264, SDR] | ja-JP | 1920x1080 @ 11038 kb/s, 23.976 FPS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ • 00:24 • Downloaded
2 Audio
├── [AAC] | ja-JP | 2.0 | 200 kb/s | Japanese
│ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ • 00:15 • Downloaded
└── [AAC] | th-TH | 2.0 | 201 kb/s | Thai
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ • 00:15 • Downloaded
2 Subtitles
├── [ASS] | th-TH | Thai
│ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ • 00:01 • Downloaded
└── [ASS] | en-US | English
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ • 00:01 • Downloaded
6 Chapters
├── 00:00:00.000 | Chapter 1
├── 00:02:11.000 | Chapter 2
├── 00:03:21.000 | Intro
├── 00:04:50.000 | Chapter 3
├── 00:21:56.000 | Credits
└── 00:23:27.000 | Chapter 4

Widevine(AAAAgnBzc2gAAAAA7e+LqXnWSs6jyCfc1R0h7QAAAGIIARIQx4rAibzIP...)
└── c78ac089bcc83e8ea8fe89729f1093c7:f4797a42fa189a1326dc3da31b8957ab*
from Local SQLite

Widevine(AAAAgnBzc2gAAAAA7e+LqXnWSs6jyCfc1R0h7QAAAGIIARIQrm5MD9N8M...)
└── ae6e4c0fd37c32d5be7a3188ce31a60b:d11e30c933334530a5e591e58978929c*
from Local SQLite

Track downloads finished in 0m24s
Using 'DejaVu Sans' as fallback for 'Arial Unicode MS'
Using 'Liberation Sans' as fallback for 'Arial'
Using 'Liberation Serif' as fallback for 'Times New Roman'
Using 'DejaVu Sans' as fallback for 'Trebuchet MS'
Attached 5 fonts for the Subtitles
Repacked one or more tracks with FFMPEG
Multiplexing... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ • 00:03

🎉 Title downloaded in 0m29s!

File path -
/Entertainment_1/Downloads/USCK/Noble.Reincarnation.Born.Blessed.So.Il
l.Obtain.Ultimate.Power.2026.S01.1080p.CR.WEB-DL.DUAL.AAC2.0.H.264-[Se
Free]/Noble.Reincarnation.Born.Blessed.So.Ill.Obtain.Ultimate.Power.20
26.S01E11.Disinheritance.1080p.CR.WEB-DL.DUAL.AAC2.0.H.264-[SeFree].mk
v

Processed all titles in 0m33s
"""
    # audio_lang_cfg = "ja,th"
    # sub_lang_cfg = "th,en"

    # audio_lang_cfg= audio_lang_cfg.split(",")
    # sub_lang_cfg = sub_lang_cfg.split(",")

    # title = extract_title(stderr_example)
    # vid_details = video_details(stderr_example)
    # chapters = extract_chapters(stderr_example)
    # lang_result = check_langs_with_langcodes(stderr_example, audio_lang_cfg, sub_lang_cfg)

    # print(title)
    # print(vid_details)
    # print(chapters)
    # print(lang_result)
    # print("dsfasdf")
    print(extract_file_path(stderr_example))


if __name__ == "__main__":
    main()
|
||||
395
lib/db.py
Normal file
395
lib/db.py
Normal file
@@ -0,0 +1,395 @@
|
||||
import sqlite3
|
||||
from datetime import datetime
|
||||
|
||||
class Today_Queue_Status:
    """String constants for the lifecycle states of a today_queue entry."""

    # Initial state when an episode is first queued.
    queue:str="queue"

    # Search phase.
    waiting:str="waiting"
    failed_search:str="failed search"
    search_found:str="search found"

    # Download phase.
    downloading:str="downloading"
    failed_download:str="failed download"
    downloaded:str="downloaded" ## finished

    # Import phase.
    importing:str="importing"
    failed_import:str="failed import"
    imported:str="imported"

    completed:str="completed" ## finished

    # Upload phase.
    uploading:str="uploading"
    uploaded:str="uploaded"
    fail_upload:str="failed upload"
|
||||
class sqlite_db:
|
||||
def __init__(self,db_path, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self.db_path=db_path
|
||||
|
||||
def find_title_config_db(self,title):
|
||||
try:
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
conn.row_factory = sqlite3.Row # Enables dictionary access
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT * FROM watchlist WHERE Title = ?", (title,))
|
||||
title_data = cursor.fetchone()
|
||||
conn.close()
|
||||
|
||||
return dict(title_data) if title_data is not None else None
|
||||
except Exception:
|
||||
return
|
||||
|
||||
def add_today_queue(self,queue,is_clear_queue:bool=False,is_clear_title:bool=False):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
cursor = conn.cursor()
|
||||
if is_clear_queue:
|
||||
cursor.execute("DROP TABLE IF EXISTS today_queue")
|
||||
conn.commit()
|
||||
if is_clear_title:
|
||||
cursor.execute("DELETE FROM today_queue WHERE title = ? AND season = ? AND episode = ?", (queue[0]['title'], queue[0]['season'], queue[0]['episode']))
|
||||
conn.commit()
|
||||
cursor.execute("""
|
||||
CREATE TABLE IF NOT EXISTS today_queue (
|
||||
title TEXT,
|
||||
season INTEGER,
|
||||
episode INTEGER,
|
||||
start_timestamp INTEGER,
|
||||
status TEXT DEFAULT queue
|
||||
)
|
||||
""")
|
||||
cursor.executemany("""
|
||||
INSERT
|
||||
OR IGNORE INTO today_queue (title, season, episode, start_timestamp )
|
||||
VALUES (:title, :season, :episode, :start_timestamp)
|
||||
""", queue)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
def add_download_history(self,queue,is_clear_queue:bool=False,is_clear_title:bool=False):
|
||||
# print(queue)
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
cursor = conn.cursor()
|
||||
if is_clear_queue:
|
||||
cursor.execute("DROP TABLE IF EXISTS download_history")
|
||||
conn.commit()
|
||||
if is_clear_title:
|
||||
cursor.execute("DELETE FROM today_queue WHERE title = ? AND season = ? AND episode = ?", (queue[0]['title'], queue[0]['season'], queue[0]['episode']))
|
||||
conn.commit()
|
||||
cursor.execute("""
|
||||
CREATE TABLE IF NOT EXISTS download_history (
|
||||
title TEXT,
|
||||
season INTEGER,
|
||||
episode INTEGER,
|
||||
start_timestamp INTEGER,
|
||||
status TEXT DEFAULT queue
|
||||
)
|
||||
""")
|
||||
|
||||
cursor.executemany("""
|
||||
INSERT
|
||||
OR IGNORE INTO download_history (title, season, episode, start_timestamp)
|
||||
VALUES (:title, :season, :episode, :start_timestamp)
|
||||
""", queue)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
def get_today_queue(self):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT * FROM today_queue")
|
||||
today_queue = cursor.fetchall()
|
||||
conn.close()
|
||||
return[dict(x) for x in today_queue] if today_queue is not None else None
|
||||
|
||||
def get_watchlist(self):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT * FROM watchlist")
|
||||
watchlist = cursor.fetchall()
|
||||
conn.close()
|
||||
return [dict(x) for x in watchlist] if watchlist is not None else None
|
||||
|
||||
def get_schedule(self):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT * FROM schedule")
|
||||
schedule = cursor.fetchall()
|
||||
conn.close()
|
||||
return [dict(x) for x in schedule] if schedule is not None else None
|
||||
|
||||
def add_watchlist(self, entry):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
INSERT OR IGNORE INTO watchlist (ID, Service, Title, if_dub, url, url_org, audio_lang, sub_lang, quality, codec, range, audio_channel, title_lang, org_lang, season)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""", (entry['ID'], entry['Service'], entry['Title'], entry['if_dub'], entry['url'], entry['url_org'],
|
||||
entry['audio_lang'], entry['sub_lang'], entry['quality'], entry['codec'],
|
||||
entry['range'], entry['audio_channel'], entry['title_lang'], entry['org_lang'], entry['season']))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
def update_download_status(self, title, season, episode, status):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
UPDATE today_queue
|
||||
SET status = ?
|
||||
WHERE title = ? AND season = ? AND episode = ?
|
||||
""", (status, title, season, episode))
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
def update_download_history_status(self, title, season, episode, status):
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
UPDATE download_history
|
||||
SET status = ?
|
||||
WHERE title = ? AND season = ? AND episode = ?
|
||||
""", (status, title, season, episode))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
def get_download_status(self, title, season,episode):
|
||||
# print('test')
|
||||
conn = sqlite3.connect(self.db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
SELECT status FROM today_queue
|
||||
WHERE title = ? AND season = ? AND episode = ?
|
||||
""", (title, season, episode))
|
||||
status = cursor.fetchone()
|
||||
conn.close()
|
||||
# print('testss')
|
||||
return dict(status) if status is not None else None
|
||||
|
||||
# def get_overwrite_schedule(self) -> list[dict]:
|
||||
# overwrite_entries = []
|
||||
# weekday_map = {
|
||||
# 'Monday': 1,
|
||||
# 'Tuesday': 2,
|
||||
# 'Wednesday': 3,
|
||||
# 'Thursday': 4,
|
||||
# 'Friday': 5,
|
||||
# 'Saturday': 6,
|
||||
# 'Sunday': 7
|
||||
# }
|
||||
|
||||
# conn = sqlite3.connect(self.db_path)
|
||||
# conn.row_factory = sqlite3.Row
|
||||
# cursor = conn.cursor()
|
||||
# cursor.execute("""
|
||||
# SELECT * FROM schedule
|
||||
# WHERE title = ?
|
||||
# """, (entry['title'],))
|
||||
# overwrite_schedule = cursor.fetchone()
|
||||
# conn.close()
|
||||
|
||||
# overwrite_schedule = dict(overwrite_schedule) if overwrite_schedule is not None else {}
|
||||
|
||||
# dt=datetime.fromtimestamp(entry['start_timestamp'])
|
||||
|
||||
# if overwrite_schedule.get('air_time'):
|
||||
|
||||
# iso_time = datetime.fromisoformat(f"{dt.year}-{dt.month:02}-{dt.day:02}T{overwrite_schedule['air_time'][:2]:02}:{overwrite_schedule['air_time'][2:]:02}:{dt.second:02}")
|
||||
# dt_overwrite = int(iso_time.timestamp())
|
||||
|
||||
# entry['start_timestamp'] = dt_overwrite
|
||||
|
||||
# if overwrite_schedule.get('offset') and overwrite_schedule.get('offset') != 0:
|
||||
# entry['episode'] += overwrite_schedule['offset']
|
||||
|
||||
# if overwrite_schedule.get('day_of_week'):
|
||||
# for dow in overwrite_schedule.get('day_of_week').split(','):
|
||||
# day_of_week = dow
|
||||
|
||||
# if isinstance(day_of_week, str):
|
||||
# day_of_week = weekday_map.get(day_of_week, None)
|
||||
|
||||
# if day_of_week is None:
|
||||
# continue
|
||||
|
||||
# current_day = dt.isoweekday()
|
||||
# days_difference = (day_of_week - current_day)
|
||||
|
||||
# if days_difference == 0:
|
||||
# continue
|
||||
# # print(days_difference)
|
||||
# print(entry['title'],entry['episode'])
|
||||
# # print(datetime.fromtimestamp(entry['start_timestamp']))
|
||||
# # print(datetime.fromtimestamp(entry['start_timestamp']))
|
||||
# dt = datetime.fromtimestamp(entry['start_timestamp']).replace(hour=int(overwrite_schedule['air_time'][:2]), minute=int(overwrite_schedule['air_time'][2:]), second=0, microsecond=0) + timedelta(days=days_difference)
|
||||
# # print(dt)
|
||||
|
||||
# entry['start_timestamp'] = int(dt.timestamp())
|
||||
# print(datetime.fromtimestamp(entry['start_timestamp']))
|
||||
|
||||
# # print(datetime.today().strftime('%Y-%m-%d'))
|
||||
# # print(datetime.fromtimestamp(entry['start_timestamp']))
|
||||
# if datetime.today().strftime('%Y-%m-%d') != datetime.fromtimestamp(entry['start_timestamp']).strftime('%Y-%m-%d') :
|
||||
# continue
|
||||
|
||||
# overwrite_entries.append(entry)
|
||||
|
||||
# return overwrite_entries
|
||||
|
||||
def add_overwrite_schedule(self, entry) -> str:
    """Insert or update a per-title schedule override (upsert on title).

    Args:
        entry: dict with keys 'title', 'air_time', 'day_of_week', 'offset'.

    Returns:
        A human-readable status string.
    """
    conn = sqlite3.connect(self.db_path)
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM schedule WHERE title = ?", (entry['title'],))
        row = cursor.fetchone()
        existing = dict(row) if row is not None else None

        # Compare only the fields this method manages: the row may carry extra
        # bookkeeping columns (e.g. last_ep, last_date), so comparing the whole
        # row dict to `entry` (as the old code did) could never match.
        if existing is not None and all(
            existing.get(k) == entry.get(k)
            for k in ('title', 'air_time', 'day_of_week', 'offset')
        ):
            return 'No changes made, entry already exists.'

        cursor.execute("""
            INSERT INTO schedule (title, air_time, day_of_week, offset)
            VALUES (?, ?, ?, ?)
            ON CONFLICT(title) DO UPDATE SET
                air_time=excluded.air_time,
                day_of_week=excluded.day_of_week,
                offset=excluded.offset
        """, (entry['title'], entry['air_time'], entry['day_of_week'], entry['offset']))
        conn.commit()
        return 'Entry added or updated successfully.'
    finally:
        # Close the handle even if a query raises, so it never leaks.
        conn.close()
|
||||
|
||||
def get_today_schedule(self):
    """Build today's release list by cross-referencing the schedule table
    with the watchlist.

    Returns one detail dict per expected episode (multi_release entries are
    expanded and staggered 5 minutes apart), sorted by start_timestamp.
    """
    # ISO weekday numbers (Monday=1 .. Sunday=7), matching datetime.isoweekday().
    weekday_map = {
        'Monday': 1,
        'Tuesday': 2,
        'Wednesday': 3,
        'Thursday': 4,
        'Friday': 5,
        'Saturday': 6,
        'Sunday': 7
    }
    schedule=self.get_schedule()
    watchlist=self.get_watchlist()

    today_list=[]
    for entry in schedule:
        # First watchlist item whose Title matches this schedule entry.
        # NOTE(review): result can be None; result['Title'] below would then
        # raise TypeError — confirm every scheduled title is also watchlisted.
        result = next((item for item in watchlist if item['Title'] == entry['title']), None)
        # print(entry)
        # Skip shows that already reached their final episode.
        # NOTE(review): raises TypeError if last_ep or end_ep is NULL/None.
        if entry['last_ep'] >= entry['end_ep']:
            continue


        if not entry['day_of_week']:
            continue
        # day_of_week is a comma-separated list of weekday names.
        for dow in entry['day_of_week'].split(','):
            if datetime.today().isoweekday() != weekday_map[dow.strip()]:
                continue
            # Already handled today — avoid producing duplicates.
            if entry['last_date'] == datetime.now().date().strftime('%Y-%m-%d'):
                continue
            # air_time is "HHMM"; anchor today's date at that wall-clock time.
            timestamp = int(datetime.now().replace(hour=int(entry['air_time'][:2]), minute=int(entry['air_time'][2:]), second=0, microsecond=0).timestamp())

            # One entry per expected release in a multi-episode drop.
            for i in range(entry['multi_release']):

                detail ={
                    "title": result['Title'],
                    "season": int(result['season']) if isinstance(result['season'], int) else 1,
                    # Next episode number; `+ +i+1` is a unary plus, i.e. last_ep + i + 1.
                    "episode": ((int(entry['last_ep'])) if entry['last_ep'] is not None else 0) + +i+1,
                    "sonarr_id": result['ID'],
                    "air_time": entry['air_time'],
                    "day_of_week": entry['day_of_week'],
                    "offset": entry['offset'],
                    # Stagger each extra episode of the drop by 5 minutes.
                    "start_timestamp":timestamp + (60*(i*5))
                }

                # print(detail)
                today_list.append(detail)
    today_list = sorted(today_list, key=lambda x: x["start_timestamp"])
    return today_list
|
||||
|
||||
def update_schedule_episode(self, title, episode):
    """Advance a show's last-seen episode and stamp today's date.

    Optimistic guard: the UPDATE only applies when the stored last_ep is
    exactly episode - 1, so concurrent or repeated calls cannot skip or
    double-advance an episode.

    Args:
        title: schedule row key (schedule.title).
        episode: the episode number just processed.
    """
    conn = sqlite3.connect(self.db_path)
    try:
        conn.execute(
            """
            UPDATE schedule
            SET last_ep = ?, last_date = ?
            WHERE title = ? AND last_ep = ?
            """,
            (episode, datetime.now().date().strftime('%Y-%m-%d'), title, episode - 1),
        )
        conn.commit()
    finally:
        # Close the handle even if the UPDATE raises, so it never leaks.
        conn.close()
|
||||
|
||||
def get_show_by_date(self, date):
    """Return all schedule rows airing on the given ISO weekday.

    Args:
        date: ISO weekday number (1=Monday .. 7=Sunday).

    Returns:
        List of row dicts; empty list when nothing matches. (The old
        `if shows is not None` branch was dead code — fetchall() never
        returns None — so the return contract is unchanged.)
    """
    weekday_map = {
        1: 'Monday',
        2: 'Tuesday',
        3: 'Wednesday',
        4: 'Thursday',
        5: 'Friday',
        6: 'Saturday',
        7: 'Sunday'
    }
    conn = sqlite3.connect(self.db_path)
    try:
        conn.row_factory = sqlite3.Row
        # day_of_week stores a comma-separated list of names, hence LIKE.
        rows = conn.execute(
            """
            SELECT *
            FROM schedule
            WHERE day_of_week LIKE ?
            """,
            (f"%{weekday_map[date]}%",),
        ).fetchall()
        return [dict(r) for r in rows]
    finally:
        # Close the handle even if the query raises, so it never leaks.
        conn.close()
|
||||
def get_show_by_title(self, title):
    """Return all schedule rows whose title matches exactly.

    Returns:
        List of row dicts; empty list when nothing matches. (The old
        `if shows is not None` branch was dead code — fetchall() never
        returns None — so the return contract is unchanged.)
    """
    conn = sqlite3.connect(self.db_path)
    try:
        conn.row_factory = sqlite3.Row
        rows = conn.execute(
            """
            SELECT *
            FROM schedule
            WHERE title = ?
            """,
            (title,),
        ).fetchall()
        return [dict(r) for r in rows]
    finally:
        # Close the handle even if the query raises, so it never leaks.
        conn.close()
|
||||
|
||||
|
||||
def get_torrent_detail(self, title):
    """Fetch a single torrent row by title.

    Returns:
        The row as a dict, or None when the title has no torrent row.
    """
    conn = sqlite3.connect(self.db_path)
    try:
        conn.row_factory = sqlite3.Row
        row = conn.execute(
            """
            SELECT * FROM torrent
            WHERE title = ?
            """,
            (title,),
        ).fetchone()
        return dict(row) if row is not None else None
    finally:
        # Close the handle even if the query raises, so it never leaks.
        conn.close()
|
||||
|
||||
def update_torrent_detail(self, title, qbit_name, episode):
    """Record the latest episode and the qBittorrent name for a torrent row.

    Args:
        title: torrent row key (torrent.title).
        qbit_name: torrent's name inside qBittorrent.
        episode: last episode number now contained in the torrent.
    """
    conn = sqlite3.connect(self.db_path)
    try:
        conn.execute(
            """
            UPDATE torrent
            SET last_ep = ?, qbit_name = ?
            WHERE title = ?
            """,
            (episode, qbit_name, title),
        )
        conn.commit()
    finally:
        # Close the handle even if the UPDATE raises, so it never leaks.
        conn.close()
|
||||
|
||||
53
lib/discord_bot.py
Normal file
53
lib/discord_bot.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import discord
|
||||
from discord.ext import commands
|
||||
# from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
|
||||
|
||||
from dotenv import dotenv_values
|
||||
from lib.logging_data import logger
|
||||
|
||||
|
||||
|
||||
|
||||
# Bot configuration
# Gateway intents: start from the defaults, then enable the two privileged
# intents the bot relies on (both must also be switched on in the Discord
# developer portal or the gateway will reject the connection).
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
|
||||
|
||||
|
||||
class ScheduleBot(commands.Bot):
    """Discord bot that owns an AsyncIO job scheduler and a shared logger.

    Extra keyword arguments are forwarded to commands.Bot.
    """

    def __init__(self, console=None, **kwargs):
        super().__init__(
            command_prefix='!',
            intents=intents,
            help_command=None,
            **kwargs
        )
        # Fall back to a file logger when none is injected. logger() takes
        # (app_name, log_dir, ...): the previous call passed the file path
        # "./log/app.log" as app_name, producing a mangled log file name.
        # .get() avoids a KeyError when gotify_token is absent from .env.
        self.console = console if console else logger(
            app_name="discord_bot",
            log_dir="./log",
            gotify_config=dotenv_values('.env').get('gotify_token'),
        )

        self.dotenv_path = '.env'

        self.scheduler = AsyncIOScheduler(
            job_defaults={
                'misfire_grace_time': 300,  # run if up to 5 minutes late
                'max_instances': 1,         # prevent overlapping runs for the same job
                'coalesce': True            # useful for cron/interval jobs
            })

    async def setup_hook(self):
        """Called when the bot is starting up"""
        self.console.log(f"Logged in as {self.user} (ID: {self.user.id})")

        # Sync slash commands with Discord; failure is logged, not fatal.
        try:
            synced = await self.tree.sync()
            self.console.log(f"Synced {len(synced)} command(s)")
        except Exception as e:
            self.console.log(f"Failed to sync commands: {e}")
|
||||
232
lib/logging_data.py
Normal file
232
lib/logging_data.py
Normal file
@@ -0,0 +1,232 @@
|
||||
|
||||
import asyncio
import logging
import sys
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path

# from rich.logging import RichHandler # optional
import discord
import requests
from discord import SyncWebhook
from dotenv import dotenv_values
|
||||
|
||||
class logger:
    """Application logger with daily file rotation plus optional Gotify and
    Discord notification fan-out.

    Current file name: <log_dir>/<app_name>.log
    Rotated backups:   <app_name>.log.YYYY-MM-DD
    """

    def __init__(
        self,
        app_name="app",
        log_dir="../log",
        gotify_config=None,
        discord_config=None,
        level=logging.DEBUG,
        use_utc=False,   # rotate at UTC midnight if True
        keep_days=7,     # retention in days
    ):
        """Configure a named logger with console + daily-rotating file output.

        Args:
            app_name: logger name and log-file stem.
            log_dir: directory for log files (created if missing).
            gotify_config: token string, or a dict with a "token" key.
            discord_config: Discord channel id used by callers (stored only).
            level: minimum level for both handlers.
            use_utc: rotate at UTC midnight instead of local midnight.
            keep_days: number of rotated backups to keep.
        """
        # ---- Ensure directory ----
        log_dir_path = Path(log_dir)
        log_dir_path.mkdir(parents=True, exist_ok=True)

        self.app_name = app_name
        self.log_dir_path = log_dir_path

        base_log_path = log_dir_path / f"{app_name}.log"

        # ---- Formatter using `{}` style ----
        LOG_FORMAT = "{asctime} [{levelname[0]}] {name} : {message}"
        LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
        LOG_STYLE = "{"
        LOG_FORMATTER = logging.Formatter(LOG_FORMAT, LOG_DATE_FORMAT, LOG_STYLE)

        # Console handler (swap to RichHandler for pretty output if desired).
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(LOG_FORMATTER)

        # Rotates at midnight, keeps the last `keep_days` files.
        file_handler = TimedRotatingFileHandler(
            filename=str(base_log_path),
            when="midnight",
            interval=1,
            backupCount=keep_days,
            encoding="utf-8",
            utc=use_utc,
        )
        # Suffix for rotated files: app.log.YYYY-MM-DD
        file_handler.suffix = "%Y-%m-%d"
        file_handler.setLevel(level)
        file_handler.setFormatter(LOG_FORMATTER)

        # Dedicated named logger; strip pre-existing handlers so repeated
        # construction does not duplicate output.
        self.logger = logging.getLogger(app_name)
        self.logger.setLevel(level)
        for h in list(self.logger.handlers):
            self.logger.removeHandler(h)
        self.logger.addHandler(console_handler)
        self.logger.addHandler(file_handler)
        # Stop propagation to root to prevent double logging.
        self.logger.propagate = False

        # ---- Instance state for the Discord delivery worker ----
        self.client = None
        self.worker_started = False
        self.queue = asyncio.Queue()

        # ---- Notification configs ----
        if gotify_config:
            self.gotify_token = gotify_config if isinstance(gotify_config, str) else gotify_config.get("token")
            if self.gotify_token:
                self.url = f"https://gotify.panitan.net/message?token={self.gotify_token}"
            else:
                self.url = None
                self.logger.warning("Gotify token missing in config.")
        else:
            self.url = None
            self.gotify_token = None

        if discord_config:
            self.discord_channel_id = discord_config

        # Inform where we're logging
        self.logger.info(f"Logging to {base_log_path} (rotates daily, keep {keep_days} days).")

    # ---- Internal helper ----
    def _log_lines(self, message, log_level):
        """Emit each non-empty line of `message` as one log record."""
        message = str(message)
        # Attach a traceback only for ERROR-level records AND only when an
        # exception is actually being handled; unconditionally passing
        # exc_info=True logged the confusing placeholder "NoneType: None".
        attach_tb = log_level == logging.ERROR and sys.exc_info()[0] is not None
        for line in message.split('\n'):
            if line:
                self.logger.log(log_level, line, exc_info=attach_tb)

    # ---- Public log APIs ----
    def log(self, *message, is_gotify=False, is_discord: dict = None, image_url=None):
        """INFO-level log; optionally fan out to Gotify / Discord (best effort)."""
        message = " ".join(str(m) for m in message)
        self._log_lines(message, logging.INFO)
        try:
            if is_gotify:
                self.gotify(message, "Logging", image_url)
            if is_discord:
                self.discord(is_discord)
        except Exception:
            # Notification failures must never break the caller.
            return

    def debug(self, *message, is_gotify=False, is_discord=None, image_url=None):
        """DEBUG-level log; optionally fan out to Gotify / Discord (best effort)."""
        message = " ".join(str(m) for m in message)
        self._log_lines(message, logging.DEBUG)
        try:
            if is_gotify:
                self.gotify(message, "Debug", image_url)
            if is_discord:
                self.discord(is_discord)
        except Exception:
            return

    def error(self, *message, is_gotify=False, is_discord=None, image_url=None):
        """ERROR-level log (with traceback when inside an except block)."""
        message = " ".join(str(m) for m in message)
        self._log_lines(message, logging.ERROR)
        try:
            if is_gotify:
                self.gotify(message, "Error", image_url)
            if is_discord:
                self.discord(is_discord)
        except Exception:
            return

    def warn(self, *message, is_gotify=False, is_discord=None, image_url=None):
        """WARNING-level log; optionally fan out to Gotify / Discord (best effort)."""
        message = " ".join(str(m) for m in message)
        self._log_lines(message, logging.WARNING)
        try:
            if is_gotify:
                self.gotify(message, "Warning", image_url)
            if is_discord:
                self.discord(is_discord)
        except Exception:
            return

    # ---- Notifiers ----
    def gotify(self, msg, title, image_url=None):
        """POST a markdown message to the Gotify server; no-op if unconfigured."""
        if not self.url or not self.gotify_token:
            self.logger.warning("Gotify not configured; skipping notification.")
            return

        if image_url:
            # Embed the image as markdown. The old code ignored image_url and
            # sent the literal text "!Image".
            msg = f"{msg}\n\n![Image]({image_url})"

        try:
            requests.post(
                self.url,
                json={
                    "message": msg,
                    "title": title,
                    "extras": {"client::display": {"contentType": "text/markdown"}}
                },
                headers={"X-Gotify-Key": self.gotify_token},
                timeout=10,
            )
        except Exception as e:
            self.logger.error(f"Gotify notification failed: {e}")

    def discord(self, config: dict):
        """Queue a Discord embed for async delivery.

        config keys: "channel" (channel object), "embed" (discord.Embed),
        optional "web_hook_urls" (list of webhook URL strings).
        """
        channel = config.get("channel")
        embed = config.get("embed")
        web_hook_urls = config.get("web_hook_urls", [])
        # Require BOTH a channel and an embed. The previous guard
        # (`if not channel and embed`) only bailed out when the channel was
        # missing AND an embed was present, letting None values through.
        if not (channel and embed):
            return
        try:
            if self.client is None:
                # NOTE(review): this client is created but never connected —
                # no login()/start() call is visible here, and worker() waits
                # on wait_until_ready(). Confirm a caller runs the client.
                self.client = discord.Client(intents=discord.Intents.default())

            if not self.worker_started:
                self.client.loop.create_task(self.worker())
                self.worker_started = True

            self.queue.put_nowait((channel, embed, web_hook_urls))
        except Exception as e:
            self.logger.error(f"Discord notification failed: {e}")

    async def worker(self):
        """Drain the queue and deliver each embed to its channel + webhooks."""
        await self.client.wait_until_ready()

        while True:
            channel, embed, web_hook_urls = await self.queue.get()

            try:
                await channel.send(embed=embed)

                for url in web_hook_urls:
                    # NOTE(review): SyncWebhook blocks the event loop; consider
                    # discord.Webhook.from_url(..., session=...) for async send.
                    webhook = SyncWebhook.from_url(url)
                    webhook.send(embed=embed)

            except Exception as e:
                self.logger.error(f"Discord send error: {e}")

            self.queue.task_done()
|
||||
if __name__ == "__main__":
    # Smoke test: build a logger from .env and emit one message on every
    # channel. .get() avoids a KeyError when a key is missing from .env
    # (the old code indexed DISCORD_CHANNEL_ID directly and also contained
    # a stray no-op `print` expression).
    env = dotenv_values('.env')
    console = logger(app_name="scheduler", log_dir="./log",
                     gotify_config=env.get("gotify_token"),
                     discord_config=env.get("DISCORD_CHANNEL_ID"),
                     level=logging.DEBUG)
    console.log("This is a test log message.", "blah", is_gotify=True,
                is_discord={"channel": None, "embed": None, "web_hook_urls": []})
|
||||
170
lib/sonarr.py
Normal file
170
lib/sonarr.py
Normal file
@@ -0,0 +1,170 @@
|
||||
from dotenv import dotenv_values
|
||||
import requests
|
||||
|
||||
import pytz
|
||||
|
||||
class Sonarr_API:
    """Thin client for a Sonarr v3 server: manual-import workflow plus a few
    metadata lookups. All requests authenticate via the X-Api-Key header."""

    def __init__(self,ip:str,key:str,**kwargs):
        """Store connection details.

        Args:
            ip: host (and optional port) of the Sonarr server, e.g. "host:8989".
            key: Sonarr API key.
        """
        super().__init__(**kwargs)
        self.ip=ip
        self.key=key
        # Timezone used by the (currently commented-out) calendar parsing below.
        self.tz = pytz.timezone("Asia/Bangkok")

        # self.db= sqlite_db(self.db_path)

        # iCal feed of upcoming episodes (includes unmonitored ones).
        self.calendar_url = f"http://{ip}/feed/v3/calendar/Sonarr.ics?unmonitored=true&apikey={key}"

    def get_episodes(self, folder=None) -> dict:
        """List importable files found by Sonarr's manual-import scan.

        Args:
            folder: path Sonarr should scan for importable media.

        Returns:
            Parsed JSON from GET /api/v3/manualimport.

        Raises:
            requests.HTTPError: on a non-2xx response.
        """
        # print(folder)
        params = {
            # 'seriesId': id,
            'folder': folder
        }

        base_url = f"http://{self.ip}/api/v3/manualimport"
        headers = {
            "X-Api-Key": self.key,
        }

        r = requests.get(base_url, headers=headers,params=params)
        r.raise_for_status()
        # print(r.json())

        return r.json()

    def import_episodes(self, entry:dict,title_config:dict, episodes_dict: list,mode:str="move") -> list[requests.Response]:
        """Trigger a ManualImport command for every scanned file whose episode
        matches `entry` (after applying the absolute-numbering offsets from
        `title_config`).

        Args:
            entry: target with 'season' and 'episode' numbers.
            title_config: per-title config; 'absolute_season'/'absolute' are
                additive offsets, 'season' the base season for absolute mode.
            episodes_dict: list of manual-import candidates (as returned by
                get_episodes()['...']); despite the name this is a list.
            mode: Sonarr importMode, "move" or "copy".

        Returns:
            The requests.Response for each issued import command.

        Raises:
            requests.HTTPError: on a non-2xx response from the command endpoint.
        """
        base_url = f"http://{self.ip}/api/v3/command"
        headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Content-Type': 'application/json',
            'X-Api-Key': self.key
        }
        responses=[]

        for d in episodes_dict:

            # Skip candidates Sonarr could not fully identify.
            if not d.get('path') or not d.get('series') or not d.get('episodes'):
                continue
            for e in d['episodes']:
                season=int(entry['season'])
                # NOTE(review): in absolute mode the season comes from
                # title_config['season'] rather than entry['season'] — confirm
                # this asymmetry with entry/title_config is intended.
                if title_config['absolute_season']:
                    season=int(title_config['season'])+int(title_config['absolute_season'])
                ep=int(entry['episode'])
                if title_config['absolute']:
                    ep=int(entry['episode'])+int(title_config['absolute'])
                # Only import the exact season/episode we are targeting.
                if not (str(season) == str(e['seasonNumber']) and str(ep) == str(e['episodeNumber'])\
                    # and str(title_config['ID']) == str(e['seriesId'])\
                    ):

                    continue
                # print(title_config['ID'],str(entry['season']), str(e['seasonNumber']), str(entry['episode']), str(e['episodeNumber']))


                data = {
                    "name": "ManualImport",
                    "files": [{
                        "path": d['path'],
                        "seriesId": d['series']['id'],
                        "episodeIds": [e['id']],
                        "releaseGroup": d.get('releaseGroup'),
                        "quality": d.get('quality'),
                        "languages": d.get('languages'),
                        "indexerFlags": d.get('indexerFlags')
                    }],
                    "importMode": mode
                }

                r = requests.post(base_url, headers=headers, json=data)
                r.raise_for_status()
                responses.append(r)

        return responses

    def get_episode_detail(self, episodeIds) -> dict:
        """Fetch one episode record by its Sonarr episode id.

        Raises:
            requests.HTTPError: on a non-2xx response.
        """
        # params = {
        #     'seriesId': seriesId,
        #     'episodeIds': episodeIds
        # }

        base_url = f"http://{self.ip}/api/v3/episode/{episodeIds}"
        headers = {
            "X-Api-Key": self.key,
        }
        r = requests.get(base_url, headers=headers)
        r.raise_for_status()
        # print(r.json())

        return r.json()

    def get_series_detail(self,id):
        """Fetch one series record by its Sonarr series id (no status check)."""
        headers = {
            'Accept': 'application/json',
            'X-Api-Key': self.key,
        }

        response = requests.get(f'http://{self.ip}/api/v3/series/{id}', headers=headers)
        return response.json()

    def get_episode_detail_from_season(self,seriesId,season):
        """List all episode records of one season of a series (no status check)."""
        response=requests.get(
            f"http://{self.ip}/api/v3/episode",
            headers={
                "Accept": "application/json",
                "X-Api-Key": self.key
            },
            params={
                "seriesId":seriesId,
                "seasonNumber":season
            }
        )
        return response.json()
|
||||
|
||||
|
||||
# def get_today_schedule(self) -> list[dict]:
|
||||
# response = requests.get(self.calendar_url)
|
||||
# response.raise_for_status() # Raises an error if the request failed
|
||||
|
||||
# # Parse the calendar
|
||||
# calendar = Calendar(response.text)
|
||||
|
||||
# db_watchlist=self.db.get_watchlist()
|
||||
|
||||
# # List all events
|
||||
# daily_list=[]
|
||||
|
||||
# for event in calendar.events:
|
||||
|
||||
# if date.today() != event.begin.date():
|
||||
# continue
|
||||
|
||||
# title_split = event.name.split(" - ")
|
||||
# title= title_split[0].strip()
|
||||
|
||||
# if not any(x['Title'] == title for x in db_watchlist):
|
||||
# continue
|
||||
|
||||
# ep = title_split[-2].split('x')
|
||||
# season = ep[-2].strip() if ep else "1"
|
||||
# episode = ep[-1].strip() if ep else "1"
|
||||
# episode_name = event.name.split(" - ")[2].strip()
|
||||
# if int(season) < 1 or int(episode) < 1:
|
||||
# continue
|
||||
|
||||
# detail ={
|
||||
# "title": title,
|
||||
# "season": int(season),
|
||||
# "episode": int(episode),
|
||||
# "episode_name": episode_name,
|
||||
# "start_timestamp": int(datetime.fromisoformat(str(event.begin)).astimezone(self.tz).timestamp()),
|
||||
# "end_timestamp": int(datetime.fromisoformat(str(event.end)).astimezone(self.tz).timestamp()),
|
||||
# "sonarr_id": event.uid.split("_")[-1],
|
||||
# }
|
||||
# daily_list.append(detail)
|
||||
|
||||
# return daily_list
|
||||
|
||||
|
||||
if "__main__" == __name__:
    # Smoke test against a live server.
    # Sonarr_API.__init__ accepts only (ip, key): the old call passed a third
    # positional db path, which raised TypeError. Likewise get_episode_detail
    # takes a single episode id — the old call passed (seriesId, episodeId).
    sonarr = Sonarr_API(
        "media-server.lan:8989",
        dotenv_values("/root/VT_Schedule_downloader/.env")['sonarr_key'],
    )
    print(sonarr.get_episode_detail(43758))
|
||||
|
||||
1463
lib/torrent_creator.py
Normal file
1463
lib/torrent_creator.py
Normal file
File diff suppressed because it is too large
Load Diff
1211
lib/usk.py
Normal file
1211
lib/usk.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user