Compare commits

...

16 Commits

Author SHA1 Message Date
56af1f0735 fix: adjust initialData extraction for new a/b change 2025-12-29 23:32:06 +01:00
72266aad0f chore: remove unnecessary string from age-restricted videos' description 2025-11-22 21:42:47 +01:00
900cc92229 fix: play other formats than 16x9 on yattee
reports a factually wrong, but close-enough, resolution. To be removed
once this is fixed on yattee's end.
2025-11-21 22:38:43 +01:00
2687cc2bdc hotfix: fix typo 2025-11-21 10:15:37 +01:00
4a9d59c9b4 fix: filter out DRC and VB audio from wdata 2025-11-21 09:59:22 +01:00
4af581ab7c fix: use acodec for audio streams instead of vcodec
should fix some issues with more strict DASH players
2025-11-21 09:44:30 +01:00
d1f381220d fix: use exclusively tv player for age-restricted videos
also filter out DRC audio
2025-11-20 17:54:41 +01:00
eebf434f3e feat: support age-restricted videos when cookies are provided 2025-11-20 13:02:38 +01:00
c979c97077 feat: allow debugging with pdb
allows user to debug the webapp when admin API key is passed as a param.
also - an anniversary, 100th commit!
2025-11-13 11:28:40 +01:00
11c94c757e fix: don't show playlists in related videos feed
items in related feed are now checked by contentType to determine if
they are videos or not
2025-11-07 18:45:11 +01:00
4421e68d9d fix: format stream itag is now a string (as it should)
fixes playback on clipious
2025-11-07 18:43:07 +01:00
da2daab16a feat: preliminary DASH support (for freetube/yattee, no manifest)
rewrites struct returned by innertube with video url retrieved by yt-dlp
works on freetube with proxying disabled; enabling proxying causes
horrible performance. yattee seems unaffected.
2025-11-06 21:53:32 +01:00
05b81e55da chore: bump version strings 2025-11-02 04:07:41 +01:00
158dcc3b7f fix: comment reply count extraction
suspected a/b test, sorting comments with "top" option returns a new
ctoken which will return "Replies" text for comments without replies.
to get rid of this behavior it'd be best to create known good ctokens
for every request.
2025-10-19 23:32:12 +02:00
668e8c32aa feat: support for lockupViewModel inside of channels' video feed
this is rare and currently a/b tested
2025-10-18 14:39:55 +02:00
760aaccfff feat: add debug printing
allows to print messages to stdout only if the instance has debugging
enabled
2025-10-18 14:23:08 +02:00
6 changed files with 323 additions and 257 deletions

View File

@@ -7,13 +7,16 @@ debug = false # Whether to print verbose, d
cache = true # Whether to cache requests for 3 hours (temporary solution to long load times).
[api]
api_key = "" # Leave empty API key for public access to non-sensitive backend
api_key_admin = "CHANGEME" # Empty *admin* API key will autogenerate a random one every launch.
api_key = "" # Leave empty API key for public access to non-sensitive backend
api_key_admin = "CHANGEME" # Empty *admin* API key will autogenerate a random one every launch.
enable_debugger_halt = false # Whether to allow to trigger pdb using admin's API key.
[extractor]
user-agent = "" # Leave empty for default (Firefox ESR).
cookies_path = "" # Leave empty for none.
preferred_extractor = "" # Leave empty for default (android_vr).
user-agent = "" # Leave empty for default (Firefox ESR).
cookies_path = "" # Leave empty for none.
age_restricted_cookies_path = "" # Cookies to use when bypassing age-gated videos only. Leave empty to disable.
deno_path = "" # Required when using cookies.
preferred_extractor = "" # Leave empty for default (android_vr).
[proxy]
user-agent = "" # Leave empty for default (Firefox ESR).

View File

@@ -8,19 +8,20 @@ import ythdd_globals, ythdd_extractor
#from flask_sqlalchemy import SQLAlchemy
#import ythdd_api_v1_stats, ythdd_api_v1_user, ythdd_api_v1_info, ythdd_api_v1_query, ythdd_api_v1_meta, ythdd_api_v1_admin
def requireAuthentication(func):
    """Decorator: run *func* only when the request carries the public API key.

    The wrapped function must be invoked with the Flask-style request object
    as keyword argument ``r`` and must return a ``(status, received, data)``
    triple.  When the ``token`` query parameter does not match the configured
    ``api_key``, a ``(401, "error", {...})`` triple is returned instead.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        token = kwargs["r"].args.get('token')
        # Bug fix: original compared against undefined ``lewy_globals``;
        # the module imports ``ythdd_globals`` (see top-of-file imports).
        if token == ythdd_globals.config['api']['api_key']:
            try:
                status, received, data = func(*args, **kwargs)
                return status, received, data
            except Exception as e:
                # Chain the cause so the underlying error is not masked
                # (the old bare ``except:`` hid the real traceback).
                raise AssertionError(f"Function \"{func.__name__}\" does not return status, code, and data as it should!") from e
        else:
            return 401, "error", {'error_msg': "Unauthorized"}
    return wrapper
def requireAuthentication(admin: bool = True):
    """Decorator factory guarding an endpoint behind an API key.

    Args:
        admin: when True, the request must carry the *admin* key
            (``api_key_admin``); otherwise the public ``api_key`` suffices.

    The wrapped function must be invoked with the request object as keyword
    argument ``r`` and must return a ``(status, received, data)`` triple;
    a ``(401, "error", {...})`` triple is returned on token mismatch.
    """
    def functionWrapper(func):
        @wraps(func)  # preserve __name__/__doc__ — the pre-factory decorator did this too
        def wrapper(*args, **kwargs):
            token = kwargs["r"].args.get('token')
            # Clearer than the original "'api_key' + admin * '_admin'" trick.
            key_name = 'api_key_admin' if admin else 'api_key'
            if token == ythdd_globals.config['api'][key_name]:
                try:
                    status, received, data = func(*args, **kwargs)
                    return status, received, data
                except Exception as e:
                    # Chain the cause so the underlying failure stays visible.
                    raise AssertionError(f"Function \"{func.__name__}\" does not return status, code, and data as it should!") from e
            else:
                return 401, "error", {'error_msg': "Unauthorized"}
        return wrapper
    return functionWrapper
def incrementBadRequests():
    """Bump the module-wide tally of malformed or unauthorized API requests."""
    ythdd_globals.apiFailedRequests = ythdd_globals.apiFailedRequests + 1
@@ -143,6 +144,13 @@ def hot(data):
incrementBadRequests()
return notImplemented([data[1]]) # workaround before notImplemented is reworked
@requireAuthentication(admin=True)
def debugger_halt(r):
    """Drop the running webapp into pdb (admin-key protected endpoint).

    Only acts when the operator has opted in via the
    ``[api] enable_debugger_halt`` config flag; otherwise returns 403.
    """
    halt_enabled = ythdd_globals.config["api"]["enable_debugger_halt"]
    if not halt_enabled:
        return 403, "Administrator has disabled access for this endpoint.", []
    breakpoint()  # blocks this worker until the pdb session ends
    return 200, "Pdb triggered and ended successfully.", []
def lookup(data, request):
match data[0]:
case 'stats':
@@ -163,6 +171,8 @@ def lookup(data, request):
case 'admin':
# REQUIRE CREDENTIALS!
return stub_hello()
case 'halt':
return debugger_halt(r=request)
case _:
incrementBadRequests()
return notImplemented(data)

View File

@@ -1,5 +1,6 @@
#!/usr/bin/python3
import brotli, yt_dlp, requests, json, time
from http.cookiejar import MozillaCookieJar
from ythdd_globals import safeTraverse
import ythdd_proto
import ythdd_globals
@@ -19,7 +20,11 @@ ytdl_opts = {
# "formats": ["dashy"]
}
},
"simulate": True
"simulate": True,
"js_runtimes": {
"deno": {}
},
'remote_components': ['ejs:github']
}
stage1_headers = {
@@ -67,7 +72,7 @@ stage1_body = {
stage2_headers = {
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) Gecko/20100101 Firefox/143.0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-us,en;q=0.5",
"Sec-Fetch-Mode": "navigate",
@@ -76,13 +81,13 @@ stage2_headers = {
stage3_headers = {
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) Gecko/20100101 Firefox/143.0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-us,en;q=0.5",
"Sec-Fetch-Mode": "navigate",
"Content-Type": "application/json",
"X-Youtube-Client-Name": "1",
"X-Youtube-Client-Version": "2.20251014.01.00",
"X-Youtube-Client-Version": "2.20251103.01.00",
"Origin": "https://www.youtube.com",
"Accept-Encoding": "gzip, deflate, br",
"Cookie": "PREF=hl=en&tz=UTC; SOCS=CAI"
@@ -94,7 +99,7 @@ stage3_body = {
"client":
{
"clientName": "WEB",
"clientVersion": "2.20251014.01.00",
"clientVersion": "2.20251103.01.00",
"hl": "en",
"timeZone": "UTC",
"utcOffsetMinutes": 0
@@ -112,9 +117,9 @@ web_context_dict = {
'gl': 'US',
'deviceMake': '',
'deviceModel': '',
'userAgent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) Gecko/20100101 Firefox/143.0,gzip(gfe)',
'userAgent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0,gzip(gfe)',
'clientName': 'WEB',
'clientVersion': '2.20251014.01.00',
'clientVersion': '2.20251103.01.00',
'osName': 'Windows',
'osVersion': '10.0',
'screenPixelDensity': 2,
@@ -129,7 +134,7 @@ web_context_dict = {
}
}
def extract(url: str, getcomments=False, maxcomments="", manifest_fix=False):
def extract(url: str, getcomments=False, maxcomments="", manifest_fix=False, use_cookies=None):
# TODO: check user-agent and cookiefile
ytdl_context = ytdl_opts.copy()
@@ -137,9 +142,6 @@ def extract(url: str, getcomments=False, maxcomments="", manifest_fix=False):
if ythdd_globals.config['extractor']['user-agent']:
yt_dlp.utils.std_headers['User-Agent'] = ythdd_globals.config['extractor']['user-agent']
if ythdd_globals.config['extractor']['cookies_path']:
ytdl_context['cookiefile'] = ythdd_globals.config['extractor']['cookies_path']
if len(url) == 11:
url = "https://www.youtube.com/watch?v=" + url
if getcomments:
@@ -153,7 +155,27 @@ def extract(url: str, getcomments=False, maxcomments="", manifest_fix=False):
ytdl_context['extractor_args']['youtube']['player_client'] = [ythdd_globals.config['extractor']['preferred_extractor']]
else:
ytdl_context['extractor_args']['youtube']['player_client'] = ['android_vr']
with yt_dlp.YoutubeDL(ytdl_opts) as ytdl:
if use_cookies is not None:
# can be either "global", "agegated" or None
deno_path = ythdd_globals.config['extractor']['deno_path']
match use_cookies:
case "global":
ytdl_context['cookiefile'] = ythdd_globals.config['extractor']['cookies_path']
ytdl_context['extractor_args']['youtube']['player_client'] = ['tv']
if not deno_path:
print("FATAL ERROR: deno path is required for playback using cookies!")
ytdl_context['js_runtimes']['deno']['path'] = deno_path if deno_path else ""
case "agegated":
ytdl_context['cookiefile'] = ythdd_globals.config['extractor']['age_restricted_cookies_path']
ytdl_context['extractor_args']['youtube']['player_client'] = ['tv']
if not deno_path:
print("FATAL ERROR: deno path is required for playback of age-restricted content!")
ytdl_context['js_runtimes']['deno']['path'] = deno_path if deno_path else ""
case None | _:
pass
with yt_dlp.YoutubeDL(ytdl_context) as ytdl:
result = ytdl.sanitize_info(ytdl.extract_info(url, download=False))
return result
@@ -177,7 +199,7 @@ def WEBrelated(url: str):
return extracted_json["contents"]['twoColumnWatchNextResults']["secondaryResults"]
def WEBextractSinglePage(uri: str):
def WEBextractSinglePage(uri: str, use_cookies=None):
# WARNING! HIGHLY EXPERIMENTAL, DUE TO BREAK ANYTIME
start_time = time.time()
@@ -185,11 +207,25 @@ def WEBextractSinglePage(uri: str):
if len(uri) != 11:
raise ValueError("WEBextractSinglePage expects a single, 11-character long argument")
response = requests.get("https://www.youtube.com/watch?v=" + uri, headers=ythdd_globals.getHeaders(caller='extractor'))
cookies = None
if use_cookies is not None:
match use_cookies:
case "global":
ythdd_globals.print_debug("wdata: using global cookies")
cookies = MozillaCookieJar(ythdd_globals.config["extractor"]["cookies_path"])
cookies.load()
case "agegated":
ythdd_globals.print_debug("wdata: using agegated cookies")
cookies = MozillaCookieJar(ythdd_globals.config["extractor"]["age_restricted_cookies_path"])
cookies.load()
case None | _:
pass
response = requests.get("https://www.youtube.com/watch?v=" + uri, headers=ythdd_globals.getHeaders(caller='extractor'), cookies=cookies)
extracted_string = str(response.content.decode('utf8', 'unicode_escape'))
start = extracted_string.find('{"responseContext":{"serviceTrackingParams":')
start = extracted_string.find('{"responseContext":')
end = extracted_string.find(';var ', start)
start2 = extracted_string.find('{"responseContext":{"serviceTrackingParams":', start + 1)
start2 = extracted_string.find('{"responseContext":', start + 1)
end2 = extracted_string.find(';</script>', start2)
extracted_json1 = json.loads(extracted_string[start:end])
extracted_json2 = json.loads(extracted_string[start2:end2])
@@ -518,6 +554,13 @@ def WEBgetVideoComments(ctoken: str) -> tuple:
reply_ctoken = safeTraverse(reply_renderer, ["commentThreadRenderer", "replies", "commentRepliesRenderer", "contents", 0, "continuationItemRenderer", "continuationEndpoint", "continuationCommand", "token"], default="")
reply_count = safeTraverse(reply_renderer, ["commentThreadRenderer", "replies", "commentRepliesRenderer", "viewReplies", "buttonRenderer", "text", "runs", 0, "text"], default="0 replies").split(" ")[0]
# suspected a/b test. can be worked around with on-demand ctoken creation.
# workaround for yt not showing replies when sorting for "top" comments
try:
int(reply_count) # can be just "Replies"
except:
reply_count = "0"
for comment in actual_comments:
found_key = safeTraverse(comment, ["entityKey"], default="unknown-key")
# try to link a relevant ctoken if a comment has response

View File

@@ -32,7 +32,7 @@ def getConfig(configfile):
global randomly_generated_passcode
if not os.path.exists(configfile):
dummy_config = {'general': {'db_file_path': 'ythdd_db.sqlite', 'video_storage_directory_path': 'videos/', 'is_proxied': False, 'public_facing_url': 'http://127.0.0.1:5000/', 'debug': False, 'cache': True}, 'api': {'api_key': 'CHANGEME'}, 'proxy': {'user-agent': '', 'allow_proxying_videos': True, 'match_initcwndbps': True}, 'extractor': {'user-agent': '', 'cookies_path': ''}, 'admin': {'admins': ['admin']}, 'yt_dlp': {}, 'postprocessing': {'presets': [{'name': 'recommended: [N][<=720p] best V+A', 'format': 'bv[height<=720]+ba', 'reencode': ''}, {'name': '[N][1080p] best V+A', 'format': 'bv[height=1080]+ba', 'reencode': ''}, {'name': '[R][1080p] webm', 'format': 'bv[height=1080]+ba', 'reencode': 'webm'}, {'name': '[N][720p] best V+A', 'format': 'bv[height=720]+ba', 'reencode': ''}, {'name': '[R][720p] webm', 'format': 'bv[height=720]+ba', 'reencode': 'webm'}, {'name': '[N][480p] best V+A', 'format': 'bv[height=480]+ba', 'reencode': ''}, {'name': '[480p] VP9 webm/reencode', 'format': 'bv*[height=480][ext=webm]+ba/bv[height=480]+ba', 'reencode': 'webm'}, {'name': '[N][1080p] best video only', 'format': 'bv[height=1080]', 'reencode': ''}, {'name': '[N][opus] best audio only', 'format': 'ba', 'reencode': 'opus'}]}}
dummy_config = {'general': {'db_file_path': 'ythdd_db.sqlite', 'video_storage_directory_path': 'videos/', 'is_proxied': False, 'public_facing_url': 'http://127.0.0.1:5000/', 'debug': False, 'cache': True}, 'api': {'api_key': 'CHANGEME', 'enable_debugger_halt': False}, 'proxy': {'user-agent': '', 'allow_proxying_videos': True, 'match_initcwndbps': True}, 'extractor': {'user-agent': '', 'cookies_path': ''}, 'admin': {'admins': ['admin']}, 'yt_dlp': {}, 'postprocessing': {'presets': [{'name': 'recommended: [N][<=720p] best V+A', 'format': 'bv[height<=720]+ba', 'reencode': ''}, {'name': '[N][1080p] best V+A', 'format': 'bv[height=1080]+ba', 'reencode': ''}, {'name': '[R][1080p] webm', 'format': 'bv[height=1080]+ba', 'reencode': 'webm'}, {'name': '[N][720p] best V+A', 'format': 'bv[height=720]+ba', 'reencode': ''}, {'name': '[R][720p] webm', 'format': 'bv[height=720]+ba', 'reencode': 'webm'}, {'name': '[N][480p] best V+A', 'format': 'bv[height=480]+ba', 'reencode': ''}, {'name': '[480p] VP9 webm/reencode', 'format': 'bv*[height=480][ext=webm]+ba/bv[height=480]+ba', 'reencode': 'webm'}, {'name': '[N][1080p] best video only', 'format': 'bv[height=1080]', 'reencode': ''}, {'name': '[N][opus] best audio only', 'format': 'ba', 'reencode': 'opus'}]}}
# if a passcode has not been provided by the user (config file doesn't exist, and user didn't specify it using an argument)
print(f"{colors.WARNING}WARNING{colors.ENDC}: Using default, baked in config data. {colors.ENDL}"
f" Consider copying and editing the provided example file ({colors.OKCYAN}config.default.toml{colors.ENDC}).")
@@ -60,7 +60,7 @@ def getHeaders(caller="proxy"):
# NOTE: use ESR user-agent
# user_agent = 'Mozilla/5.0 (Windows NT 10.0; rv:130.0) Gecko/20100101 Firefox/130.0'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) Gecko/20100101 Firefox/143.0'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0'
if config[caller]['user-agent']:
user_agent = config[caller]['user-agent']
@@ -151,3 +151,7 @@ def getCommit() -> str | None:
except Exception as e:
return None
def print_debug(text: str) -> None:
    """Write *text* to stdout, but only when the instance has debugging enabled."""
    if not config["general"]["debug"]:
        return
    print(text)

View File

@@ -45,6 +45,7 @@ import ythdd_struct_parser
# IDEAS:
# [*] /api/v1/popular returns last requested videos by the IP (serving as multi-device history?)
# [*] /api/v1/trending returns recently archived videos
# [*] produce continuations instead of extracting them
# ----------
# NOT PLANNED/MAYBE IN THE FUTURE:
# [ ] /api/v1/auth/subscriptions (stub? db?)
@@ -134,215 +135,6 @@ def getError(wdata: dict):
return error
def rebuildFormats(data):
    """Rebuild Invidious-style stream dicts from innertube streaming formats.

    Args:
        data: list of innertube (adaptive) format dicts, one per stream.

    Returns:
        ``(result, formatStreams)`` — ``result`` mirrors ``data`` entry-for-entry
        with Invidious field names; ``formatStreams`` contains a single combined
        entry built from the highest-bitrate video stream (FreeTube refuses to
        play without at least one "format stream").
    """
    result = [{} for x in data]
    formatStreams = []
    best_bitrate_video = 0
    best_bitrate_audio = -1
    for x in range(len(data)):
        # Audio streams carry 'audioChannels'; its absence marks a video stream.
        try:
            result[x]['audioChannels'] = data[x]['audioChannels']
            isVideo = 0
        except KeyError:
            isVideo = 1
        if not "initRange" in data[x]: # for livestreams?
            continue
        result[x]['init'] = str(data[x]['initRange']['start']) + "-" + str(data[x]['initRange']['end'])
        result[x]['index'] = str(data[x]['indexRange']['start']) + "-" + str(data[x]['indexRange']['end'])
        result[x]['bitrate'] = str(data[x]['averageBitrate'])
        result[x]['url'] = data[x]['url']
        result[x]['itag'] = str(data[x]['itag'])
        result[x]['type'] = data[x]['mimeType']
        result[x]['clen'] = data[x]['contentLength']
        result[x]['lmt'] = data[x]['lastModified']
        result[x]['projectionType'] = data[x]['projectionType']
        try:
            result[x]['colorInfo'] = data[x]['colorInfo']
        except KeyError:
            pass
        if "audio" == data[x]['mimeType'][:5]:
            isAudio = 1
        else:
            isAudio = 0
        if isVideo:
            result[x]['fps'] = str(data[x]['fps'])
        else:
            result[x]['audioQuality'] = data[x]['audioQuality']
            result[x]['audioSampleRate'] = data[x]['audioSampleRate']
        if data[x]['itag'] in invidious_formats.FORMATS.keys():
            result[x]['container'] = invidious_formats.FORMATS[data[x]['itag']]['ext']
            try:
                result[x]['encoding'] = invidious_formats.FORMATS[data[x]['itag']]['vcodec']
            except KeyError:
                result[x]['encoding'] = invidious_formats.FORMATS[data[x]['itag']]['acodec']
            if isVideo:
                # not all itags declare width/height/fps in the table
                try:
                    result[x]['resolution'] = str(invidious_formats.FORMATS[data[x]['itag']]['height']) + "p"
                    result[x]['qualityLabel'] = str(invidious_formats.FORMATS[data[x]['itag']]['height']) + "p" + str(result[x]['fps']) * (data[x]['fps'] > 30) # NOT IMPLEMENTED, that's just a placeholder
                    result[x]['size'] = str(invidious_formats.FORMATS[data[x]['itag']]['width']) + "x" + str(invidious_formats.FORMATS[data[x]['itag']]['height'])
                except KeyError:
                    pass
        # we assume here that a stream with the highest bitrate must be a video stream- that may not be the case
        if data[x]['averageBitrate'] > data[best_bitrate_video]['averageBitrate'] and isVideo:
            best_bitrate_video = x
        # NOTE(review): best_bitrate_audio starts at -1, so this first compares
        # against data[-1] (the last entry) — confirm that is intended.
        if data[x]['averageBitrate'] > data[best_bitrate_audio]['averageBitrate'] and isAudio:
            best_bitrate_audio = x
    # makes FreeTube work, unfortunately it's a video-only stream
    formatStreams = [
        {
            "url": data[best_bitrate_video]['url'],
            "itag": str(data[best_bitrate_video]['itag']),
            "type": data[best_bitrate_video]['mimeType'],
            "quality": data[best_bitrate_video]['quality'],
            "bitrate": str(data[best_bitrate_video]['averageBitrate']),
            "fps": data[best_bitrate_video]['fps'],
            "size": "", # workaround for clipious, which requires ANYTHING to be passed, or else it will throw an error and won't load the video
            "resolution": str(invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['height']) + "p",
            "qualityLabel": str(invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['height']) + "p",
            "container": invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['ext'],
            "encoding": invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['vcodec']
        },
        # {
        #     "audioChannels": data[best_bitrate_audio]['audioChannels'],
        #     "init": result[best_bitrate_audio]['init'],
        #     "index": result[best_bitrate_audio]['index'],
        #     "bitrate": str(data[best_bitrate_audio]['averageBitrate']),
        #     "url": data[best_bitrate_audio]['url'],
        #     "itag": str(data[best_bitrate_audio]['itag']),
        #     "type": data[best_bitrate_audio]['mimeType'],
        #     "clen": result[best_bitrate_audio]['clen'],
        #     "lmt": result[best_bitrate_audio]['lmt'],
        #     "projectionType": result[best_bitrate_audio]['projectionType'],
        #     "audioQuality": result[best_bitrate_audio]['audioQuality'],
        #     "audioSampleRate": result[best_bitrate_audio]['audioSampleRate'],
        #     "qualityLabel": "audio"
        # }
    ]
    # not all itags have width and/or height
    try:
        # Bug fix: original indexed ``data[best_bitrate]`` — an undefined name,
        # silently swallowed by the except, so "size" was never filled in.
        formatStreams[0]["size"] = str(invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['width']) + "x" + str(invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['height'])
    except KeyError:
        pass
    return result, formatStreams
def rebuildFormatsFromYtdlpApi(ydata: dict):
    """Rebuild Invidious-compatible format dicts from yt-dlp's output (``ydata``).

    Returns a tuple ``(adaptive_formats, format_streams)``:
    ``adaptive_formats`` holds every usable audio and/or video stream,
    ``format_streams`` only the combined video+audio streams.
    Streams whose protocol is neither DASH nor plain HTTPS (e.g. storyboards)
    are skipped.
    """
    adaptive_formats = []
    format_streams = []
    for stream in safeTraverse(ydata, ["formats"], default=[]):
        # missing "protocol" defaults to "storyboard" so the entry is skipped
        if safeTraverse(stream, ["protocol"], default="storyboard") not in ("http_dash_segments", "https"):
            continue
        newRow = {}
        # yt-dlp reports tbr in kbit/s; multiply into bit/s for the Invidious field
        newRow["bitrate"] = str(int(safeTraverse(stream, ["tbr"], default=0) * 1000))
        newRow["url"] = safeTraverse(stream, ["url"])
        newRow["itag"] = safeTraverse(stream, ["format_id"])
        # clen/lmt are recovered from the stream URL's query parameters below
        params = ythdd_extractor.paramsFromUrl(newRow["url"])
        vcodec = safeTraverse(stream, ["vcodec"], default="none")
        acodec = safeTraverse(stream, ["acodec"], default="none")
        if vcodec == "none" and acodec == "none":
            continue
        # NOTE(review): no default here — a missing "acodec" yields None, which
        # also enters this branch; confirm that is intended (guard above already
        # filtered the both-"none" case using defaults).
        if safeTraverse(stream, ["acodec"]) != "none":
            # audio-only track (or the audio half of a combined stream)
            type = safeTraverse(stream, ["audio_ext"], default=None)
            fnote = safeTraverse(stream, ["format_note"], default="low")
            if type is None:
                type = "mp4"
            abr = safeTraverse(stream, ["abr"], default="0")
            if abr is None:
                abr = "0"
            newRow[ "type"] = "audio/" + type
            newRow[ "audioQuality"] = fnote
            newRow["audioSampleRate"] = int(safeTraverse(stream, ["asr"], default="44100"))
            # NOTE(review): no default — int(None) raises TypeError if
            # "audio_channels" is absent; confirm yt-dlp always supplies it here.
            newRow[ "audioChannels"] = int(safeTraverse(stream, ["audio_channels"]))
            newRow[ "qualityLabel"] = str(int(abr)) + "k (audio)"
            newRow[ "resolution"] = f"{fnote} quality"
            newRow[ "size"] = "0x0"
        # NOTE(review): same no-default pattern as the acodec check above.
        if safeTraverse(stream, ["vcodec"]) != "none":
            # either video-only or video+audio (overwrites the audio-side
            # type/qualityLabel/resolution/size fields set above, if any)
            type = safeTraverse(stream, ["video_ext"], default=None)
            if type is None:
                type = "mp4"
            height = str(safeTraverse(stream, ["height"], default=0))
            width = str(safeTraverse(stream, [ "width"], default=0))
            newRow[ "type"] = "video/" + type
            newRow[ "resolution"] = (height if height in ("144", "240", "360", "480", "720", "1080") else "360") + "p" # mpv won't play the video inside of Yattee if it's a non-standard resolution (bug?)
            newRow[ "fps"] = safeTraverse(stream, ["fps"], default=30)
            newRow[ "qualityLabel"] = height + "p" + str(int(newRow['fps'])) * (newRow["fps"] > 30) + " (video)" # also a placeholder
            newRow[ "size"] = width + "x" + height
            newRow[ "clen"] = safeTraverse(params, ["clen"], default=safeTraverse(stream, ["filesize"], default="0"))
            newRow[ "lmt"] = safeTraverse(params, ["lmt"], default="0")
            if newRow["clen"] is None:
                # for clipious sake which expects a string
                newRow["clen"] = "0"
        newRow[ "projectionType"] = "RECTANGULAR" # clipious requires this to be mentioned explicitly and cannot be nulled
        newRow[ "container"] = safeTraverse(stream, ["ext"], default="unknown_container")
        # int("0" + ...) tolerates non-numeric format_ids by falling back to key 0
        newRow[ "encoding"] = safeTraverse(invidious_formats.FORMATS, [int("0" + newRow["itag"].split("-")[0]), "ext"], default="unknown_encoding") # not sure this split is necessary
        newRow[ "quality"] = newRow["qualityLabel"]
        newRow[ "init"] = "0-1" # dummy values
        newRow[ "index"] = "2-3" # dummy values
        if vcodec != "none" and acodec != "none":
            # combined stream ("360p stream"): strip the " (video)" suffix
            newRow["qualityLabel"] = height + "p" + str(int(newRow['fps'])) * (newRow["fps"] > 30)
            format_streams.append(newRow)
        if vcodec != "none" or acodec != "none":
            adaptive_formats.append(newRow)
    # Reference shape of the legacy rebuildFormats() output, kept for comparison:
    # {
    #     "url": data[best_bitrate_video]['url'],
    #     "itag": str(data[best_bitrate_video]['itag']),
    #     "type": data[best_bitrate_video]['mimeType'],
    #     "quality": data[best_bitrate_video]['quality'],
    #     "bitrate": str(data[best_bitrate_video]['averageBitrate']),
    #     "fps": data[best_bitrate_video]['fps'],
    #     "size": "", # workaround for clipious, which requires ANYTHING to be passed, or else it will throw an error and won't load the video
    #     "resolution": str(invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['height']) + "p",
    #     "qualityLabel": str(invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['height']) + "p",
    #     "container": invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['ext'],
    #     "encoding": invidious_formats.FORMATS[data[best_bitrate_video]['itag']]['vcodec']
    # }
    # {
    #     "audioChannels": data[best_bitrate_audio]['audioChannels'],
    #     "init": result[best_bitrate_audio]['init'],
    #     "index": result[best_bitrate_audio]['index'],
    #     "bitrate": str(data[best_bitrate_audio]['averageBitrate']),
    #     "url": data[best_bitrate_audio]['url'],
    #     "itag": str(data[best_bitrate_audio]['itag']),
    #     "type": data[best_bitrate_audio]['mimeType'],
    #     "clen": result[best_bitrate_audio]['clen'],
    #     "lmt": result[best_bitrate_audio]['lmt'],
    #     "projectionType": result[best_bitrate_audio]['projectionType'],
    #     "audioQuality": result[best_bitrate_audio]['audioQuality'],
    #     "audioSampleRate": result[best_bitrate_audio]['audioSampleRate'],
    #     "qualityLabel": "audio"
    # }
    return adaptive_formats, format_streams
def videos(data):
# an attempt on a faithful rewrite of
# https://github.com/iv-org/invidious/blob/master/src/invidious/videos/parser.cr
@@ -371,11 +163,24 @@ def videos(data):
wdata = ythdd_extractor.WEBextractSinglePage(data[3])
age_restricted = False
error = getError(wdata)
if error is not None:
return send(500, {"status": "error", "error": error})
ydata = ythdd_extractor.extract(data[3])
if error.startswith("(LOGIN_REQUIRED)") and "inappropriate for some users" in error:
# check if user provided age-gated cookies
if ythdd_globals.config["extractor"]["age_restricted_cookies_path"]:
ythdd_globals.print_debug(f"videos({data[3]}): using agegated cookies to bypass restriction")
ydata = ythdd_extractor.extract(data[3], use_cookies="agegated")
wdata = ythdd_extractor.WEBextractSinglePage(data[3], use_cookies="agegated")
age_restricted = True
else:
# return error if no age-gated cookies are provided
return send(500, {"status": "error", "error": error})
else:
# return error if it doesn't mention age restriction
return send(500, {"status": "error", "error": error})
else:
ydata = ythdd_extractor.extract(data[3])
#return send(200, {'ydata': ydata, 'wdata': wdata})
#return send(200, {'idata': idata, 'wdata': wdata})
@@ -420,10 +225,10 @@ def videos(data):
y = safeTraverse(entry, ['lockupViewModel'])
if not isinstance(y, dict):
continue
is_mix_or_playlist = safeTraverse(entry, ["lockupViewModel", "contentImage", "collectionThumbnailViewModel", "primaryThumbnail", "thumbnailViewModel", "overlays", 0, "thumbnailOverlayBadgeViewModel", "thumbnailBadges", 0, "thumbnailBadgeViewModel", "icon", "sources", 0, "clientResource", "imageName"], default="") in ("MIX", "PLAYLISTS")
if is_mix_or_playlist:
if safeTraverse(y, ["contentType"], default="LOCKUP_CONTENT_TYPE_VIDEO") != "LOCKUP_CONTENT_TYPE_VIDEO":
# neither mixes nor playlists are currently supported by the invidious api
continue
# note: this model is similar, but not identical to the one in ythdd_struct_parser. perhaps they can be both handled in the struct parser some time.
lmvm = safeTraverse(y, ['metadata', 'lockupMetadataViewModel'], default=[])
related_entry['videoId'] = safeTraverse(y, ['contentId'])
related_entry['title'] = safeTraverse(lmvm, ['title', 'content'])
@@ -467,15 +272,57 @@ def videos(data):
author_verified = author_verified or safeTraverse(livm, [0, "listItemViewModel", "title", "attachmentRuns", 0, "element", "type", "imageType", "image", "sources", 0, "clientResource", "imageName"]) in ("AUDIO_BADGE", "CHECK_CIRCLE_FILLED")
author_thumbnail = ythdd_extractor.generateChannelAvatarsFromUrl(author_thumbnail)
wdata_streams = safeTraverse(wdata, ["ec1", "streamingData"], default=[])
adaptive_formats = []
format_streams = []
# adaptive_formats, format_streams = rebuildFormats(adaptive_formats)
if not live_now:
adaptive_formats, format_streams = rebuildFormatsFromYtdlpApi(ydata)
# adaptive_formats, format_streams = rebuildFormatsFromYtdlpApi(ydata)
initial_astreams_y = {} # itag is the key
initial_fstreams_y = {} # same here
initial_astreams_w = {}
initial_fstreams_w = {}
for video_stream in ydata["formats"]:
if video_stream["format_note"] in ("storyboard"):
# ignore non-audio/video formats (e.g. storyboards)
continue
if video_stream["format_id"] == "18": # todo: do this dynamically
initial_fstreams_y[int(video_stream["format_id"])] = video_stream
elif video_stream["format_id"].isdigit():
# filter out DRC audio
initial_astreams_y[int(video_stream["format_id"])] = video_stream
else:
continue
# format streams
for video_stream in wdata_streams["formats"]:
initial_fstreams_w[video_stream["itag"]] = video_stream
# adaptive streams
for audiovideo_stream in wdata_streams["adaptiveFormats"]:
if not "isVb" in audiovideo_stream and not "isDrc" in audiovideo_stream:
# skip DRC and VB formats
initial_astreams_w[audiovideo_stream["itag"]] = audiovideo_stream
for itag in initial_astreams_y:
if itag in initial_astreams_w:
adaptive_formats.append(ythdd_struct_parser.parseAdaptiveStreams(initial_astreams_w[itag], initial_astreams_y[itag]))
for itag in initial_fstreams_y:
if itag in initial_fstreams_w:
format_streams.append( ythdd_struct_parser.parseFormatStreams( initial_fstreams_w[itag], initial_fstreams_y[itag]))
hls_url = None
else:
adaptive_formats, format_streams = [{"url": f"http://a/?expire={int(time_start + 5.9 * 60 * 60)}", "itag": "18", "type": "", "clen": "0", "lmt": "", "projectionType": "RECTANGULAR"}], [] # freetube/clipious shenanigans, see: https://github.com/FreeTubeApp/FreeTube/pull/5997 and https://github.com/lamarios/clipious/blob/b9e7885/lib/videos/models/adaptive_format.g.dart
hls_url = safeTraverse(ydata, ["url"], default="ythdd: unable to retrieve stream url")
if age_restricted:
if not adaptive_formats:
adaptive_formats = [{"url": f"http://a/?expire={int(time_start + 5.9 * 60 * 60)}", "itag": "18", "type": "", "clen": "0", "lmt": "", "projectionType": "RECTANGULAR"}] # same as above
if live_now:
video_type = "livestream"
premiere_timestamp = published # ??? that works i guess

View File

@@ -1,5 +1,6 @@
from ythdd_globals import safeTraverse
from html import escape
from invidious_formats import FORMATS
from ythdd_globals import safeTraverse
import json
import dateparser
import ythdd_globals
@@ -50,6 +51,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
match safeTraverse(list(entry.keys()), [0], default=""):
case "videoRenderer": # represents a video
# as of october 2025 slowly phased out in favor of lockupViewModel(?)
published_date = safeTraverse(entry, ["videoRenderer", "publishedTimeText", "simpleText"], default="now")
published_date = published_date.removeprefix("Streamed ")
@@ -118,6 +120,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
# retrieve the main channel's avatar
avatar_url = safeTraverse(livm, [0, "listItemViewModel", "leadingAccessory", "avatarViewModel", "image", "sources", 0, "url"], default=DEFAULT_AVATAR)
ythdd_globals.print_debug("videoRenderer fired")
return {
"type": "video",
"title": safeTraverse(entry, ["videoRenderer", "title", "runs", 0, "text"]),
@@ -149,15 +152,67 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
# modify the premiere timestamp afterwards here?
case "lockupViewModel": # represents playlists/mixes
case "lockupViewModel": # represents playlists/mixes (and videos since october 2025)
# related videos lvms are handled in ythdd_inv_tl.videos()
playlist_type = safeTraverse(entry, ["lockupViewModel", "contentImage", "collectionThumbnailViewModel", "primaryThumbnail", "thumbnailViewModel", "overlays", 0, "thumbnailOverlayBadgeViewModel", "thumbnailBadges", 0, "thumbnailBadgeViewModel", "icon", "sources", 0, "clientResource", "imageName"], default="PLAYLISTS")
lvm = entry["lockupViewModel"]
playlist_type = safeTraverse(lvm, ["contentImage", "collectionThumbnailViewModel", "primaryThumbnail", "thumbnailViewModel", "overlays", 0, "thumbnailOverlayBadgeViewModel", "thumbnailBadges", 0, "thumbnailBadgeViewModel", "icon", "sources", 0, "clientResource", "imageName"], default="")
if playlist_type == "MIX":
# mixes aren't currently supported
return
lvm = entry["lockupViewModel"]
if not playlist_type:
# struct represents a video
ythdd_globals.print_debug("lockupViewModel fired (not a playlist). this is an a/b test; any following errors stem from it.")
lmvm = safeTraverse(lvm, ['metadata', 'lockupMetadataViewModel'], default={})
video_id = safeTraverse(lvm, ['contentId'])
author_name = safeTraverse(context, ["author_name"], default="Unknown author")
author_ucid = safeTraverse(context, ["author_ucid"], default="UNKNOWNCHANNELID")
verified = safeTraverse(context, ["verified"], default=False) # TODO: check if this can be retrieved here
avatar_url = safeTraverse(context, ["avatar"], default=DEFAULT_AVATAR)
title = safeTraverse(lmvm, ["title", "content"], default="No title")
video_metadata = safeTraverse(lmvm, ["metadata", "contentMetadataViewModel", "metadataRows", 0, "metadataParts"], default=[])
view_count_text = safeTraverse(video_metadata, [0, "text", "content"], default="0 views")
published_date = safeTraverse(video_metadata, [1, "text", "content"], default="now")
length_text = safeTraverse(lvm, ["contentImage", "thumbnailViewModel", "overlays", ..., "thumbnailBottomOverlayViewModel", "badges", -1, "thumbnailBadgeViewModel", "text"], default="0:0")
view_count = parseViewsFromViewText(view_count_text)
length = parseLengthFromTimeBadge(length_text)
resp = {
"type": "video",
"title": title,
"videoId": video_id,
"author": author_name,
"authorId": author_ucid,
"authorUrl": "/channel/" + author_ucid,
"authorVerified": verified, # TODO
"authorThumbnails": ythdd_extractor.generateChannelAvatarsFromUrl(avatar_url),
"videoThumbnails": ythdd_struct_builder.genThumbs(video_id),
"description": "", # can't be retrieved from lockupViewModel
"descriptionHtml": "",
"viewCount": view_count,
"viewCountText": view_count_text,
"published": int(dateparser.parse(published_date).timestamp()), # sadly best we can do, invidious does this too
"publishedText": published_date,
"lengthSeconds": length,
"liveNow": False, # can't be live if it's in creator's video feed
"premium": False, # todo: check this
"isUpcoming": False,
"isNew": False,
"is4k": False,
"is8k": False,
"isVr180": False,
"isVr360": False,
"is3d": False,
"hasCaptions": False
}
return resp
# struct represents a playlist
meta = safeTraverse(lvm, ["metadata"], default=[])
lmvm = safeTraverse(meta, ["lockupMetadataViewModel", "metadata", "contentMetadataViewModel", "metadataRows"], default=[])
thumbnail = safeTraverse(lvm, ["contentImage", "collectionThumbnailViewModel", "primaryThumbnail", "thumbnailViewModel", "image", "sources", -1, "url"], default="no-url?")
@@ -168,7 +223,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
length = safeTraverse(lvm, ["contentImage", "collectionThumbnailViewModel", "primaryThumbnail", "thumbnailViewModel", "overlays", 0, "thumbnailOverlayBadgeViewModel", "thumbnailBadges", 0, "thumbnailBadgeViewModel", "text"], default="0 videos")
length = parseViewsFromViewText(length.split(" ")[0])
# Turns out for some responses we do some data, while not on others.
# Turns out for some responses we do have some data, while not on others.
# Data from context should be prioritized, thus even if something is found with safeTraverse,
# the parser will ignore it in favour of the context.
ucid = safeTraverse(lmvm, [0, "metadataParts", 0, "text", "commandRuns", 0, "onTap", "innertubeCommand", "browseEndpoint", "browseId"], default="UNKNOWNCHANNELID")
@@ -176,6 +231,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
ucid = safeTraverse(context, ["author_ucid"], default=ucid)
author = safeTraverse(context, ["author_name"], default=author)
ythdd_globals.print_debug("lockupViewModel fired (playlist)")
return {
"type": "playlist",
"title": safeTraverse(meta, ["lockupMetadataViewModel", "title", "content"], default="ythdd: unknown title"),
@@ -227,6 +283,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
else:
avatar_url = "unknown"
ythdd_globals.print_debug("shortsLockupViewModel fired")
return {
"type": "video",
"title": title,
@@ -269,6 +326,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
published_date = safeTraverse(entry, ["gridVideoRenderer", "publishedTimeText", "simpleText"], default="now")
published_date = published_date.removeprefix("Streamed ")
ythdd_globals.print_debug("gridVideoRenderer fired")
return {
"type": "video",
"title": safeTraverse(entry, ["gridVideoRenderer", "title", "simpleText"], default="unknown video title"),
@@ -303,6 +361,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
description, description_html = parseDescriptionSnippet(safeTraverse(entry, ["channelRenderer", "descriptionSnippet", "runs"], default=[]))
isVerified = ythdd_extractor.isVerified(safeTraverse(entry, ["channelRenderer", "ownerBadges", 0], default=[]))
ythdd_globals.print_debug("channelRenderer fired")
return {
"type": "channel",
"author": safeTraverse(entry, ["channelRenderer", "title", "simpleText"], default="Unknown channel"),
@@ -353,6 +412,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
avatar_url = safeTraverse(entry, ["playlistVideoRenderer", "thumbnailOverlays", ..., "thumbnailOverlayAvatarStackViewModel", "avatarStack", "avatarStackViewModel", "avatars", 0, "avatarViewModel", "image", "sources", 0, "url"])
avatars = None if avatar_url is None else ythdd_extractor.generateChannelAvatarsFromUrl(avatar_url)
ythdd_globals.print_debug("playlistVideoRenderer fired")
return {
"type": "video",
"title": title,
@@ -372,7 +432,7 @@ def parseRenderers(entry: dict, context: dict = {}) -> dict:
}
case _:
print("received an entry of unknown type:")
print("received an entry of unknown type (thus can't be parsed):")
print(entry)
print("")
# breakpoint()
@@ -457,3 +517,102 @@ def extractTextFromSimpleOrRuns(obj: dict, default: str = "") -> str:
else:
print(f"error(extractTextFromSimpleOrRuns): text extraction failed for {obj}")
return text
def findNearestResolution(width: int, height: int) -> int:
    """Return the standard resolution nearest to min(width, height).

    Standard resolutions are 144, 240, 360, 720, 1080, 2160, 4320
    (480 and 1440 are intentionally absent here — TODO confirm that is
    deliberate).  "Nearest" is measured by relative distance, so e.g.
    a 480-pixel edge maps to 360, not 720.

    So far only used for Yattee, as it has trouble playing anything
    without one of the standard resolutions; playback on other clients
    is unaffected.  Returns 360 as a failsafe whenever the inputs are
    not usable positive numbers.
    """
    try:
        res = min(int(width), int(height))
    except (TypeError, ValueError):
        # failsafe behaviour for non-numeric input
        return 360
    if res <= 0:
        # previously a zero edge slipped past the try/except and raised
        # ZeroDivisionError in the relative-distance computation below
        return 360
    standard_resolutions = [144, 240, 360, 720, 1080, 2160, 4320]
    if res in standard_resolutions:
        return res
    # pick the standard resolution with the smallest relative distance;
    # min() keeps the first candidate on ties, matching the old
    # index(min(...)) behaviour
    return min(standard_resolutions, key=lambda x: abs(1 - (x / res)))
def parseFormatStreams(wdata_fstream: dict, ydata_stream: dict) -> dict:
    """Build an Invidious-style formatStreams entry.

    Combines innertube stream metadata (wdata_fstream) with the playable
    URL extracted by yt-dlp (ydata_stream).  Container and codec names
    come from the static FORMATS table (invidious_formats), keyed by itag.
    """
    try:
        stream_url = ydata_stream["url"]
    except (KeyError, TypeError):
        ythdd_globals.print_debug( "could not extract format stream URL from yt-dlp response:")
        ythdd_globals.print_debug(f"wdata: {wdata_fstream}")
        ythdd_globals.print_debug(f"ydata: {ydata_stream}")
        # fix: stream_url used to be left unbound here, so the dict build
        # below raised NameError; fall back to an empty URL instead
        stream_url = ""
    fstream = {
        "url": stream_url,
        "itag": str(wdata_fstream["itag"]),  # clients (clipious) expect a string itag
        "type": wdata_fstream["mimeType"],
        "quality": wdata_fstream["quality"],
        "bitrate": str(wdata_fstream["bitrate"]),
        "fps": wdata_fstream["fps"],
        "size": f"{wdata_fstream['width']}x{wdata_fstream['height']}",
        # nearest standard resolution for picky clients (yattee); possibly not really needed here
        "resolution": f"{findNearestResolution(wdata_fstream['width'], wdata_fstream['height'])}p",
        "qualityLabel": wdata_fstream["qualityLabel"],
        "container": safeTraverse(FORMATS.get(wdata_fstream["itag"]), ["ext"], default="mp4"),  # invidious_formats
        "encoding": safeTraverse(FORMATS.get(wdata_fstream["itag"]), ["vcodec"], default="mp4")  # invidious_formats
    }
    return fstream
def parseAdaptiveStreams(wdata_astream: dict, ydata_stream: dict) -> dict:
    """Build an Invidious-style adaptiveFormats (DASH) entry.

    Combines innertube stream metadata (wdata_astream) with the playable
    URL extracted by yt-dlp (ydata_stream).  Streams carrying an
    "audioQuality" key are treated as audio; everything else as video.
    """
    try:
        stream_url = ydata_stream["url"]
    except (KeyError, TypeError):
        ythdd_globals.print_debug( "could not extract adaptive stream URL from yt-dlp response:")
        # fix: these debug lines referenced the undefined name
        # wdata_fstream, guaranteeing a NameError on the error path
        ythdd_globals.print_debug(f"wdata: {wdata_astream}")
        ythdd_globals.print_debug(f"ydata: {ydata_stream}")
        # fix: stream_url used to be left unbound here, so the dict build
        # below raised NameError; fall back to an empty URL instead
        stream_url = ""
    astream = {
        "init": f"{wdata_astream['initRange']['start']}-{wdata_astream['initRange']['end']}",
        "index": f"{wdata_astream['indexRange']['start']}-{wdata_astream['indexRange']['end']}",
        "bitrate": str(wdata_astream["bitrate"]),
        "url": stream_url,
        "itag": str(wdata_astream["itag"]),  # clients (clipious) expect a string itag
        "type": wdata_astream["mimeType"],
        "clen": wdata_astream["contentLength"],
        "lmt": wdata_astream["lastModified"],
        "projectionType": wdata_astream["projectionType"],
        "container": safeTraverse(FORMATS.get(wdata_astream["itag"]), ["ext"], default="mp4"),  # invidious_formats
        "encoding": safeTraverse(FORMATS.get(wdata_astream["itag"]), ["vcodec"], default="mp4")  # invidious_formats
    }
    if "audioQuality" in wdata_astream:
        # audio-specific metadata; audio streams need acodec, not vcodec
        # (strict DASH players reject a video codec on an audio stream)
        astream["encoding"] = safeTraverse(FORMATS.get(wdata_astream["itag"]), ["acodec"], default="mp4")
        # fix: trailing commas previously turned these two values into
        # 1-tuples, corrupting the emitted struct
        astream["audioQuality"] = wdata_astream["audioQuality"]
        astream["audioSampleRate"] = int(wdata_astream["audioSampleRate"])
        astream["audioChannels"] = wdata_astream["audioChannels"]
    else:
        # video-specific metadata
        astream["fps"] = wdata_astream["fps"]
        astream["size"] = f"{wdata_astream['width']}x{wdata_astream['height']}"
        # nearest standard resolution for picky clients (yattee)
        astream["resolution"] = f"{findNearestResolution(wdata_astream['width'], wdata_astream['height'])}p"
        astream["qualityLabel"] = wdata_astream["qualityLabel"]
        astream["colorInfo"] = safeTraverse(wdata_astream, ["colorInfo"])
    return astream