feat: playlist browsing

Pagination still needs refinement for some of the clients.
On another note, this is an anniversary commit, as ythdd turns one year old
today.
This commit is contained in:
2025-09-25 23:30:59 +02:00
parent 6d0c70696b
commit 1c9174c888
4 changed files with 228 additions and 3 deletions

View File

@@ -29,9 +29,10 @@ import ythdd_struct_parser
# [✓] /api/v1/channel/:ucid/videos, shorts, playlists, streams
# [✓] /api/v1/comments/:videoid?continuation=...
# [✓] /api/v1/videos/:videoIdXXXX
# [✓] /api/v1/playlists/:plid
# ----------
# PLANNED:
# [X] /api/v1/playlists/:plid
# [X] /api/v1/channel/{videos, shorts, playlists, streams, latest?}/:ucid (rewrite)
# [X] /api/v1/:videoIdXXXX/maxres.jpg redirects to best quality thumbnail
# [X] /api/v1/storyboards/:videoIdXXXX
# [X] /api/v1/videos/:videoIdXXXX does not depend on yt-dlp and offloads stream retrieval elsewhere (making initial response fast)
@@ -388,7 +389,7 @@ def videos(data):
title = safeTraverse(video_details, ['title'], default=video_id)
views = int(safeTraverse(video_details, ['viewCount'], default=0))
length = int(safeTraverse(video_details, ['lengthSeconds'], default=1))
published = dateToEpoch(safeTraverse(microformat, ['publishDate'], default="1970-01-02T00:00:00Z")) # ISO format to Unix timestamp
published = dateToEpoch(safeTraverse(microformat, ['publishDate'], default="2000-01-01T00:00:00Z")) # ISO format to Unix timestamp
published_date = epochToDate(published)
premiere_timestamp = safeTraverse(microformat, ['liveBroadcastDetails', 'startTimestamp'], default=None) # let's ignore the nitty gritty for the time being
premiere_timestamp = premiere_timestamp if premiere_timestamp else safeTraverse(microformat, ['playabilityStatus', 'liveStreamability', 'liveStreamabilityRenderer', 'offlineSlate', 'liveStreamOfflineSlateRenderer', 'scheduledStartTime'], default=None)
@@ -865,6 +866,104 @@ def channels(data, req, only_json: bool = False):
if ythdd_globals.config["general"]["debug"]:
response["wdata"] = wdata
# todo: cache response
if only_json:
return response
return send(200, response)
# Handle /api/v1/playlists/:plid — fetch a playlist's metadata and its full
# video list, returning an Invidious-compatible JSON object (or an HTTP
# response via send()).
# Params:
#   data      — URL path segments; data[3] is expected to be the playlist id.
#   req       — incoming request object; only req.args.get('page') is read.
#   only_json — when True, return the response dict directly instead of
#               wrapping it in an HTTP response.
# NOTE(review): this block is rendered flat by the diff view — indentation is
# not meaningful here.
def playlists(data, req, only_json: bool = False):
# read playlist id and sanity check
if len(data) < 4:
return send(400, {"error": "No playlist specified."})
# todo: make clipious stop spamming requests for paginated response
# Only the unpaginated (or page=1) request is served; any other page value
# is rejected with 404 so paginating clients stop early.
page = req.args.get('page')
if page is not None and page != '1':
return send(404, {"error": "Paginated queries are not supported."})
plid = data[3]
# Length gate for standard playlist ids — presumably "PL" + 32/34 chars;
# TODO confirm. Mixes / video-based playlists have different id shapes and
# are rejected here.
if len(plid) not in (34, 36):
print("error(playlists): len(plid) is not 34 or 36...!")
response = {"error": "Only standard playlists are currently supported (no mixes, video-based playlists, etc.)"}
if only_json:
return response
return send(400, response)
# check if request has been cached within the last hour
# Cache hit: serve the stored dict as-is; stale entries (older than 1h,
# judged by the 'cacheTime' field stored at insert time) are evicted and
# the playlist is re-fetched below.
if ythdd_globals.config['general']['cache'] and plid in ythdd_globals.general_cache['playlists']:
if ythdd_globals.general_cache['playlists'][plid]['cacheTime'] + 1 * 60 * 60 > time():
response = ythdd_globals.general_cache['playlists'][plid]
if only_json:
return response
else:
return send(200, response)
else:
del ythdd_globals.general_cache['playlists'][plid]
# browse the playlist iteratively, first fetch is without any continuation
# First call uses plid=...; follow-up calls walk the continuation tokens
# until WEBextractPlaylist reports no further continuation.
all_unparsed_videos = []
meta, new_continuation, videos = ythdd_extractor.WEBextractPlaylist(plid=plid)
if isinstance(videos, list):
all_unparsed_videos = videos.copy()
while new_continuation != None:
# fetch subsequent playlist videos
# Metadata from continuation pages is discarded — only the first page's
# `meta` is used below.
_, new_continuation, videos = ythdd_extractor.WEBextractPlaylist(ctoken=new_continuation)
if videos is not None:
all_unparsed_videos.extend(videos)
# process videos
# parseRenderers may return None for unrenderable entries; those are dropped.
parsed_videos = []
for video in all_unparsed_videos:
parsed_video = ythdd_struct_parser.parseRenderers(video)
if parsed_video is not None:
parsed_videos.append(parsed_video)
# process metadata
# Sidebar item 0 carries the playlist's own info, item 1 the owner info —
# based on the traversal paths used below; TODO confirm against live data.
primary_sidebar = safeTraverse(meta, ["sidebar", "playlistSidebarRenderer", "items", 0, "playlistSidebarPrimaryInfoRenderer"], default={})
secondary_sidebar = safeTraverse(meta, ["sidebar", "playlistSidebarRenderer", "items", 1, "playlistSidebarSecondaryInfoRenderer"], default={})
# apparently fields can be stored inside of simpleText one time, only to be stored inside of runs another time
title = ythdd_struct_parser.extractTextFromSimpleOrRuns(safeTraverse(primary_sidebar, ["title"]), default="Unknown playlist title")
# -1 picks the last (highest-resolution) thumbnail variant.
playlist_thumb = ythdd_globals.translateLinks(safeTraverse(primary_sidebar, ["thumbnailRenderer", "playlistVideoThumbnailRenderer", "thumbnail", "thumbnails", -1, "url"], default=DEFAULT_VIDEO))
author = safeTraverse(secondary_sidebar, ["videoOwner", "videoOwnerRenderer", "title", "runs", 0, "text"], default="Unknown channel")
author_ucid = safeTraverse(secondary_sidebar, ["videoOwner", "videoOwnerRenderer", "title", "runs", 0, "navigationEndpoint", "browseEndpoint", "browseId"], default="UNKNOWNCHANNELID")
author_avatars = ythdd_extractor.generateChannelAvatarsFromUrl(safeTraverse(secondary_sidebar, ["videoOwner", "videoOwnerRenderer", "thumbnail", "thumbnails", 0, "url"], default=DEFAULT_AVATAR))
description = safeTraverse(meta, ["header", "pageHeaderRenderer", "content", "pageHeaderViewModel", "description", "descriptionPreviewViewModel", "description", "content"], default="(ythdd: failed to retrieve description, perhaps it's empty?)")
# Escape first, then convert newlines to <br> for the HTML variant.
description_html = html.escape(description).replace("\r\n", "<br>").replace("\n", "<br>")
# stats[0] = video count text, stats[1] = view count text, stats[2] = last
# updated text — inferred from the defaults used; TODO confirm ordering.
video_count = ythdd_struct_parser.parseViewsFromViewText(ythdd_struct_parser.extractTextFromSimpleOrRuns(safeTraverse(primary_sidebar, ["stats", 0]), default="No videos"))
view_count = ythdd_struct_parser.parseViewsFromViewText(ythdd_struct_parser.extractTextFromSimpleOrRuns(safeTraverse(primary_sidebar, ["stats", 1]), default="No views"))
# Strip the English "Last updated on ..." / "Updated ..." prefixes before
# handing the remainder to dateparser. NOTE(review): dateparser.parse can
# return None for unparseable text, which would raise here — confirm inputs.
updated = ythdd_struct_parser.extractTextFromSimpleOrRuns(safeTraverse(primary_sidebar, ["stats", 2]), default="2000-01-01").removeprefix("Last updated on ").removeprefix("Updated ")
updated = int(dateparser.parse(updated).timestamp())
is_unlisted = safeTraverse(primary_sidebar, ["badges", 0, "metadataBadgeRenderer", "icon", "iconType"], default="PRIVACY_LISTED") == "PRIVACY_UNLISTED" # this needs further research https://gitea.invidious.io/iv-org/invidious/src/commit/325e013e0d9e5670fa0df7635ff30a0ee029e05e/src/invidious/playlists.cr#L133
response = {
"type": "playlist",
"title": title,
"playlistId": plid,
"playlistThumbnail": playlist_thumb,
"author": author,
"authorId": author_ucid,
"authorUrl": "/channel/" + author_ucid,
"subtitle": None, # todo?
"authorThumbnails": author_avatars,
"description": description,
"descriptionHtml": description_html,
"videoCount": video_count,
"viewCount": view_count,
"updated": updated,
"isListed": not is_unlisted,
"videos": parsed_videos
}
# todo: cache videos and metadata separately, so that paginated queries can be supported as well
# NOTE(review): 'cacheTime' is injected into the same dict that is stored
# (and later served) from the cache, so cache hits expose this internal
# field to clients — confirm this is intended.
if ythdd_globals.config['general']['cache']:
ythdd_globals.general_cache['playlists'][plid] = response
ythdd_globals.general_cache['playlists'][plid]['cacheTime'] = time()
if only_json:
return response
# NOTE(review): no final `return send(200, response)` is visible for the
# non-only_json path — the hunk header counts more lines than are rendered
# here, so the tail of this function may be truncated in this diff view.
@@ -891,6 +990,8 @@ def lookup(data, req):
return channels(data, req)
case 'comments':
return get_comments(data, req)
case 'playlists':
return playlists(data, req)
case _:
incrementBadRequests()
return notImplemented(data)