diff --git a/yt_dlp/extractor/reddit.py b/yt_dlp/extractor/reddit.py
index 14592bc62..e5a1f6920 100644
--- a/yt_dlp/extractor/reddit.py
+++ b/yt_dlp/extractor/reddit.py
@@ -1,5 +1,4 @@
-from __future__ import unicode_literals
-
+import random
 
 from .common import InfoExtractor
 from ..utils import (
@@ -49,7 +48,7 @@ def _real_extract(self, url):
 
 
 class RedditRIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
+    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
     _TESTS = [{
         'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
         'info_dict': {
@@ -99,13 +98,22 @@ class RedditRIE(InfoExtractor):
         'only_matching': True,
     }]
 
+    @staticmethod
+    def _gen_session_id():
+        id_length = 16
+        rand_max = 1 << (id_length * 4)
+        return '%0.*x' % (id_length, random.randrange(rand_max))
+
     def _real_extract(self, url):
-        slug, video_id = self._match_valid_url(url).group('slug', 'id')
-
-        self._set_cookie('reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
-        data = self._download_json(
-            f'https://old.reddit.com/r/{slug}/.json', video_id)[0]['data']['children'][0]['data']
+        subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')
+        self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
+        self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
+        data = self._download_json(f'https://{subdomain}.reddit.com/r/{slug}/.json', video_id, fatal=False)
+        if not data:
+            # Fall back to old.reddit.com in case the requested subdomain fails
+            data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)
+        data = data[0]['data']['children'][0]['data']
 
         video_url = data['url']
 
         # Avoid recursing into the same reddit URL
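
For context (not part of the patch): a minimal standalone sketch of the reddit_session value the patch generates, using an illustrative gen_session_id helper that mirrors the _gen_session_id method added above.

import random
import secrets


def gen_session_id(id_length: int = 16) -> str:
    # Mirrors the patched _gen_session_id(): each hex digit encodes 4 bits,
    # so randrange(1 << 64) always fits in 16 hex digits, and the '.*'
    # precision zero-pads shorter values out to id_length characters.
    rand_max = 1 << (id_length * 4)
    return '%0.*x' % (id_length, random.randrange(rand_max))


if __name__ == '__main__':
    sid = gen_session_id()
    assert len(sid) == 16 and set(sid) <= set('0123456789abcdef')
    # secrets.token_hex(8) yields an equivalently shaped 16-digit hex string
    print(sid, secrets.token_hex(8))

The cookie value is simply 16 lowercase hex characters; any string of that shape set as reddit_session on .reddit.com serves the same purpose here.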