Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-12-28 08:21:41 +00:00).
TEDIE: download the best quality video and use the new _search_regex functions
Also extracts the description.
This commit is contained in:
parent
038a3a1a61
commit
f4c8bbcfc2
2 changed files with 22 additions and 26 deletions
|
@ -363,10 +363,10 @@
|
|||
"name": "TED",
|
||||
"url": "http://www.ted.com/talks/dan_dennett_on_our_consciousness.html",
|
||||
"file": "102.mp4",
|
||||
"md5": "7bc087e71d16f18f9b8ab9fa62a8a031",
|
||||
"md5": "8cd9dfa41ee000ce658fd48fb5d89a61",
|
||||
"info_dict": {
|
||||
"title": "Dan Dennett: The illusion of consciousness",
|
||||
"thumbnail": "http://images.ted.com/images/ted/488_389x292.jpg"
|
||||
"description": "md5:c6fa72e6eedbd938c9caf6b2702f5922"
|
||||
}
|
||||
},
|
||||
{
|
||||
|
|
|
@ -3792,10 +3792,6 @@ def _real_extract(self, url):
|
|||
self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id,name))
|
||||
return [self._playlist_videos_info(url,name,playlist_id)]
|
||||
|
||||
def _talk_video_link(self,mediaSlug):
|
||||
'''Returns the video link for that mediaSlug'''
|
||||
return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
|
||||
|
||||
def _playlist_videos_info(self,url,name,playlist_id=0):
|
||||
'''Returns the videos of the playlist'''
|
||||
video_RE=r'''
|
||||
|
@ -3808,9 +3804,8 @@ def _playlist_videos_info(self,url,name,playlist_id=0):
|
|||
m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
|
||||
m_names=re.finditer(video_name_RE,webpage)
|
||||
|
||||
playlist_RE = r'div class="headline">(\s*?)<h1>(\s*?)<span>(?P<playlist_title>.*?)</span>'
|
||||
m_playlist = re.search(playlist_RE, webpage)
|
||||
playlist_title = m_playlist.group('playlist_title')
|
||||
playlist_title = self._html_search_regex(r'div class="headline">\s*?<h1>\s*?<span>(.*?)</span>',
|
||||
webpage, 'playlist title')
|
||||
|
||||
playlist_entries = []
|
||||
for m_video, m_name in zip(m_videos,m_names):
|
||||
|
@ -3821,27 +3816,28 @@ def _playlist_videos_info(self,url,name,playlist_id=0):
|
|||
|
||||
def _talk_info(self, url, video_id=0):
    """Return the info dict for the talk at the given TED url.

    Scrapes the talk page for title, description and thumbnail, and
    parses the embedded ``talkDetails`` JSON for the talk id and the
    stream URL. The last entry of ``htmlStreams`` is used — per the
    commit intent this is assumed to be the best-quality stream
    (TODO confirm ordering against a live page).
    """
    m = re.match(self._VALID_URL, url, re.VERBOSE)
    video_name = m.group('name')
    webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name)
    self.report_extraction(video_name)
    # If the url includes the language we get the title translated
    title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>',
                                    webpage, 'title')
    json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>',
                                   webpage, 'json data')
    # Parsed talkDetails object; only 'id' and 'htmlStreams' are read below.
    talk_details = json.loads(json_data)
    desc = self._html_search_regex(r'<div class="talk-intro">.*?<p.*?>(.*?)</p>',
                                   webpage, 'description', flags=re.DOTALL)
    thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"',
                                   webpage, 'thumbnail')
    info = {
        'id': talk_details['id'],
        'url': talk_details['htmlStreams'][-1]['file'],
        'ext': 'mp4',
        'title': title,
        'thumbnail': thumbnail,
        'description': desc,
    }
    return info
|
||||
|
||||
|
|
Loading…
Reference in a new issue