mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-21 20:46:36 -05:00
Merge pull request #32 from amigatomte/patch-1
Update to reflect website changes.
This commit is contained in:
commit
1808b9e28c
1 changed files with 28 additions and 21 deletions
|
@ -3,6 +3,7 @@
|
|||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import unified_timestamp
|
||||
import re
|
||||
|
||||
|
||||
class URPlayIE(InfoExtractor):
|
||||
|
@ -13,10 +14,10 @@ class URPlayIE(InfoExtractor):
|
|||
'info_dict': {
|
||||
'id': '203704',
|
||||
'ext': 'mp4',
|
||||
'title': 'UR Samtiden - Livet, universum och rymdens märkliga musik : Om vetenskap, kritiskt tänkande och motstånd',
|
||||
'title': 'Om vetenskap, kritiskt tänkande och motstånd',
|
||||
'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a',
|
||||
'timestamp': 1513512768,
|
||||
'upload_date': '20171217',
|
||||
'timestamp': 1513292400,
|
||||
'upload_date': '20171214',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde',
|
||||
|
def _real_extract(self, url):
    """Extract a urplay.se / urskola.se video.

    The player data now lives in an HTML-escaped JSON blob inside the
    ``data-react-props`` attribute of the Player component (it used to be a
    ``urPlayer.init({...})`` call), so the raw match must have ``&quot;``
    entities turned back into double quotes before JSON parsing.
    """
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)

    # The attribute value is HTML-escaped JSON; un-escape the quotes the
    # page encoder produced before handing it to the JSON parser.
    # NOTE(review): only &quot; is unescaped here — assumes the props blob
    # contains no other entities; confirm against a live page.
    urplayer_data = re.sub("&quot;", "\"", self._search_regex(
        r'components\/Player\/Player\" data-react-props=\"({.+?})\"',
        webpage, 'urplayer data'))
    urplayer_data = self._parse_json(urplayer_data, video_id)

    # The props describe a whole series; narrow down to the episode whose
    # numeric id matches the one from the URL.
    for episode in urplayer_data['accessibleEpisodes']:
        if episode.get('id') == int(video_id):
            urplayer_data = episode
            break

    host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']

    formats = []
    urplayer_streams = urplayer_data.get("streamingInfo")
    for quality in ('sd', 'hd'):
        # Prefer the untouched stream; fall back to the Swedish "complete"
        # (hard-subbed) variant when no raw stream is published.
        location = (urplayer_streams.get("raw", {}).get(quality, {}).get("location")
                    or urplayer_streams.get("sweComplete", {}).get(quality, {}).get("location"))
        if location:
            formats.extend(self._extract_wowza_formats(
                'http://%s/%s/playlist.m3u8' % (host, location), video_id,
                skip_protocols=['f4m', 'rtmp', 'rtsp']))
    self._sort_formats(formats)

    subtitles = {}
    # Subtitles are only exposed as a TT track on the sweComplete stream.
    subs = urplayer_streams.get("sweComplete", {}).get("tt", {}).get("location")
    if subs:
        subtitles.setdefault('Svenska', []).append({
            'url': subs,
        })

    return {
        'id': video_id,
        'title': urplayer_data['title'],
        'description': self._og_search_description(webpage),
        'thumbnail': urplayer_data.get('image', {}).get('1280x720'),
        'timestamp': unified_timestamp(self._html_search_meta(('uploadDate', 'schema:uploadDate'),
                                                              webpage, 'timestamp')),
        'series': urplayer_data.get('seriesTitle'),
        'subtitles': subtitles,
        'formats': formats,
    }
|
|
Loading…
Reference in a new issue