[pladform] Respect platform id and extract HLS formats (closes #15468)
This commit is contained in:
parent 64a12edb48
commit 3c3bceb41d
1 changed file with 44 additions and 18 deletions
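In short, the extractor now honors the platform id (the pl query parameter of the embed URL) instead of always requesting getVideo with pl=1, and it recognizes HLS sources in the response. A minimal standalone sketch of the query-string part, using Python 3's urllib.parse in place of the compat_urlparse wrapper the extractor uses; the helper name extract_platform_id is illustrative, and the fallback to '1' mirrors the diff below.

from urllib.parse import parse_qs, urlparse


def extract_platform_id(embed_url, default='1'):
    # Pull the pl (platform id) parameter from a Pladform embed URL,
    # falling back to '1' when it is absent, as the updated extractor does.
    qs = parse_qs(urlparse(embed_url).query)
    return qs.get('pl', [default])[0]


# With the URL from the updated test case:
# extract_platform_id('https://out.pladform.ru/player?pl=64471&videoid=3777899&vk_puid15=0&vk_puid34=0') -> '64471'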
@@ -4,7 +4,9 @@
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urlparse
 from ..utils import (
+    determine_ext,
     ExtractorError,
     int_or_none,
     xpath_text,
@@ -26,17 +28,15 @@ class PladformIE(InfoExtractor):
                     (?P<id>\d+)
                 '''
     _TESTS = [{
-        # http://muz-tv.ru/kinozal/view/7400/
-        'url': 'http://out.pladform.ru/player?pl=24822&videoid=100183293',
-        'md5': '61f37b575dd27f1bb2e1854777fe31f4',
+        'url': 'https://out.pladform.ru/player?pl=64471&videoid=3777899&vk_puid15=0&vk_puid34=0',
+        'md5': '53362fac3a27352da20fa2803cc5cd6f',
         'info_dict': {
-            'id': '100183293',
+            'id': '3777899',
             'ext': 'mp4',
-            'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
-            'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
+            'title': 'СТУДИЯ СОЮЗ • Шоу Студия Союз, 24 выпуск (01.02.2018) Нурлан Сабуров и Слава Комиссаренко',
+            'description': 'md5:05140e8bf1b7e2d46e7ba140be57fd95',
             'thumbnail': r're:^https?://.*\.jpg$',
-            'duration': 694,
-            'age_limit': 0,
+            'duration': 3190,
         },
     }, {
         'url': 'http://static.pladform.ru/player.swf?pl=21469&videoid=100183293&vkcid=0',
@@ -56,22 +56,48 @@ def _extract_url(webpage):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        pl = qs.get('pl', ['1'])[0]
+
         video = self._download_xml(
-            'http://out.pladform.ru/getVideo?pl=1&videoid=%s' % video_id,
-            video_id)
+            'http://out.pladform.ru/getVideo', video_id, query={
+                'pl': pl,
+                'videoid': video_id,
+            })
+
+        def fail(text):
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, text),
+                expected=True)
 
         if video.tag == 'error':
-            raise ExtractorError(
-                '%s returned error: %s' % (self.IE_NAME, video.text),
-                expected=True)
+            fail(video.text)
 
         quality = qualities(('ld', 'sd', 'hd'))
 
-        formats = [{
-            'url': src.text,
-            'format_id': src.get('quality'),
-            'quality': quality(src.get('quality')),
-        } for src in video.findall('./src')]
+        formats = []
+        for src in video.findall('./src'):
+            if src is None:
+                continue
+            format_url = src.text
+            if not format_url:
+                continue
+            if src.get('type') == 'hls' or determine_ext(format_url) == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
+            else:
+                formats.append({
+                    'url': src.text,
+                    'format_id': src.get('quality'),
+                    'quality': quality(src.get('quality')),
+                })
+
+        if not formats:
+            error = xpath_text(video, './cap', 'error', default=None)
+            if error:
+                fail(error)
+
         self._sort_formats(formats)
 
         webpage = self._download_webpage(
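For context, the new format loop amounts to the routing sketched below: sources explicitly typed hls, or whose URL ends in .m3u8, go down the HLS path, and everything else becomes a progressive format. A minimal sketch under assumed inputs; route_sources and the (url, type_attr, quality) tuple shape are illustrative, and the real extractor expands each HLS URL via _extract_m3u8_formats(..., fatal=False) rather than collecting plain URLs.

import posixpath
from urllib.parse import urlparse


def route_sources(sources):
    # sources: iterable of (url, type_attr, quality) tuples read from the
    # <src> elements of the getVideo response (illustrative shape).
    hls_urls, progressive = [], []
    for url, type_attr, quality in sources:
        if not url:
            continue
        ext = posixpath.splitext(urlparse(url).path)[1].lstrip('.')
        # Mirror the src.get('type') == 'hls' / determine_ext(...) == 'm3u8' check.
        if type_attr == 'hls' or ext == 'm3u8':
            hls_urls.append(url)  # each would be expanded into per-variant formats
        else:
            progressive.append({'url': url, 'format_id': quality})
    return hls_urls, progressive

The diff also adds a fallback when no formats are found at all: the text of the <cap> element, if present, is surfaced through the new fail() helper so the service's own error message reaches the user.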