mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-14 20:38:11 -05:00
b827ee921f
* [scrippsnetworks] Add new extractor(closes #19857)(closes #22981) * [teachable] Improve locked lessons detection (#23528) * [teachable] Fail with error message if no video URL found * [extractors] add missing import for ScrippsNetworksIE * [brightcove] cache brightcove player policy keys * [prosiebensat1] improve geo restriction handling(closes #23571) * [soundcloud] automatically update client id on failing requests * [spankbang] Fix extraction (closes #23307, closes #23423, closes #23444) * [spankbang] Improve removed video detection (#23423) * [brightcove] update policy key on failing requests * [pornhub] Fix extraction and add support for m3u8 formats (closes #22749, closes #23082) * [pornhub] Improve locked videos detection (closes #22449, closes #22780) * [brightcove] invalidate policy key cache on failing requests * [soundcloud] fix client id extraction for non fatal requests * [ChangeLog] Actualize [ci skip] * [devscripts/create-github-release] Switch to using PAT for authentication Basic authentication will be deprecated soon * release 2020.01.01 * [redtube] Detect private videos (#23518) * [vice] improve extraction(closes #23631) * [devscripts/create-github-release] Remove unused import * [wistia] improve format extraction and extract subtitles(closes #22590) * [nrktv:seriebase] Fix extraction (closes #23625) (#23537) * [discovery] fix anonymous token extraction(closes #23650) * [scrippsnetworks] add support for www.discovery.com videos * [scrippsnetworks] correct test case URL * [dctp] fix format extraction(closes #23656) * [pandatv] Remove extractor (#23630) * [naver] improve extraction - improve geo-restriction handling - extract automatic captions - extract uploader metadata - extract VLive HLS formats * [naver] improve metadata extraction * [cloudflarestream] improve extraction - add support for bytehighway.net domain - add support for signed URLs - extract thumbnail * [cloudflarestream] import embed URL extraction * [lego] fix extraction and 
extract subtitle(closes #23687) * [safari] Fix kaltura session extraction (closes #23679) (#23670) * [orf:fm4] Fix extraction (#23599) * [orf:radio] Clean description and improve extraction * [twitter] add support for promo_video_website cards(closes #23711) * [vodplatform] add support for embed.kwikmotion.com domain * [ndr:base:embed] Improve thumbnails extraction (closes #23731) * [canvas] Add support for new API endpoint and update tests (closes #17680, closes #18629) * [travis] Add flake8 job (#23720) * [yourporn] Fix extraction (closes #21645, closes #22255, closes #23459) * [ChangeLog] Actualize [ci skip] * release 2020.01.15 * [soundcloud] Restore previews extraction (closes #23739) * [orf:tvthek] Improve geo restricted videos detection (closes #23741) * [zype] improve extraction - extract subtitles(closes #21258) - support URLs with alternative keys/tokens(#21258) - extract more metadata * [americastestkitchen] fix extraction * [nbc] add support for nbc multi network URLs(closes #23049) * [ard] improve extraction(closes #23761) - simplify extraction - extract age limit and series - bypass geo-restriction * [ivi:compilation] Fix entries extraction (closes #23770) * [24video] Add support for 24video.vip (closes #23753) * [businessinsider] Fix jwplatform id extraction (closes #22929) (#22954) * [ard] add a missing condition * [azmedien] fix extraction(closes #23783) * [voicerepublic] fix extraction * [stretchinternet] fix extraction(closes #4319) * [youtube] Fix sigfunc name extraction (closes #23819) * [ChangeLog] Actualize [ci skip] * release 2020.01.24 * [soundcloud] imporve private playlist/set tracks extraction https://github.com/ytdl-org/youtube-dl/issues/3707#issuecomment-577873539 * [svt] fix article extraction(closes #22897)(closes #22919) * [svt] fix series extraction(closes #22297) * [viewlift] improve extraction - fix extraction(closes #23851) - add add support for authentication - add support for more domains * [vimeo] fix album extraction(closes 
#23864) * [tva] Relax _VALID_URL (closes #23903) * [tv5mondeplus] Fix extraction (closes #23907, closes #23911) * [twitch:stream] Lowercase channel id for stream request (closes #23917) * [sportdeutschland] Update to new sportdeutschland API They switched to SSL, but under a different host AND path... Remove the old test cases because these videos have become unavailable. * [popcorntimes] Add extractor (closes #23949) * [thisoldhouse] fix extraction(closes #23951) * [toggle] Add support for mewatch.sg (closes #23895) (#23930) * [compat] Introduce compat_realpath (refs #23991) * [update] Fix updating via symlinks (closes #23991) * [nytimes] improve format sorting(closes #24010) * [abc:iview] Support 720p (#22907) (#22921) * [nova:embed] Fix extraction (closes #23672) * [nova:embed] Improve (closes #23690) * [nova] Improve extraction (refs #23690) * [jpopsuki] Remove extractor (closes #23858) * [YoutubeDL] Fix playlist entry indexing with --playlist-items (closes #10591, closes #10622) * [test_YoutubeDL] Fix get_ids * [test_YoutubeDL] Add tests for #10591 (closes #23873) * [24video] Add support for porn.24video.net (closes #23779, closes #23784) * [npr] Add support for streams (closes #24042) * [ChangeLog] Actualize [ci skip] * release 2020.02.16 * [tv2dk:bornholm:play] Fix extraction (#24076) * [imdb] Fix extraction (closes #23443) * [wistia] Add support for multiple generic embeds (closes #8347, closes #11385) * [teachable] Add support for multiple videos per lecture (closes #24101) * [pornhd] Fix extraction (closes #24128) * [options] Remove duplicate short option -v for --version (#24162) * [extractor/common] Convert ISM manifest to unicode before processing on python 2 (#24152) * [YoutubeDL] Force redirect URL to unicode on python 2 * Remove no longer needed compat_str around geturl * [youjizz] Fix extraction (closes #24181) * [test_subtitles] Remove obsolete test * [zdf:channel] Fix tests * [zapiks] Fix test * [xtube] Fix metadata extraction (closes #21073, 
closes #22455) * [xtube:user] Fix test * [telecinco] Fix extraction (refs #24195) * [telecinco] Add support for article opening videos * [franceculture] Fix extraction (closes #24204) * [xhamster] Fix extraction (closes #24205) * [ChangeLog] Actualize [ci skip] * release 2020.03.01 * [vimeo] Fix subtitles URLs (#24209) * [servus] Add support for new URL schema (closes #23475, closes #23583, closes #24142) * [youtube:playlist] Fix tests (closes #23872) (#23885) * [peertube] Improve extraction * [peertube] Fix issues and improve extraction (closes #23657) * [pornhub] Improve title extraction (closes #24184) * [vimeo] fix showcase password protected video extraction(closes #24224) * [youtube] Fix age-gated videos support without login (closes #24248) * [youtube] Fix tests * [ChangeLog] Actualize [ci skip] * release 2020.03.06 * [nhk] update API version(closes #24270) * [youtube] Improve extraction in 429 error conditions (closes #24283) * [youtube] Improve age-gated videos extraction in 429 error conditions (refs #24283) * [youtube] Remove outdated code Additional get_video_info requests don't seem to provide any extra itags any longer * [README.md] Clarify 429 error * [pornhub] Add support for pornhubpremium.com (#24288) * [utils] Add support for cookies with spaces used instead of tabs * [ChangeLog] Actualize [ci skip] * release 2020.03.08 * Revert "[utils] Add support for cookies with spaces used instead of tabs" According to [1] TABs must be used as separators between fields. Files produces by some tools with spaces as separators are considered malformed. 1. https://curl.haxx.se/docs/http-cookies.html This reverts commitcff99c91d1
. * [utils] Add reference to cookie file format * Revert "[vimeo] fix showcase password protected video extraction(closes #24224)" This reverts commit12ee431676
. * [nhk] Relax _VALID_URL (#24329) * [nhk] Remove obsolete rtmp formats (closes #24329) * [nhk] Update m3u8 URL and use native hls (#24329) * [ndr] Fix extraction (closes #24326) * [xtube] Fix formats extraction (closes #24348) * [xtube] Fix typo * [hellporno] Fix extraction (closes #24399) * [cbc:watch] Add support for authentication * [cbc:watch] Fix authenticated device token caching (closes #19160) * [soundcloud] fix download url extraction(closes #24394) * [limelight] remove disabled API requests(closes #24255) * [bilibili] Add support for new URL schema with BV ids (closes #24439, closes #24442) * [bilibili] Add support for player.bilibili.com (closes #24402) * [teachable] Extract chapter metadata (closes #24421) * [generic] Look for teachable embeds before wistia * [teachable] Update upskillcourses domain New version does not use teachable platform any longer * [teachable] Update gns3 domain * [teachable] Update test * [ChangeLog] Actualize [ci skip] * [ChangeLog] Actualize [ci skip] * release 2020.03.24 * [spankwire] Fix extraction (closes #18924, closes #20648) * [spankwire] Add support for generic embeds (refs #24633) * [youporn] Add support form generic embeds * [mofosex] Add support for generic embeds (closes #24633) * [tele5] Fix extraction (closes #24553) * [extractor/common] Skip malformed ISM manifest XMLs while extracting ISM formats (#24667) * [tv4] Fix ISM formats extraction (closes #24667) * [twitch:clips] Extend _VALID_URL (closes #24290) (#24642) * [motherless] Fix extraction (closes #24699) * [nova:embed] Fix extraction (closes #24700) * [youtube] Skip broken multifeed videos (closes #24711) * [soundcloud] Extract AAC format * [soundcloud] Improve AAC format extraction (closes #19173, closes #24708) * [thisoldhouse] Fix video id extraction (closes #24548) Added support for: with of without "www." 
and either ".chorus.build" or ".com" It now validated correctly on older URL's ``` <iframe src="https://thisoldhouse.chorus.build/videos/zype/5e33baec27d2e50001d5f52f ``` and newer ones ``` <iframe src="https://www.thisoldhouse.com/videos/zype/5e2b70e95216cc0001615120 ``` * [thisoldhouse] Improve video id extraction (closes #24549) * [youtube] Fix DRM videos detection (refs #24736) * [options] Clarify doc on --exec command (closes #19087) (#24883) * [prosiebensat1] Improve extraction and remove 7tv.de support (#24948) * [prosiebensat1] Extract series metadata * [tenplay] Relax _VALID_URL (closes #25001) * [tvplay] fix Viafree extraction(closes #15189)(closes #24473)(closes #24789) * [yahoo] fix GYAO Player extraction and relax title URL regex(closes #24178)(closes #24778) * [youtube] Use redirected video id if any (closes #25063) * [youtube] Improve player id extraction and add tests * [extractor/common] Extract multiple JSON-LD entries * [crunchyroll] Fix and improve extraction (closes #25096, closes #25060) * [ChangeLog] Actualize [ci skip] * release 2020.05.03 * [puhutv] Remove no longer available HTTP formats (closes #25124) * [utils] Improve cookie files support + Add support for UTF-8 in cookie files * Skip malformed cookie file entries instead of crashing (invalid entry len, invalid expires at) * [dailymotion] Fix typo * [compat] Introduce compat_cookiejar_Cookie * [extractor/common] Use compat_cookiejar_Cookie for _set_cookie (closes #23256, closes #24776) To always ensure cookie name and value are bytestrings on python 2. * [orf] Add support for more radio stations (closes #24938) (#24968) * [uol] fix extraction(closes #22007) * [downloader/http] Finish downloading once received data length matches expected Always do this if possible, i.e. if Content-Length or expected length is known, not only in test. This will save unnecessary last extra loop trying to read 0 bytes. 
* [downloader/http] Request last data block of exact remaining size Always request last data block of exact size remaining to download if possible not the current block size. * [iprima] Improve extraction (closes #25138) * [youtube] Improve signature cipher extraction (closes #25188) * [ChangeLog] Actualize [ci skip] * release 2020.05.08 * [spike] fix Bellator mgid extraction(closes #25195) * [bbccouk] PEP8 * [mailru] Fix extraction (closes #24530) (#25239) * [README.md] flake8 HTTPS URL (#25230) * [youtube] Add support for yewtu.be (#25226) * [soundcloud] reduce API playlist page limit(closes #25274) * [vimeo] improve format extraction and sorting(closes #25285) * [redtube] Improve title extraction (#25208) * [indavideo] Switch to HTTPS for API request (#25191) * [utils] Fix file permissions in write_json_file (closes #12471) (#25122) * [redtube] Improve formats extraction and extract m3u8 formats (closes #25311, closes #25321) * [ard] Improve _VALID_URL (closes #25134) (#25198) * [giantbomb] Extend _VALID_URL (#25222) * [postprocessor/ffmpeg] Embed series metadata with --add-metadata * [youtube] Add support for more invidious instances (#25417) * [ard:beta] Extend _VALID_URL (closes #25405) * [ChangeLog] Actualize [ci skip] * release 2020.05.29 * [jwplatform] Improve embeds extraction (closes #25467) * [periscope] Fix untitled broadcasts (#25482) * [twitter:broadcast] Add untitled periscope broadcast test * [malltv] Add support for sk.mall.tv (#25445) * [brightcove] Fix subtitles extraction (closes #25540) * [brightcove] Sort imports * [twitch] Pass v5 accept header and fix thumbnails extraction (closes #25531) * [twitch:stream] Fix extraction (closes #25528) * [twitch:stream] Expect 400 and 410 HTTP errors from API * [tele5] Prefer jwplatform over nexx (closes #25533) * [jwplatform] Add support for bypass geo restriction * [tele5] Bypass geo restriction * [ChangeLog] Actualize [ci skip] * release 2020.06.06 * [kaltura] Add support for multiple embeds on a 
webpage (closes #25523) * [youtube] Extract chapters from JSON (closes #24819) * [facebook] Support single-video ID links I stumbled upon this at https://www.facebook.com/bwfbadminton/posts/10157127020046316 . No idea how prevalent it is yet. * [youtube] Fix playlist and feed extraction (closes #25675) * [youtube] Fix thumbnails extraction and remove uploader id extraction warning (closes #25676) * [youtube] Fix upload date extraction * [youtube] Improve view count extraction * [youtube] Fix uploader id and uploader URL extraction * [ChangeLog] Actualize [ci skip] * release 2020.06.16 * [youtube] Fix categories and improve tags extraction * [youtube] Force old layout (closes #25682, closes #25683, closes #25680, closes #25686) * [ChangeLog] Actualize [ci skip] * release 2020.06.16.1 * [brightcove] Improve embed detection (closes #25674) * [bellmedia] add support for cp24.com clip URLs(closes #25764) * [youtube:playlists] Extend _VALID_URL (closes #25810) * [youtube] Prevent excess HTTP 301 (#25786) * [wistia] Restrict embed regex (closes #25969) * [youtube] Improve description extraction (closes #25937) (#25980) * [youtube] Fix sigfunc name extraction (closes #26134, closes #26135, closes #26136, closes #26137) * [ChangeLog] Actualize [ci skip] * release 2020.07.28 * [xhamster] Extend _VALID_URL (closes #25789) (#25804) * [xhamster] Fix extraction (closes #26157) (#26254) * [xhamster] Extend _VALID_URL (closes #25927) Co-authored-by: Remita Amine <remitamine@gmail.com> Co-authored-by: Sergey M․ <dstftw@gmail.com> Co-authored-by: nmeum <soeren+github@soeren-tempel.net> Co-authored-by: Roxedus <me@roxedus.dev> Co-authored-by: Singwai Chan <c.singwai@gmail.com> Co-authored-by: cdarlint <cdarlint@users.noreply.github.com> Co-authored-by: Johannes N <31795504+jonolt@users.noreply.github.com> Co-authored-by: jnozsc <jnozsc@gmail.com> Co-authored-by: Moritz Patelscheck <moritz.patelscheck@campus.tu-berlin.de> Co-authored-by: PB <3854688+uno20001@users.noreply.github.com> 
Co-authored-by: Philipp Hagemeister <phihag@phihag.de> Co-authored-by: Xaver Hellauer <software@hellauer.bayern> Co-authored-by: d2au <d2au.dev@gmail.com> Co-authored-by: Jan 'Yenda' Trmal <jtrmal@gmail.com> Co-authored-by: jxu <7989982+jxu@users.noreply.github.com> Co-authored-by: Martin Ström <name@my-domain.se> Co-authored-by: The Hatsune Daishi <nao20010128@gmail.com> Co-authored-by: tsia <github@tsia.de> Co-authored-by: 3risian <59593325+3risian@users.noreply.github.com> Co-authored-by: Tristan Waddington <tristan.waddington@gmail.com> Co-authored-by: Devon Meunier <devon.meunier@gmail.com> Co-authored-by: Felix Stupp <felix.stupp@outlook.com> Co-authored-by: tom <tomster954@gmail.com> Co-authored-by: AndrewMBL <62922222+AndrewMBL@users.noreply.github.com> Co-authored-by: willbeaufoy <will@willbeaufoy.net> Co-authored-by: Philipp Stehle <anderschwiedu@googlemail.com> Co-authored-by: hh0rva1h <61889859+hh0rva1h@users.noreply.github.com> Co-authored-by: comsomisha <shmelev1996@mail.ru> Co-authored-by: TotalCaesar659 <14265316+TotalCaesar659@users.noreply.github.com> Co-authored-by: Juan Francisco Cantero Hurtado <iam@juanfra.info> Co-authored-by: Dave Loyall <dave@the-good-guys.net> Co-authored-by: tlsssl <63866177+tlsssl@users.noreply.github.com> Co-authored-by: Rob <ankenyr@gmail.com> Co-authored-by: Michael Klein <github@a98shuttle.de> Co-authored-by: JordanWeatherby <47519158+JordanWeatherby@users.noreply.github.com> Co-authored-by: striker.sh <19488257+strikersh@users.noreply.github.com> Co-authored-by: Matej Dujava <mdujava@gmail.com> Co-authored-by: Glenn Slayden <5589855+glenn-slayden@users.noreply.github.com> Co-authored-by: MRWITEK <mrvvitek@gmail.com> Co-authored-by: JChris246 <43832407+JChris246@users.noreply.github.com> Co-authored-by: TheRealDude2 <the.real.dude@gmx.de>
377 lines
15 KiB
Python
377 lines
15 KiB
Python
# coding: utf-8
|
|
from __future__ import unicode_literals
|
|
|
|
import re
|
|
import base64
|
|
|
|
from .common import InfoExtractor
|
|
from ..compat import (
|
|
compat_urlparse,
|
|
compat_parse_qs,
|
|
)
|
|
from ..utils import (
|
|
clean_html,
|
|
ExtractorError,
|
|
int_or_none,
|
|
unsmuggle_url,
|
|
smuggle_url,
|
|
)
|
|
|
|
|
|
class KalturaIE(InfoExtractor):
    """Extractor for videos hosted on the Kaltura platform.

    Accepts both the internal ``kaltura:PARTNER_ID:ENTRY_ID`` URL scheme
    (produced by :meth:`_extract_urls`) and public kaltura.com player URLs
    (legacy flash player and html5 ``mwEmbedFrame.php`` paths).
    """

    # NOTE(review): the group below opens with "(:?" rather than "(?:" —
    # looks like a typo for a non-capturing group; it still matches the
    # intended hosts (plus a stray leading ':'), so it is left as-is.
    _VALID_URL = r'''(?x)
                (?:
                    kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)|
                    https?://
                        (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
                        (?:
                            (?:
                                # flash player
                                index\.php/(?:kwidget|extwidget/preview)|
                                # html5 player
                                html5/html5lib/[^/]+/mwEmbedFrame\.php
                            )
                        )(?:/(?P<path>[^?]+))?(?:\?(?P<query>.*))?
                )
                '''
    # Default API endpoint; may be overridden per-embed via smuggled
    # 'service_url' data (see _extract_urls / _kaltura_api_call).
    _SERVICE_URL = 'http://cdnapi.kaltura.com'
    _SERVICE_BASE = '/api_v3/index.php'
    # Maps Kaltura caption "format" enum values to subtitle file extensions.
    # See https://github.com/kaltura/server/blob/master/plugins/content/caption/base/lib/model/enums/CaptionType.php
    _CAPTION_TYPES = {
        1: 'srt',
        2: 'ttml',
        3: 'vtt',
    }
    _TESTS = [
        {
            'url': 'kaltura:269692:1_1jc2y3e4',
            'md5': '3adcbdb3dcc02d647539e53f284ba171',
            'info_dict': {
                'id': '1_1jc2y3e4',
                'ext': 'mp4',
                'title': 'Straight from the Heart',
                'upload_date': '20131219',
                'uploader_id': 'mlundberg@wolfgangsvault.com',
                'description': 'The Allman Brothers Band, 12/16/1981',
                'thumbnail': 're:^https?://.*/thumbnail/.*',
                'timestamp': int,
            },
        },
        {
            'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
            'only_matching': True,
        },
        {
            'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
            'only_matching': True,
        },
        {
            'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342',
            'only_matching': True,
        },
        {
            # video with subtitles
            'url': 'kaltura:111032:1_cw786r8q',
            'only_matching': True,
        },
        {
            # video with ttml subtitles (no fileExt)
            'url': 'kaltura:1926081:0_l5ye1133',
            'info_dict': {
                'id': '0_l5ye1133',
                'ext': 'mp4',
                'title': 'What Can You Do With Python?',
                'upload_date': '20160221',
                'uploader_id': 'stork',
                'thumbnail': 're:^https?://.*/thumbnail/.*',
                'timestamp': int,
                'subtitles': {
                    'en': [{
                        'ext': 'ttml',
                    }],
                },
            },
            'skip': 'Gone. Maybe https://www.safaribooksonline.com/library/tutorials/introduction-to-python-anon/3469/',
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'https://www.kaltura.com/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto',
            'only_matching': True,
        },
        {
            'url': 'https://www.kaltura.com:443/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto',
            'only_matching': True,
        },
        {
            # unavailable source format
            'url': 'kaltura:513551:1_66x4rg7o',
            'only_matching': True,
        }
    ]
@staticmethod
|
|
def _extract_url(webpage):
|
|
urls = KalturaIE._extract_urls(webpage)
|
|
return urls[0] if urls else None
|
|
|
|
@staticmethod
|
|
def _extract_urls(webpage):
|
|
# Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site
|
|
finditer = (
|
|
re.finditer(
|
|
r"""(?xs)
|
|
kWidget\.(?:thumb)?[Ee]mbed\(
|
|
\{.*?
|
|
(?P<q1>['"])wid(?P=q1)\s*:\s*
|
|
(?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
|
|
(?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
|
|
(?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
|
|
""", webpage)
|
|
or re.finditer(
|
|
r'''(?xs)
|
|
(?P<q1>["'])
|
|
(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
|
|
(?P=q1).*?
|
|
(?:
|
|
(?:
|
|
entry_?[Ii]d|
|
|
(?P<q2>["'])entry_?[Ii]d(?P=q2)
|
|
)\s*:\s*|
|
|
\[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s*
|
|
)
|
|
(?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
|
|
''', webpage)
|
|
or re.finditer(
|
|
r'''(?xs)
|
|
<(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
|
|
(?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
|
|
(?:(?!(?P=q1)).)*
|
|
[?&;]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+)
|
|
(?:(?!(?P=q1)).)*
|
|
(?P=q1)
|
|
''', webpage)
|
|
)
|
|
urls = []
|
|
for mobj in finditer:
|
|
embed_info = mobj.groupdict()
|
|
for k, v in embed_info.items():
|
|
if v:
|
|
embed_info[k] = v.strip()
|
|
url = 'kaltura:%(partner_id)s:%(id)s' % embed_info
|
|
escaped_pid = re.escape(embed_info['partner_id'])
|
|
service_mobj = re.search(
|
|
r'<script[^>]+src=(["\'])(?P<id>(?:https?:)?//(?:(?!\1).)+)/p/%s/sp/%s00/embedIframeJs' % (escaped_pid, escaped_pid),
|
|
webpage)
|
|
if service_mobj:
|
|
url = smuggle_url(url, {'service_url': service_mobj.group('id')})
|
|
urls.append(url)
|
|
return urls
|
|
|
|
def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs):
|
|
params = actions[0]
|
|
if len(actions) > 1:
|
|
for i, a in enumerate(actions[1:], start=1):
|
|
for k, v in a.items():
|
|
params['%d:%s' % (i, k)] = v
|
|
|
|
data = self._download_json(
|
|
(service_url or self._SERVICE_URL) + self._SERVICE_BASE,
|
|
video_id, query=params, *args, **kwargs)
|
|
|
|
status = data if len(actions) == 1 else data[0]
|
|
if status.get('objectType') == 'KalturaAPIException':
|
|
raise ExtractorError(
|
|
'%s said: %s' % (self.IE_NAME, status['message']))
|
|
|
|
return data
|
|
|
|
    def _get_video_info(self, video_id, partner_id, service_url=None):
        """Fetch entry metadata, flavor assets and caption assets for
        *video_id* in a single multirequest (see _kaltura_api_call).

        Returns the raw multirequest response list: the caller unpacks it
        as ``_, info, flavor_assets, captions``.
        """
        actions = [
            {
                # Slot 0: multirequest envelope shared by all sub-requests.
                'action': 'null',
                'apiVersion': '3.1.5',
                'clientTag': 'kdp:v3.8.5',
                'format': 1,  # JSON, 2 = XML, 3 = PHP
                'service': 'multirequest',
            },
            {
                # Slot 1: anonymous widget session; its session token is
                # referenced by the later sub-requests as '{1:result:ks}'.
                'expiry': 86400,
                'service': 'session',
                'action': 'startWidgetSession',
                'widgetId': '_%s' % partner_id,
            },
            {
                # Slot 2: base entry metadata, restricted to the fields
                # consumed by _real_extract.
                'action': 'get',
                'entryId': video_id,
                'service': 'baseentry',
                'ks': '{1:result:ks}',
                'responseProfile:fields': 'createdAt,dataUrl,duration,name,plays,thumbnailUrl,userId',
                'responseProfile:type': 1,
            },
            {
                # Slot 3: downloadable flavor (format) assets.
                'action': 'getbyentryid',
                'entryId': video_id,
                'service': 'flavorAsset',
                'ks': '{1:result:ks}',
            },
            {
                # Slot 4: caption (subtitle) assets.
                'action': 'list',
                'filter:entryIdEqual': video_id,
                'service': 'caption_captionasset',
                'ks': '{1:result:ks}',
            },
        ]
        return self._kaltura_api_call(
            video_id, actions, service_url, note='Downloading video info JSON')
    def _real_extract(self, url):
        """Extract formats, subtitles and metadata for a Kaltura video."""
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        partner_id, entry_id = mobj.group('partner_id', 'id')
        ks = None
        captions = None
        if partner_id and entry_id:
            # Internal kaltura:PARTNER:ENTRY URL -- ids are already known.
            _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id, smuggled_data.get('service_url'))
        else:
            # Public player URL: recover partner/entry ids from the path
            # segments and/or the query string.
            path, query = mobj.group('path', 'query')
            if not path and not query:
                raise ExtractorError('Invalid URL', expected=True)
            params = {}
            if query:
                params = compat_parse_qs(query)
            if path:
                # The path is a flat /key/value/key/value... sequence; fold
                # it into the same {key: [value]} shape compat_parse_qs
                # produces (query values win on key collision).
                splitted_path = path.split('/')
                params.update(dict((zip(splitted_path[::2], [[v] for v in splitted_path[1::2]]))))
            if 'wid' in params:
                # Widget ids carry a leading '_' before the partner id.
                partner_id = params['wid'][0][1:]
            elif 'p' in params:
                partner_id = params['p'][0]
            elif 'partner_id' in params:
                partner_id = params['partner_id'][0]
            else:
                raise ExtractorError('Invalid URL', expected=True)
            if 'entry_id' in params:
                entry_id = params['entry_id'][0]
                _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id)
            elif 'uiconf_id' in params and 'flashvars[referenceId]' in params:
                # No entry id in the URL; resolve it by reference id from
                # the player page's embedded JSON package.
                reference_id = params['flashvars[referenceId]'][0]
                webpage = self._download_webpage(url, reference_id)
                entry_data = self._parse_json(self._search_regex(
                    r'window\.kalturaIframePackageData\s*=\s*({.*});',
                    webpage, 'kalturaIframePackageData'),
                    reference_id)['entryResult']
                info, flavor_assets = entry_data['meta'], entry_data['contextData']['flavorAssets']
                entry_id = info['id']
                # Unfortunately, data returned in kalturaIframePackageData lacks
                # captions so we will try requesting the complete data using
                # regular approach since we now know the entry_id
                try:
                    _, info, flavor_assets, captions = self._get_video_info(
                        entry_id, partner_id)
                except ExtractorError:
                    # Regular scenario failed but we already have everything
                    # extracted apart from captions and can process at least
                    # with this
                    pass
            else:
                raise ExtractorError('Invalid URL', expected=True)
            # Optional session token passed through the player flashvars.
            ks = params.get('flashvars[ks]', [None])[0]

        source_url = smuggled_data.get('source_url')
        if source_url:
            # The CDN expects the embedding page's origin (scheme://host)
            # base64-encoded in a 'referrer' query parameter.
            referrer = base64.b64encode(
                '://'.join(compat_urlparse.urlparse(source_url)[:2])
                .encode('utf-8')).decode('utf-8')
        else:
            referrer = None

        def sign_url(unsigned_url):
            # Append session token and referrer when available.
            if ks:
                unsigned_url += '/ks/%s' % ks
            if referrer:
                unsigned_url += '?referrer=%s' % referrer
            return unsigned_url

        data_url = info['dataUrl']
        if '/flvclipper/' in data_url:
            # Rewrite legacy flvclipper URLs to the serveFlavor endpoint.
            data_url = re.sub(r'/flvclipper/.*', '/serveFlavor', data_url)

        formats = []
        for f in flavor_assets:
            # Continue if asset is not ready
            if f.get('status') != 2:
                continue
            # Original format that's not available (e.g. kaltura:1926081:0_c03e1b5g)
            # skip for now.
            if f.get('fileExt') == 'chun':
                continue
            # DRM-protected video, cannot be decrypted
            if f.get('fileExt') == 'wvm':
                continue
            if not f.get('fileExt'):
                # QT indicates QuickTime; some videos have broken fileExt
                if f.get('containerFormat') == 'qt':
                    f['fileExt'] = 'mov'
                else:
                    f['fileExt'] = 'mp4'
            video_url = sign_url(
                '%s/flavorId/%s' % (data_url, f['id']))
            format_id = '%(fileExt)s-%(bitrate)s' % f
            # Source format may not be available (e.g. kaltura:513551:1_66x4rg7o)
            if f.get('isOriginal') is True and not self._is_valid_url(
                    video_url, entry_id, format_id):
                continue
            # audio-only has no videoCodecId (e.g. kaltura:1926081:0_c03e1b5g
            # -f mp4-56)
            vcodec = 'none' if 'videoCodecId' not in f and f.get(
                'frameRate') == 0 else f.get('videoCodecId')
            formats.append({
                'format_id': format_id,
                'ext': f.get('fileExt'),
                'tbr': int_or_none(f['bitrate']),
                'fps': int_or_none(f.get('frameRate')),
                'filesize_approx': int_or_none(f.get('size'), invscale=1024),
                'container': f.get('containerFormat'),
                'vcodec': vcodec,
                'height': int_or_none(f.get('height')),
                'width': int_or_none(f.get('width')),
                'url': video_url,
            })
        if '/playManifest/' in data_url:
            # The HLS master playlist lives at the same URL with the
            # delivery format switched from progressive to applehttp.
            m3u8_url = sign_url(data_url.replace(
                'format/url', 'format/applehttp'))
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, entry_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        self._sort_formats(formats)

        subtitles = {}
        if captions:
            for caption in captions.get('objects', []):
                # Continue if caption is not ready
                if caption.get('status') != 2:
                    continue
                if not caption.get('id'):
                    continue
                caption_format = int_or_none(caption.get('format'))
                subtitles.setdefault(caption.get('languageCode') or caption.get('language'), []).append({
                    'url': '%s/api_v3/service/caption_captionasset/action/serve/captionAssetId/%s' % (self._SERVICE_URL, caption['id']),
                    'ext': caption.get('fileExt') or self._CAPTION_TYPES.get(caption_format) or 'ttml',
                })

        return {
            'id': entry_id,
            'title': info['name'],
            'formats': formats,
            'subtitles': subtitles,
            'description': clean_html(info.get('description')),
            'thumbnail': info.get('thumbnailUrl'),
            'duration': info.get('duration'),
            'timestamp': info.get('createdAt'),
            # The API returns the literal string 'None' for anonymous uploads.
            'uploader_id': info.get('userId') if info.get('userId') != 'None' else None,
            'view_count': info.get('plays'),
        }