mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-14 20:38:11 -05:00
b827ee921f
* [scrippsnetworks] Add new extractor(closes #19857)(closes #22981) * [teachable] Improve locked lessons detection (#23528) * [teachable] Fail with error message if no video URL found * [extractors] add missing import for ScrippsNetworksIE * [brightcove] cache brightcove player policy keys * [prosiebensat1] improve geo restriction handling(closes #23571) * [soundcloud] automatically update client id on failing requests * [spankbang] Fix extraction (closes #23307, closes #23423, closes #23444) * [spankbang] Improve removed video detection (#23423) * [brightcove] update policy key on failing requests * [pornhub] Fix extraction and add support for m3u8 formats (closes #22749, closes #23082) * [pornhub] Improve locked videos detection (closes #22449, closes #22780) * [brightcove] invalidate policy key cache on failing requests * [soundcloud] fix client id extraction for non fatal requests * [ChangeLog] Actualize [ci skip] * [devscripts/create-github-release] Switch to using PAT for authentication Basic authentication will be deprecated soon * release 2020.01.01 * [redtube] Detect private videos (#23518) * [vice] improve extraction(closes #23631) * [devscripts/create-github-release] Remove unused import * [wistia] improve format extraction and extract subtitles(closes #22590) * [nrktv:seriebase] Fix extraction (closes #23625) (#23537) * [discovery] fix anonymous token extraction(closes #23650) * [scrippsnetworks] add support for www.discovery.com videos * [scrippsnetworks] correct test case URL * [dctp] fix format extraction(closes #23656) * [pandatv] Remove extractor (#23630) * [naver] improve extraction - improve geo-restriction handling - extract automatic captions - extract uploader metadata - extract VLive HLS formats * [naver] improve metadata extraction * [cloudflarestream] improve extraction - add support for bytehighway.net domain - add support for signed URLs - extract thumbnail * [cloudflarestream] import embed URL extraction * [lego] fix extraction and 
extract subtitle(closes #23687) * [safari] Fix kaltura session extraction (closes #23679) (#23670) * [orf:fm4] Fix extraction (#23599) * [orf:radio] Clean description and improve extraction * [twitter] add support for promo_video_website cards(closes #23711) * [vodplatform] add support for embed.kwikmotion.com domain * [ndr:base:embed] Improve thumbnails extraction (closes #23731) * [canvas] Add support for new API endpoint and update tests (closes #17680, closes #18629) * [travis] Add flake8 job (#23720) * [yourporn] Fix extraction (closes #21645, closes #22255, closes #23459) * [ChangeLog] Actualize [ci skip] * release 2020.01.15 * [soundcloud] Restore previews extraction (closes #23739) * [orf:tvthek] Improve geo restricted videos detection (closes #23741) * [zype] improve extraction - extract subtitles(closes #21258) - support URLs with alternative keys/tokens(#21258) - extract more metadata * [americastestkitchen] fix extraction * [nbc] add support for nbc multi network URLs(closes #23049) * [ard] improve extraction(closes #23761) - simplify extraction - extract age limit and series - bypass geo-restriction * [ivi:compilation] Fix entries extraction (closes #23770) * [24video] Add support for 24video.vip (closes #23753) * [businessinsider] Fix jwplatform id extraction (closes #22929) (#22954) * [ard] add a missing condition * [azmedien] fix extraction(closes #23783) * [voicerepublic] fix extraction * [stretchinternet] fix extraction(closes #4319) * [youtube] Fix sigfunc name extraction (closes #23819) * [ChangeLog] Actualize [ci skip] * release 2020.01.24 * [soundcloud] imporve private playlist/set tracks extraction https://github.com/ytdl-org/youtube-dl/issues/3707#issuecomment-577873539 * [svt] fix article extraction(closes #22897)(closes #22919) * [svt] fix series extraction(closes #22297) * [viewlift] improve extraction - fix extraction(closes #23851) - add add support for authentication - add support for more domains * [vimeo] fix album extraction(closes 
#23864) * [tva] Relax _VALID_URL (closes #23903) * [tv5mondeplus] Fix extraction (closes #23907, closes #23911) * [twitch:stream] Lowercase channel id for stream request (closes #23917) * [sportdeutschland] Update to new sportdeutschland API They switched to SSL, but under a different host AND path... Remove the old test cases because these videos have become unavailable. * [popcorntimes] Add extractor (closes #23949) * [thisoldhouse] fix extraction(closes #23951) * [toggle] Add support for mewatch.sg (closes #23895) (#23930) * [compat] Introduce compat_realpath (refs #23991) * [update] Fix updating via symlinks (closes #23991) * [nytimes] improve format sorting(closes #24010) * [abc:iview] Support 720p (#22907) (#22921) * [nova:embed] Fix extraction (closes #23672) * [nova:embed] Improve (closes #23690) * [nova] Improve extraction (refs #23690) * [jpopsuki] Remove extractor (closes #23858) * [YoutubeDL] Fix playlist entry indexing with --playlist-items (closes #10591, closes #10622) * [test_YoutubeDL] Fix get_ids * [test_YoutubeDL] Add tests for #10591 (closes #23873) * [24video] Add support for porn.24video.net (closes #23779, closes #23784) * [npr] Add support for streams (closes #24042) * [ChangeLog] Actualize [ci skip] * release 2020.02.16 * [tv2dk:bornholm:play] Fix extraction (#24076) * [imdb] Fix extraction (closes #23443) * [wistia] Add support for multiple generic embeds (closes #8347, closes #11385) * [teachable] Add support for multiple videos per lecture (closes #24101) * [pornhd] Fix extraction (closes #24128) * [options] Remove duplicate short option -v for --version (#24162) * [extractor/common] Convert ISM manifest to unicode before processing on python 2 (#24152) * [YoutubeDL] Force redirect URL to unicode on python 2 * Remove no longer needed compat_str around geturl * [youjizz] Fix extraction (closes #24181) * [test_subtitles] Remove obsolete test * [zdf:channel] Fix tests * [zapiks] Fix test * [xtube] Fix metadata extraction (closes #21073, 
closes #22455) * [xtube:user] Fix test * [telecinco] Fix extraction (refs #24195) * [telecinco] Add support for article opening videos * [franceculture] Fix extraction (closes #24204) * [xhamster] Fix extraction (closes #24205) * [ChangeLog] Actualize [ci skip] * release 2020.03.01 * [vimeo] Fix subtitles URLs (#24209) * [servus] Add support for new URL schema (closes #23475, closes #23583, closes #24142) * [youtube:playlist] Fix tests (closes #23872) (#23885) * [peertube] Improve extraction * [peertube] Fix issues and improve extraction (closes #23657) * [pornhub] Improve title extraction (closes #24184) * [vimeo] fix showcase password protected video extraction(closes #24224) * [youtube] Fix age-gated videos support without login (closes #24248) * [youtube] Fix tests * [ChangeLog] Actualize [ci skip] * release 2020.03.06 * [nhk] update API version(closes #24270) * [youtube] Improve extraction in 429 error conditions (closes #24283) * [youtube] Improve age-gated videos extraction in 429 error conditions (refs #24283) * [youtube] Remove outdated code Additional get_video_info requests don't seem to provide any extra itags any longer * [README.md] Clarify 429 error * [pornhub] Add support for pornhubpremium.com (#24288) * [utils] Add support for cookies with spaces used instead of tabs * [ChangeLog] Actualize [ci skip] * release 2020.03.08 * Revert "[utils] Add support for cookies with spaces used instead of tabs" According to [1] TABs must be used as separators between fields. Files produces by some tools with spaces as separators are considered malformed. 1. https://curl.haxx.se/docs/http-cookies.html This reverts commitcff99c91d1
. * [utils] Add reference to cookie file format * Revert "[vimeo] fix showcase password protected video extraction(closes #24224)" This reverts commit12ee431676
. * [nhk] Relax _VALID_URL (#24329) * [nhk] Remove obsolete rtmp formats (closes #24329) * [nhk] Update m3u8 URL and use native hls (#24329) * [ndr] Fix extraction (closes #24326) * [xtube] Fix formats extraction (closes #24348) * [xtube] Fix typo * [hellporno] Fix extraction (closes #24399) * [cbc:watch] Add support for authentication * [cbc:watch] Fix authenticated device token caching (closes #19160) * [soundcloud] fix download url extraction(closes #24394) * [limelight] remove disabled API requests(closes #24255) * [bilibili] Add support for new URL schema with BV ids (closes #24439, closes #24442) * [bilibili] Add support for player.bilibili.com (closes #24402) * [teachable] Extract chapter metadata (closes #24421) * [generic] Look for teachable embeds before wistia * [teachable] Update upskillcourses domain New version does not use teachable platform any longer * [teachable] Update gns3 domain * [teachable] Update test * [ChangeLog] Actualize [ci skip] * [ChangeLog] Actualize [ci skip] * release 2020.03.24 * [spankwire] Fix extraction (closes #18924, closes #20648) * [spankwire] Add support for generic embeds (refs #24633) * [youporn] Add support form generic embeds * [mofosex] Add support for generic embeds (closes #24633) * [tele5] Fix extraction (closes #24553) * [extractor/common] Skip malformed ISM manifest XMLs while extracting ISM formats (#24667) * [tv4] Fix ISM formats extraction (closes #24667) * [twitch:clips] Extend _VALID_URL (closes #24290) (#24642) * [motherless] Fix extraction (closes #24699) * [nova:embed] Fix extraction (closes #24700) * [youtube] Skip broken multifeed videos (closes #24711) * [soundcloud] Extract AAC format * [soundcloud] Improve AAC format extraction (closes #19173, closes #24708) * [thisoldhouse] Fix video id extraction (closes #24548) Added support for: with of without "www." 
and either ".chorus.build" or ".com" It now validated correctly on older URL's ``` <iframe src="https://thisoldhouse.chorus.build/videos/zype/5e33baec27d2e50001d5f52f ``` and newer ones ``` <iframe src="https://www.thisoldhouse.com/videos/zype/5e2b70e95216cc0001615120 ``` * [thisoldhouse] Improve video id extraction (closes #24549) * [youtube] Fix DRM videos detection (refs #24736) * [options] Clarify doc on --exec command (closes #19087) (#24883) * [prosiebensat1] Improve extraction and remove 7tv.de support (#24948) * [prosiebensat1] Extract series metadata * [tenplay] Relax _VALID_URL (closes #25001) * [tvplay] fix Viafree extraction(closes #15189)(closes #24473)(closes #24789) * [yahoo] fix GYAO Player extraction and relax title URL regex(closes #24178)(closes #24778) * [youtube] Use redirected video id if any (closes #25063) * [youtube] Improve player id extraction and add tests * [extractor/common] Extract multiple JSON-LD entries * [crunchyroll] Fix and improve extraction (closes #25096, closes #25060) * [ChangeLog] Actualize [ci skip] * release 2020.05.03 * [puhutv] Remove no longer available HTTP formats (closes #25124) * [utils] Improve cookie files support + Add support for UTF-8 in cookie files * Skip malformed cookie file entries instead of crashing (invalid entry len, invalid expires at) * [dailymotion] Fix typo * [compat] Introduce compat_cookiejar_Cookie * [extractor/common] Use compat_cookiejar_Cookie for _set_cookie (closes #23256, closes #24776) To always ensure cookie name and value are bytestrings on python 2. * [orf] Add support for more radio stations (closes #24938) (#24968) * [uol] fix extraction(closes #22007) * [downloader/http] Finish downloading once received data length matches expected Always do this if possible, i.e. if Content-Length or expected length is known, not only in test. This will save unnecessary last extra loop trying to read 0 bytes. 
* [downloader/http] Request last data block of exact remaining size Always request last data block of exact size remaining to download if possible not the current block size. * [iprima] Improve extraction (closes #25138) * [youtube] Improve signature cipher extraction (closes #25188) * [ChangeLog] Actualize [ci skip] * release 2020.05.08 * [spike] fix Bellator mgid extraction(closes #25195) * [bbccouk] PEP8 * [mailru] Fix extraction (closes #24530) (#25239) * [README.md] flake8 HTTPS URL (#25230) * [youtube] Add support for yewtu.be (#25226) * [soundcloud] reduce API playlist page limit(closes #25274) * [vimeo] improve format extraction and sorting(closes #25285) * [redtube] Improve title extraction (#25208) * [indavideo] Switch to HTTPS for API request (#25191) * [utils] Fix file permissions in write_json_file (closes #12471) (#25122) * [redtube] Improve formats extraction and extract m3u8 formats (closes #25311, closes #25321) * [ard] Improve _VALID_URL (closes #25134) (#25198) * [giantbomb] Extend _VALID_URL (#25222) * [postprocessor/ffmpeg] Embed series metadata with --add-metadata * [youtube] Add support for more invidious instances (#25417) * [ard:beta] Extend _VALID_URL (closes #25405) * [ChangeLog] Actualize [ci skip] * release 2020.05.29 * [jwplatform] Improve embeds extraction (closes #25467) * [periscope] Fix untitled broadcasts (#25482) * [twitter:broadcast] Add untitled periscope broadcast test * [malltv] Add support for sk.mall.tv (#25445) * [brightcove] Fix subtitles extraction (closes #25540) * [brightcove] Sort imports * [twitch] Pass v5 accept header and fix thumbnails extraction (closes #25531) * [twitch:stream] Fix extraction (closes #25528) * [twitch:stream] Expect 400 and 410 HTTP errors from API * [tele5] Prefer jwplatform over nexx (closes #25533) * [jwplatform] Add support for bypass geo restriction * [tele5] Bypass geo restriction * [ChangeLog] Actualize [ci skip] * release 2020.06.06 * [kaltura] Add support for multiple embeds on a 
webpage (closes #25523) * [youtube] Extract chapters from JSON (closes #24819) * [facebook] Support single-video ID links I stumbled upon this at https://www.facebook.com/bwfbadminton/posts/10157127020046316 . No idea how prevalent it is yet. * [youtube] Fix playlist and feed extraction (closes #25675) * [youtube] Fix thumbnails extraction and remove uploader id extraction warning (closes #25676) * [youtube] Fix upload date extraction * [youtube] Improve view count extraction * [youtube] Fix uploader id and uploader URL extraction * [ChangeLog] Actualize [ci skip] * release 2020.06.16 * [youtube] Fix categories and improve tags extraction * [youtube] Force old layout (closes #25682, closes #25683, closes #25680, closes #25686) * [ChangeLog] Actualize [ci skip] * release 2020.06.16.1 * [brightcove] Improve embed detection (closes #25674) * [bellmedia] add support for cp24.com clip URLs(closes #25764) * [youtube:playlists] Extend _VALID_URL (closes #25810) * [youtube] Prevent excess HTTP 301 (#25786) * [wistia] Restrict embed regex (closes #25969) * [youtube] Improve description extraction (closes #25937) (#25980) * [youtube] Fix sigfunc name extraction (closes #26134, closes #26135, closes #26136, closes #26137) * [ChangeLog] Actualize [ci skip] * release 2020.07.28 * [xhamster] Extend _VALID_URL (closes #25789) (#25804) * [xhamster] Fix extraction (closes #26157) (#26254) * [xhamster] Extend _VALID_URL (closes #25927) Co-authored-by: Remita Amine <remitamine@gmail.com> Co-authored-by: Sergey M․ <dstftw@gmail.com> Co-authored-by: nmeum <soeren+github@soeren-tempel.net> Co-authored-by: Roxedus <me@roxedus.dev> Co-authored-by: Singwai Chan <c.singwai@gmail.com> Co-authored-by: cdarlint <cdarlint@users.noreply.github.com> Co-authored-by: Johannes N <31795504+jonolt@users.noreply.github.com> Co-authored-by: jnozsc <jnozsc@gmail.com> Co-authored-by: Moritz Patelscheck <moritz.patelscheck@campus.tu-berlin.de> Co-authored-by: PB <3854688+uno20001@users.noreply.github.com> 
Co-authored-by: Philipp Hagemeister <phihag@phihag.de> Co-authored-by: Xaver Hellauer <software@hellauer.bayern> Co-authored-by: d2au <d2au.dev@gmail.com> Co-authored-by: Jan 'Yenda' Trmal <jtrmal@gmail.com> Co-authored-by: jxu <7989982+jxu@users.noreply.github.com> Co-authored-by: Martin Ström <name@my-domain.se> Co-authored-by: The Hatsune Daishi <nao20010128@gmail.com> Co-authored-by: tsia <github@tsia.de> Co-authored-by: 3risian <59593325+3risian@users.noreply.github.com> Co-authored-by: Tristan Waddington <tristan.waddington@gmail.com> Co-authored-by: Devon Meunier <devon.meunier@gmail.com> Co-authored-by: Felix Stupp <felix.stupp@outlook.com> Co-authored-by: tom <tomster954@gmail.com> Co-authored-by: AndrewMBL <62922222+AndrewMBL@users.noreply.github.com> Co-authored-by: willbeaufoy <will@willbeaufoy.net> Co-authored-by: Philipp Stehle <anderschwiedu@googlemail.com> Co-authored-by: hh0rva1h <61889859+hh0rva1h@users.noreply.github.com> Co-authored-by: comsomisha <shmelev1996@mail.ru> Co-authored-by: TotalCaesar659 <14265316+TotalCaesar659@users.noreply.github.com> Co-authored-by: Juan Francisco Cantero Hurtado <iam@juanfra.info> Co-authored-by: Dave Loyall <dave@the-good-guys.net> Co-authored-by: tlsssl <63866177+tlsssl@users.noreply.github.com> Co-authored-by: Rob <ankenyr@gmail.com> Co-authored-by: Michael Klein <github@a98shuttle.de> Co-authored-by: JordanWeatherby <47519158+JordanWeatherby@users.noreply.github.com> Co-authored-by: striker.sh <19488257+strikersh@users.noreply.github.com> Co-authored-by: Matej Dujava <mdujava@gmail.com> Co-authored-by: Glenn Slayden <5589855+glenn-slayden@users.noreply.github.com> Co-authored-by: MRWITEK <mrvvitek@gmail.com> Co-authored-by: JChris246 <43832407+JChris246@users.noreply.github.com> Co-authored-by: TheRealDude2 <the.real.dude@gmx.de>
1128 lines
45 KiB
Python
1128 lines
45 KiB
Python
# coding: utf-8
|
||
from __future__ import unicode_literals
|
||
|
||
import base64
|
||
import functools
|
||
import json
|
||
import re
|
||
import itertools
|
||
|
||
from .common import InfoExtractor
|
||
from ..compat import (
|
||
compat_kwargs,
|
||
compat_HTTPError,
|
||
compat_str,
|
||
compat_urlparse,
|
||
)
|
||
from ..utils import (
|
||
clean_html,
|
||
determine_ext,
|
||
dict_get,
|
||
ExtractorError,
|
||
js_to_json,
|
||
int_or_none,
|
||
merge_dicts,
|
||
OnDemandPagedList,
|
||
parse_filesize,
|
||
RegexNotFoundError,
|
||
sanitized_Request,
|
||
smuggle_url,
|
||
std_headers,
|
||
str_or_none,
|
||
try_get,
|
||
unified_timestamp,
|
||
unsmuggle_url,
|
||
urlencode_postdata,
|
||
urljoin,
|
||
unescapeHTML,
|
||
)
|
||
|
||
|
||
class VimeoBaseInfoExtractor(InfoExtractor):
|
||
_NETRC_MACHINE = 'vimeo'
|
||
_LOGIN_REQUIRED = False
|
||
_LOGIN_URL = 'https://vimeo.com/log_in'
|
||
|
||
def _login(self):
|
||
username, password = self._get_login_info()
|
||
if username is None:
|
||
if self._LOGIN_REQUIRED:
|
||
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
|
||
return
|
||
webpage = self._download_webpage(
|
||
self._LOGIN_URL, None, 'Downloading login page')
|
||
token, vuid = self._extract_xsrft_and_vuid(webpage)
|
||
data = {
|
||
'action': 'login',
|
||
'email': username,
|
||
'password': password,
|
||
'service': 'vimeo',
|
||
'token': token,
|
||
}
|
||
self._set_vimeo_cookie('vuid', vuid)
|
||
try:
|
||
self._download_webpage(
|
||
self._LOGIN_URL, None, 'Logging in',
|
||
data=urlencode_postdata(data), headers={
|
||
'Content-Type': 'application/x-www-form-urlencoded',
|
||
'Referer': self._LOGIN_URL,
|
||
})
|
||
except ExtractorError as e:
|
||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 418:
|
||
raise ExtractorError(
|
||
'Unable to log in: bad username or password',
|
||
expected=True)
|
||
raise ExtractorError('Unable to log in')
|
||
|
||
def _verify_video_password(self, url, video_id, webpage):
|
||
password = self._downloader.params.get('videopassword')
|
||
if password is None:
|
||
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
|
||
token, vuid = self._extract_xsrft_and_vuid(webpage)
|
||
data = urlencode_postdata({
|
||
'password': password,
|
||
'token': token,
|
||
})
|
||
if url.startswith('http://'):
|
||
# vimeo only supports https now, but the user can give an http url
|
||
url = url.replace('http://', 'https://')
|
||
password_request = sanitized_Request(url + '/password', data)
|
||
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||
password_request.add_header('Referer', url)
|
||
self._set_vimeo_cookie('vuid', vuid)
|
||
return self._download_webpage(
|
||
password_request, video_id,
|
||
'Verifying the password', 'Wrong password')
|
||
|
||
def _extract_xsrft_and_vuid(self, webpage):
|
||
xsrft = self._search_regex(
|
||
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
|
||
webpage, 'login token', group='xsrft')
|
||
vuid = self._search_regex(
|
||
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
|
||
webpage, 'vuid', group='vuid')
|
||
return xsrft, vuid
|
||
|
||
def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
|
||
vimeo_config = self._search_regex(
|
||
r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
|
||
webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
|
||
if vimeo_config:
|
||
return self._parse_json(vimeo_config, video_id)
|
||
|
||
def _set_vimeo_cookie(self, name, value):
|
||
self._set_cookie('vimeo.com', name, value)
|
||
|
||
def _vimeo_sort_formats(self, formats):
|
||
# Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
|
||
# at the same time without actual units specified. This lead to wrong sorting.
|
||
self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))
|
||
|
||
    def _parse_config(self, config, video_id):
        """Build an info dict from a Vimeo player 'config' JSON object.

        Extracts progressive HTTP, HLS and DASH formats, subtitles,
        thumbnails, live status and uploader metadata. Raises KeyError
        when mandatory config fields ('video', 'title', 'request') are
        missing.
        """
        video_data = config['video']
        video_title = video_data['title']
        live_event = video_data.get('live_event') or {}
        is_live = live_event.get('status') == 'started'

        formats = []
        config_files = video_data.get('files') or config['request'].get('files', {})
        # Progressive (direct HTTP) downloads.
        for f in config_files.get('progressive', []):
            video_url = f.get('url')
            if not video_url:
                continue
            formats.append({
                'url': video_url,
                'format_id': 'http-%s' % f.get('quality'),
                'width': int_or_none(f.get('width')),
                'height': int_or_none(f.get('height')),
                'fps': int_or_none(f.get('fps')),
                'tbr': int_or_none(f.get('bitrate')),
            })

        # TODO: fix handling of 308 status code returned for live archive manifest requests
        sep_pattern = r'/sep/video/'
        for files_type in ('hls', 'dash'):
            for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
                manifest_url = cdn_data.get('url')
                if not manifest_url:
                    continue
                format_id = '%s-%s' % (files_type, cdn_name)
                sep_manifest_urls = []
                if re.search(sep_pattern, manifest_url):
                    # Query both the '/video/' and '/sep/video/' manifest
                    # variants; presumably they expose different format
                    # sets (muxed vs separate streams) — TODO confirm.
                    for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
                        sep_manifest_urls.append((format_id + suffix, re.sub(
                            sep_pattern, '/%s/' % repl, manifest_url)))
                else:
                    sep_manifest_urls = [(format_id, manifest_url)]
                for f_id, m_url in sep_manifest_urls:
                    if files_type == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            m_url, video_id, 'mp4',
                            'm3u8' if is_live else 'm3u8_native', m3u8_id=f_id,
                            note='Downloading %s m3u8 information' % cdn_name,
                            fatal=False))
                    elif files_type == 'dash':
                        if 'json=1' in m_url:
                            # URL points at JSON that carries the real
                            # manifest URL; resolve it first (best effort).
                            real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
                            if real_m_url:
                                m_url = real_m_url
                        mpd_formats = self._extract_mpd_formats(
                            m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
                            'Downloading %s MPD information' % cdn_name,
                            fatal=False)
                        formats.extend(mpd_formats)

        # A finished live event may expose the original source recording;
        # prefer it over the derived formats.
        live_archive = live_event.get('archive') or {}
        live_archive_source_url = live_archive.get('source_url')
        if live_archive_source_url and live_archive.get('status') == 'done':
            formats.append({
                'format_id': 'live-archive-source',
                'url': live_archive_source_url,
                'preference': 1,
            })

        # Demote audio-only and video-only formats below muxed ones.
        for f in formats:
            if f.get('vcodec') == 'none':
                f['preference'] = -50
            elif f.get('acodec') == 'none':
                f['preference'] = -40

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = [{
                    'ext': 'vtt',
                    'url': urljoin('https://vimeo.com', tt['url']),
                }]

        thumbnails = []
        if not is_live:
            for key, thumb in video_data.get('thumbs', {}).items():
                thumbnails.append({
                    'id': key,
                    # 'thumbs' keys appear to be pixel widths (e.g. '640');
                    # int_or_none assumed to tolerate non-numeric keys —
                    # TODO confirm
                    'width': int_or_none(key),
                    'url': thumb,
                })
            thumbnail = video_data.get('thumbnail')
            if thumbnail:
                thumbnails.append({
                    'url': thumbnail,
                })

        owner = video_data.get('owner') or {}
        video_uploader_url = owner.get('url')

        return {
            'id': str_or_none(video_data.get('id')) or video_id,
            'title': self._live_title(video_title) if is_live else video_title,
            'uploader': owner.get('name'),
            # Last path segment of the owner URL doubles as the uploader id
            'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None,
            'uploader_url': video_uploader_url,
            'thumbnails': thumbnails,
            'duration': int_or_none(video_data.get('duration')),
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
        }
|
||
|
||
def _extract_original_format(self, url, video_id):
|
||
download_data = self._download_json(
|
||
url, video_id, fatal=False,
|
||
query={'action': 'load_download_config'},
|
||
headers={'X-Requested-With': 'XMLHttpRequest'})
|
||
if download_data:
|
||
source_file = download_data.get('source_file')
|
||
if isinstance(source_file, dict):
|
||
download_url = source_file.get('download_url')
|
||
if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
|
||
source_name = source_file.get('public_name', 'Original')
|
||
if self._is_valid_url(download_url, video_id, '%s video' % source_name):
|
||
ext = (try_get(
|
||
source_file, lambda x: x['extension'],
|
||
compat_str) or determine_ext(
|
||
download_url, None) or 'mp4').lower()
|
||
return {
|
||
'url': download_url,
|
||
'ext': ext,
|
||
'width': int_or_none(source_file.get('width')),
|
||
'height': int_or_none(source_file.get('height')),
|
||
'filesize': parse_filesize(source_file.get('size')),
|
||
'format_id': source_name,
|
||
'preference': 1,
|
||
}
|
||
|
||
|
||
class VimeoIE(VimeoBaseInfoExtractor):
|
||
"""Information extractor for vimeo.com."""
|
||
|
||
# _VALID_URL matches Vimeo URLs
|
||
_VALID_URL = r'''(?x)
|
||
https?://
|
||
(?:
|
||
(?:
|
||
www|
|
||
player
|
||
)
|
||
\.
|
||
)?
|
||
vimeo(?:pro)?\.com/
|
||
(?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
|
||
(?:.*?/)?
|
||
(?:
|
||
(?:
|
||
play_redirect_hls|
|
||
moogaloop\.swf)\?clip_id=
|
||
)?
|
||
(?:videos?/)?
|
||
(?P<id>[0-9]+)
|
||
(?:/[\da-f]+)?
|
||
/?(?:[?&].*)?(?:[#].*)?$
|
||
'''
|
||
IE_NAME = 'vimeo'
|
||
_TESTS = [
|
||
{
|
||
'url': 'http://vimeo.com/56015672#at=0',
|
||
'md5': '8879b6cc097e987f02484baf890129e5',
|
||
'info_dict': {
|
||
'id': '56015672',
|
||
'ext': 'mp4',
|
||
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
|
||
'description': 'md5:2d3305bad981a06ff79f027f19865021',
|
||
'timestamp': 1355990239,
|
||
'upload_date': '20121220',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434',
|
||
'uploader_id': 'user7108434',
|
||
'uploader': 'Filippo Valsorda',
|
||
'duration': 10,
|
||
'license': 'by-sa',
|
||
},
|
||
'params': {
|
||
'format': 'best[protocol=https]',
|
||
},
|
||
},
|
||
{
|
||
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
|
||
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
|
||
'note': 'Vimeo Pro video (#1197)',
|
||
'info_dict': {
|
||
'id': '68093876',
|
||
'ext': 'mp4',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
|
||
'uploader_id': 'openstreetmapus',
|
||
'uploader': 'OpenStreetMap US',
|
||
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
|
||
'description': 'md5:2c362968038d4499f4d79f88458590c1',
|
||
'duration': 1595,
|
||
'upload_date': '20130610',
|
||
'timestamp': 1370893156,
|
||
},
|
||
'params': {
|
||
'format': 'best[protocol=https]',
|
||
},
|
||
},
|
||
{
|
||
'url': 'http://player.vimeo.com/video/54469442',
|
||
'md5': '619b811a4417aa4abe78dc653becf511',
|
||
'note': 'Videos that embed the url in the player page',
|
||
'info_dict': {
|
||
'id': '54469442',
|
||
'ext': 'mp4',
|
||
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
|
||
'uploader': 'The BLN & Business of Software',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
|
||
'uploader_id': 'theblnbusinessofsoftware',
|
||
'duration': 3610,
|
||
'description': None,
|
||
},
|
||
'params': {
|
||
'format': 'best[protocol=https]',
|
||
},
|
||
'expected_warnings': ['Unable to download JSON metadata'],
|
||
},
|
||
{
|
||
'url': 'http://vimeo.com/68375962',
|
||
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
|
||
'note': 'Video protected with password',
|
||
'info_dict': {
|
||
'id': '68375962',
|
||
'ext': 'mp4',
|
||
'title': 'youtube-dl password protected test video',
|
||
'timestamp': 1371200155,
|
||
'upload_date': '20130614',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
|
||
'uploader_id': 'user18948128',
|
||
'uploader': 'Jaime Marquínez Ferrándiz',
|
||
'duration': 10,
|
||
'description': 'md5:dca3ea23adb29ee387127bc4ddfce63f',
|
||
},
|
||
'params': {
|
||
'format': 'best[protocol=https]',
|
||
'videopassword': 'youtube-dl',
|
||
},
|
||
},
|
||
{
|
||
'url': 'http://vimeo.com/channels/keypeele/75629013',
|
||
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
|
||
'info_dict': {
|
||
'id': '75629013',
|
||
'ext': 'mp4',
|
||
'title': 'Key & Peele: Terrorist Interrogation',
|
||
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/atencio',
|
||
'uploader_id': 'atencio',
|
||
'uploader': 'Peter Atencio',
|
||
'channel_id': 'keypeele',
|
||
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/keypeele',
|
||
'timestamp': 1380339469,
|
||
'upload_date': '20130928',
|
||
'duration': 187,
|
||
},
|
||
'expected_warnings': ['Unable to download JSON metadata'],
|
||
},
|
||
{
|
||
'url': 'http://vimeo.com/76979871',
|
||
'note': 'Video with subtitles',
|
||
'info_dict': {
|
||
'id': '76979871',
|
||
'ext': 'mp4',
|
||
'title': 'The New Vimeo Player (You Know, For Videos)',
|
||
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
|
||
'timestamp': 1381846109,
|
||
'upload_date': '20131015',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/staff',
|
||
'uploader_id': 'staff',
|
||
'uploader': 'Vimeo Staff',
|
||
'duration': 62,
|
||
}
|
||
},
|
||
{
|
||
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
|
||
'url': 'https://player.vimeo.com/video/98044508',
|
||
'note': 'The js code contains assignments to the same variable as the config',
|
||
'info_dict': {
|
||
'id': '98044508',
|
||
'ext': 'mp4',
|
||
'title': 'Pier Solar OUYA Official Trailer',
|
||
'uploader': 'Tulio Gonçalves',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user28849593',
|
||
'uploader_id': 'user28849593',
|
||
},
|
||
},
|
||
{
|
||
# contains original format
|
||
'url': 'https://vimeo.com/33951933',
|
||
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
|
||
'info_dict': {
|
||
'id': '33951933',
|
||
'ext': 'mp4',
|
||
'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
|
||
'uploader': 'The DMCI',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/dmci',
|
||
'uploader_id': 'dmci',
|
||
'timestamp': 1324343742,
|
||
'upload_date': '20111220',
|
||
'description': 'md5:ae23671e82d05415868f7ad1aec21147',
|
||
},
|
||
},
|
||
{
|
||
# only available via https://vimeo.com/channels/tributes/6213729 and
|
||
# not via https://vimeo.com/6213729
|
||
'url': 'https://vimeo.com/channels/tributes/6213729',
|
||
'info_dict': {
|
||
'id': '6213729',
|
||
'ext': 'mp4',
|
||
'title': 'Vimeo Tribute: The Shining',
|
||
'uploader': 'Casey Donahue',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/caseydonahue',
|
||
'uploader_id': 'caseydonahue',
|
||
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/tributes',
|
||
'channel_id': 'tributes',
|
||
'timestamp': 1250886430,
|
||
'upload_date': '20090821',
|
||
'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
|
||
},
|
||
'params': {
|
||
'skip_download': True,
|
||
},
|
||
'expected_warnings': ['Unable to download JSON metadata'],
|
||
},
|
||
{
|
||
# redirects to ondemand extractor and should be passed through it
|
||
# for successful extraction
|
||
'url': 'https://vimeo.com/73445910',
|
||
'info_dict': {
|
||
'id': '73445910',
|
||
'ext': 'mp4',
|
||
'title': 'The Reluctant Revolutionary',
|
||
'uploader': '10Ft Films',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/tenfootfilms',
|
||
'uploader_id': 'tenfootfilms',
|
||
'description': 'md5:0fa704e05b04f91f40b7f3ca2e801384',
|
||
'upload_date': '20130830',
|
||
'timestamp': 1377853339,
|
||
},
|
||
'params': {
|
||
'skip_download': True,
|
||
},
|
||
'expected_warnings': ['Unable to download JSON metadata'],
|
||
},
|
||
{
|
||
'url': 'http://player.vimeo.com/video/68375962',
|
||
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
|
||
'info_dict': {
|
||
'id': '68375962',
|
||
'ext': 'mp4',
|
||
'title': 'youtube-dl password protected test video',
|
||
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
|
||
'uploader_id': 'user18948128',
|
||
'uploader': 'Jaime Marquínez Ferrándiz',
|
||
'duration': 10,
|
||
},
|
||
'params': {
|
||
'format': 'best[protocol=https]',
|
||
'videopassword': 'youtube-dl',
|
||
},
|
||
},
|
||
{
|
||
'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
|
||
'only_matching': True,
|
||
},
|
||
{
|
||
'url': 'https://vimeo.com/109815029',
|
||
'note': 'Video not completely processed, "failed" seed status',
|
||
'only_matching': True,
|
||
},
|
||
{
|
||
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
|
||
'only_matching': True,
|
||
},
|
||
{
|
||
'url': 'https://vimeo.com/album/2632481/video/79010983',
|
||
'only_matching': True,
|
||
},
|
||
{
|
||
# source file returns 403: Forbidden
|
||
'url': 'https://vimeo.com/7809605',
|
||
'only_matching': True,
|
||
},
|
||
{
|
||
'url': 'https://vimeo.com/160743502/abd0e13fb4',
|
||
'only_matching': True,
|
||
}
|
||
# https://gettingthingsdone.com/workflowmap/
|
||
# vimeo embed with check-password page protected by Referer header
|
||
]
|
||
|
||
@staticmethod
def _smuggle_referrer(url, referrer_url):
    """Return *url* with *referrer_url* smuggled in as a Referer HTTP header."""
    referer_headers = {'http_headers': {'Referer': referrer_url}}
    return smuggle_url(url, referer_headers)
|
||
|
||
@staticmethod
def _extract_urls(url, webpage):
    """Return all embedded Vimeo player URLs found in *webpage*.

    iframe embeds get the embedding page URL smuggled in as a Referer
    header; plain <embed>/<video> embeds are returned untouched.
    """
    # Embedded (iframe) Vimeo players need the Referer to pass
    # privacy checks, so smuggle the embedding page URL along.
    found = [
        VimeoIE._smuggle_referrer(unescapeHTML(match.group('url')), url)
        for match in re.finditer(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/\d+.*?)\1',
            webpage)
    ]
    plain_embed_patterns = (
        # Embedded (swf embed) Vimeo player
        r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1',
        # Non-standard embedded Vimeo player via a plain <video> tag
        r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1',
    )
    for pattern in plain_embed_patterns:
        found.extend(
            match.group('url') for match in re.finditer(pattern, webpage))
    return found
|
||
|
||
@staticmethod
def _extract_url(url, webpage):
    """Return the first embedded Vimeo URL found in *webpage*, or None."""
    candidates = VimeoIE._extract_urls(url, webpage)
    if not candidates:
        return None
    return candidates[0]
|
||
|
||
def _verify_player_video_password(self, url, video_id, headers):
    """Submit the user-supplied --video-password to the player URL's
    /check-password endpoint and return the JSON it responds with
    (the player config on success).

    Raises ExtractorError if no password was supplied or the endpoint
    rejects it.
    """
    password = self._downloader.params.get('videopassword')
    if password is None:
        raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
    # The endpoint expects the password base64-encoded in a form body
    data = urlencode_postdata({
        'password': base64.b64encode(password.encode()),
    })
    headers = merge_dicts(headers, {
        'Content-Type': 'application/x-www-form-urlencoded',
    })
    checked = self._download_json(
        url + '/check-password', video_id,
        'Verifying the password', data=data, headers=headers)
    # NOTE(review): _download_json only returns False for non-fatal
    # failures; a wrong password presumably surfaces that way — confirm
    if checked is False:
        raise ExtractorError('Wrong video password', expected=True)
    return checked
|
||
|
||
def _real_initialize(self):
    # Log in (if credentials were configured) before any extraction work
    self._login()
|
||
|
||
def _real_extract(self, url):
    """Extract metadata and formats for a single Vimeo video.

    Handles plain vimeo.com URLs, player.vimeo.com embeds, vimeopro.com
    pages, password-protected videos (retried once via a smuggled
    '_video_password_verified' marker) and rented VOD trailers (which
    redirect to the feature via a smuggled 'force_feature_id').
    """
    # Smuggled data may carry HTTP headers (e.g. Referer) from an
    # embedding page; default the Referer to the video URL itself.
    url, data = unsmuggle_url(url, {})
    headers = std_headers.copy()
    if 'http_headers' in data:
        headers.update(data['http_headers'])
    if 'Referer' not in headers:
        headers['Referer'] = url

    channel_id = self._search_regex(
        r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)

    # Extract ID from URL
    video_id = self._match_id(url)
    orig_url = url
    is_pro = 'vimeopro.com/' in url
    is_player = '://player.vimeo.com/video/' in url
    if is_pro:
        # some videos require portfolio_id to be present in player url
        # https://github.com/ytdl-org/youtube-dl/issues/20070
        url = self._extract_url(url, self._download_webpage(url, video_id))
        if not url:
            url = 'https://vimeo.com/' + video_id
    elif is_player:
        url = 'https://player.vimeo.com/video/' + video_id
    elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
        url = 'https://vimeo.com/' + video_id

    try:
        # Retrieve video webpage to extract further information
        webpage, urlh = self._download_webpage_handle(
            url, video_id, headers=headers)
        redirect_url = urlh.geturl()
    except ExtractorError as ee:
        # A 403 with this body means the video is embed-only and the
        # caller must supply the embedding page URL (for the Referer).
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                raise ExtractorError(
                    'Cannot download embed-only video without embedding '
                    'URL. Please call youtube-dl with the URL of the page '
                    'that embeds this video.',
                    expected=True)
        raise

    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)

    # A "failed" seed status means the upload never finished processing
    vimeo_config = self._extract_vimeo_config(webpage, video_id, default=None)
    if vimeo_config:
        seed_status = vimeo_config.get('seed_status', {})
        if seed_status.get('state') == 'failed':
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, seed_status['title']),
                expected=True)

    cc_license = None
    timestamp = None
    video_description = None

    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage,
                'config URL', default=None)
            if not config_url:
                # Sometimes new react-based page is served instead of old one that require
                # different config URL extraction approach (see
                # https://github.com/ytdl-org/youtube-dl/pull/7209)
                page_config = self._parse_json(self._search_regex(
                    r'vimeo\.(?:clip|vod_title)_page_config\s*=\s*({.+?});',
                    webpage, 'page config'), video_id)
                config_url = page_config['player']['config_url']
                cc_license = page_config.get('cc_license')
                timestamp = try_get(
                    page_config, lambda x: x['clip']['uploaded_on'],
                    compat_str)
                video_description = clean_html(dict_get(
                    page_config, ('description', 'description_html_escaped')))
            config = self._download_json(config_url, video_id)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search(r'(\w)\.video\.id', webpage)
            if m_variable_name is not None:
                config_re = [r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))]
            else:
                config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
            config_re.append(r'\bvar\s+r\s*=\s*({.+?})\s*;')
            config_re.append(r'\bconfig\s*=\s*({.+?})\s*;')
            config = self._search_regex(config_re, webpage, 'info section',
                                        flags=re.DOTALL)
            config = json.loads(config)
    except Exception as e:
        if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
            raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

        # Password form present: verify the password once, then retry the
        # whole extraction with a marker so we don't loop forever.
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            if '_video_password_verified' in data:
                raise ExtractorError('video password verification failed!')
            self._verify_video_password(redirect_url, video_id, webpage)
            return self._real_extract(
                smuggle_url(redirect_url, {'_video_password_verified': 'verified'}))
        else:
            raise ExtractorError('Unable to extract info section',
                                 cause=e)
    else:
        # NOTE(review): view == 4 appears to mark password-protected
        # player configs — confirm against Vimeo player responses
        if config.get('view') == 4:
            config = self._verify_player_video_password(redirect_url, video_id, headers)

    vod = config.get('video', {}).get('vod', {})

    def is_rented():
        # True when the user has rented/purchased this VOD title
        if '>You rented this title.<' in webpage:
            return True
        if config.get('user', {}).get('purchased'):
            return True
        for purchase_option in vod.get('purchase_options', []):
            if purchase_option.get('purchased'):
                return True
            label = purchase_option.get('label_string')
            if label and (label.startswith('You rented this') or label.endswith(' remaining')):
                return True
        return False

    # If this is only the trailer of a rented feature, hand off to the
    # feature itself (unless we already forced the feature id).
    if is_rented() and vod.get('is_trailer'):
        feature_id = vod.get('feature_id')
        if feature_id and not data.get('force_feature_id', False):
            return self.url_result(smuggle_url(
                'https://player.vimeo.com/player/%s' % feature_id,
                {'force_feature_id': True}), 'Vimeo')

    # Extract video description
    if not video_description:
        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage, 'description', default=None)
    if not video_description:
        video_description = self._html_search_meta(
            'description', webpage, default=None)
    if not video_description and is_pro:
        # Pro pages redirect away from the original URL; fall back to the
        # original page's meta description.
        orig_webpage = self._download_webpage(
            orig_url, video_id,
            note='Downloading webpage for description',
            fatal=False)
        if orig_webpage:
            video_description = self._html_search_meta(
                'description', orig_webpage, default=None)
    if not video_description and not is_player:
        self._downloader.report_warning('Cannot find video description')

    # Extract upload date
    if not timestamp:
        timestamp = self._search_regex(
            r'<time[^>]+datetime="([^"]+)"', webpage,
            'timestamp', default=None)

    try:
        view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
        like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
        comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None

    formats = []

    # The original (source) file, when downloadable, is the best format
    source_format = self._extract_original_format(
        'https://vimeo.com/' + video_id, video_id)
    if source_format:
        formats.append(source_format)

    info_dict_config = self._parse_config(config, video_id)
    formats.extend(info_dict_config['formats'])
    self._vimeo_sort_formats(formats)

    json_ld = self._search_json_ld(webpage, video_id, default={})

    if not cc_license:
        cc_license = self._search_regex(
            r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
            webpage, 'license', default=None, group='license')

    channel_url = 'https://vimeo.com/channels/%s' % channel_id if channel_id else None

    info_dict = {
        'formats': formats,
        'timestamp': unified_timestamp(timestamp),
        'description': video_description,
        'webpage_url': url,
        'view_count': view_count,
        'like_count': like_count,
        'comment_count': comment_count,
        'license': cc_license,
        'channel_id': channel_id,
        'channel_url': channel_url,
    }

    # Later dicts only fill in keys missing from earlier ones
    info_dict = merge_dicts(info_dict, info_dict_config, json_ld)

    return info_dict
|
||
|
||
|
||
class VimeoOndemandIE(VimeoIE):
    """Vimeo On Demand pages (https://vimeo.com/ondemand/...).

    All extraction logic is inherited from VimeoIE; only the URL
    pattern and test cases differ.
    """
    IE_NAME = 'vimeo:ondemand'
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/ondemand/([^/]+/)?(?P<id>[^/?#&]+)'
    _TESTS = [{
        # ondemand video not available via https://vimeo.com/id
        'url': 'https://vimeo.com/ondemand/20704',
        'md5': 'c424deda8c7f73c1dfb3edd7630e2f35',
        'info_dict': {
            'id': '105442900',
            'ext': 'mp4',
            'title': 'המעבדה - במאי יותם פלדמן',
            'uploader': 'גם סרטים',
            'uploader_url': r're:https?://(?:www\.)?vimeo\.com/gumfilms',
            'uploader_id': 'gumfilms',
            'description': 'md5:4c027c965e439de4baab621e48b60791',
            'upload_date': '20140906',
            'timestamp': 1410032453,
        },
        'params': {
            'format': 'best[protocol=https]',
        },
        'expected_warnings': ['Unable to download JSON metadata'],
    }, {
        # requires Referer to be passed along with og:video:url
        'url': 'https://vimeo.com/ondemand/36938/126682985',
        'info_dict': {
            'id': '126584684',
            'ext': 'mp4',
            'title': 'Rävlock, rätt läte på rätt plats',
            'uploader': 'Lindroth & Norin',
            'uploader_url': r're:https?://(?:www\.)?vimeo\.com/lindrothnorin',
            'uploader_id': 'lindrothnorin',
            'description': 'md5:c3c46a90529612c8279fb6af803fc0df',
            'upload_date': '20150502',
            'timestamp': 1430586422,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download JSON metadata'],
    }, {
        'url': 'https://vimeo.com/ondemand/nazmaalik',
        'only_matching': True,
    }, {
        'url': 'https://vimeo.com/ondemand/141692381',
        'only_matching': True,
    }, {
        'url': 'https://vimeo.com/ondemand/thelastcolony/150274832',
        'only_matching': True,
    }]
|
||
|
||
|
||
class VimeoChannelIE(VimeoBaseInfoExtractor):
    """Playlist extractor for vimeo.com channel pages.

    Also serves as the base class for other paginated Vimeo lists
    (users, groups, watch-later, likes), which override the URL
    pattern, title handling and page URL construction.
    """
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE = None
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'https://vimeo.com/channels/tributes',
        'info_dict': {
            'id': 'tributes',
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]
    _BASE_URL_TEMPL = 'https://vimeo.com/channels/%s'

    def _page_url(self, base_url, pagenum):
        # Channel listings are paginated under /videos/page:N/
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        # Subclasses may pin a static title via _TITLE
        if self._TITLE:
            return self._TITLE
        return self._html_search_regex(
            self._TITLE_RE, webpage, 'list title', fatal=False)

    def _title_and_entries(self, list_id, base_url):
        """Generator that yields the list title first, then one
        url_result per video, walking pages until no "next" link."""
        for page_num in itertools.count(1):
            page = self._download_webpage(
                self._page_url(base_url, page_num), list_id,
                'Downloading page %s' % page_num)

            if page_num == 1:
                yield self._extract_list_title(page)

            # Try extracting href first since not all videos are available via
            # short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
            matches = re.findall(
                r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)(?:[^>]+\btitle="([^"]+)")?', page)
            if matches:
                for clip_id, clip_href, clip_title in matches:
                    yield self.url_result(
                        compat_urlparse.urljoin(base_url, clip_href),
                        VimeoIE.ie_key(), video_id=clip_id, video_title=clip_title)
            else:
                # More relaxed fallback: bare clip ids only
                for clip_id in re.findall(r'id=["\']clip_(\d+)', page):
                    yield self.url_result(
                        'https://vimeo.com/%s' % clip_id,
                        VimeoIE.ie_key(), video_id=clip_id)

            if re.search(self._MORE_PAGES_INDICATOR, page, re.DOTALL) is None:
                break

    def _extract_videos(self, list_id, base_url):
        stream = self._title_and_entries(list_id, base_url)
        # The first yielded item is the playlist title
        list_title = next(stream)
        return self.playlist_result(stream, list_id, list_title)

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        return self._extract_videos(channel_id, self._BASE_URL_TEMPL % channel_id)
|
||
|
||
|
||
class VimeoUserIE(VimeoChannelIE):
    """Playlist extractor for a Vimeo user's videos page.

    Reuses VimeoChannelIE's pagination; only the URL pattern, title
    regex and base URL template differ.
    """
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<id>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'https://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
            'id': 'nkistudio',
        },
        'playlist_mincount': 66,
    }]
    _BASE_URL_TEMPL = 'https://vimeo.com/%s'
|
||
|
||
|
||
class VimeoAlbumIE(VimeoBaseInfoExtractor):
    """Playlist extractor for Vimeo albums/showcases, paginated through
    the public api.vimeo.com API using a JWT scraped from the page."""
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https://vimeo\.com/(?:album|showcase)/(?P<id>\d+)(?:$|[?#]|/(?!video))'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'https://vimeo.com/album/2632481',
        'info_dict': {
            'id': '2632481',
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }, {
        'note': 'Password-protected album',
        'url': 'https://vimeo.com/album/3253534',
        'info_dict': {
            'title': 'test',
            'id': '3253534',
        },
        'playlist_count': 1,
        'params': {
            'videopassword': 'youtube-dl',
        }
    }]
    _PAGE_SIZE = 100

    # Fix: parameter was misspelled "authorizaion"; it is only ever bound
    # positionally via functools.partial below, so the rename is safe.
    def _fetch_page(self, album_id, authorization, hashed_pass, page):
        """Yield a url_result for every video on one API page.

        album_id -- numeric album/showcase id
        authorization -- JWT used in the Authorization request header
        hashed_pass -- password hash for protected albums, or None
        page -- zero-based page index (the API itself is one-based)
        """
        api_page = page + 1
        query = {
            'fields': 'link,uri',
            'page': api_page,
            'per_page': self._PAGE_SIZE,
        }
        if hashed_pass:
            query['_hashed_pass'] = hashed_pass
        videos = self._download_json(
            'https://api.vimeo.com/albums/%s/videos' % album_id,
            album_id, 'Downloading page %d' % api_page, query=query, headers={
                'Authorization': 'jwt ' + authorization,
            })['data']
        for video in videos:
            link = video.get('link')
            if not link:
                continue
            uri = video.get('uri')
            video_id = self._search_regex(r'/videos/(\d+)', uri, 'video_id', default=None) if uri else None
            yield self.url_result(link, VimeoIE.ie_key(), video_id)

    def _real_extract(self, url):
        album_id = self._match_id(url)
        webpage = self._download_webpage(url, album_id)
        # The page bootstrap data carries the viewer's JWT and XSRF token
        viewer = self._parse_json(self._search_regex(
            r'bootstrap_data\s*=\s*({.+?})</script>',
            webpage, 'bootstrap data'), album_id)['viewer']
        jwt = viewer['jwt']
        album = self._download_json(
            'https://api.vimeo.com/albums/' + album_id,
            album_id, headers={'Authorization': 'jwt ' + jwt},
            query={'fields': 'description,name,privacy'})
        hashed_pass = None
        if try_get(album, lambda x: x['privacy']['view']) == 'password':
            password = self._downloader.params.get('videopassword')
            if not password:
                raise ExtractorError(
                    'This album is protected by a password, use the --video-password option',
                    expected=True)
            self._set_vimeo_cookie('vuid', viewer['vuid'])
            try:
                # Exchange the password for a hash reused on every page request
                hashed_pass = self._download_json(
                    'https://vimeo.com/showcase/%s/auth' % album_id,
                    album_id, 'Verifying the password', data=urlencode_postdata({
                        'password': password,
                        'token': viewer['xsrft'],
                    }), headers={
                        'X-Requested-With': 'XMLHttpRequest',
                    })['hashed_pass']
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    raise ExtractorError('Wrong password', expected=True)
                raise
        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, album_id, jwt, hashed_pass), self._PAGE_SIZE)
        return self.playlist_result(
            entries, album_id, album.get('name'), album.get('description'))
|
||
|
||
|
||
class VimeoGroupsIE(VimeoChannelIE):
    """Playlist extractor for Vimeo group pages.

    Reuses VimeoChannelIE's pagination; only the URL pattern and base
    URL template differ.
    """
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'https://vimeo\.com/groups/(?P<id>[^/]+)(?:/(?!videos?/\d+)|$)'
    _TESTS = [{
        'url': 'https://vimeo.com/groups/kattykay',
        'info_dict': {
            'id': 'kattykay',
            'title': 'Katty Kay',
        },
        'playlist_mincount': 27,
    }]
    _BASE_URL_TEMPL = 'https://vimeo.com/groups/%s'
|
||
|
||
|
||
class VimeoReviewIE(VimeoBaseInfoExtractor):
    """Extractor for Vimeo review pages (vimeo.com/<user>/review/<id>/<hash>)."""
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'(?P<url>https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)/[0-9a-f]{10})'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'id': '75524534',
            'ext': 'mp4',
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
            'uploader_id': 'user21297594',
            'description': "Comedian Dick Hardwick's five minute demo filmed in front of a live theater audience.\nEdit by Doug Mattocks",
        },
        'expected_warnings': ['Unable to download JSON metadata'],
    }, {
        'note': 'video player needs Referer',
        'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader_id': 'user22258446',
        },
        'skip': 'video gone',
    }, {
        'note': 'Password protected',
        'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
        'info_dict': {
            'id': '138823582',
            'ext': 'mp4',
            'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
            'uploader': 'TMB',
            'uploader_id': 'user37284429',
        },
        'params': {
            'videopassword': 'holygrail',
        },
        'skip': 'video gone',
    }]

    def _real_initialize(self):
        # Log in (if credentials were configured) before extraction
        self._login()

    def _real_extract(self, url):
        """Fetch the review page's clip data JSON, then build the info
        dict from the referenced player config."""
        page_url, video_id = re.match(self._VALID_URL, url).groups()
        # /review/data/ serves the same page's data as JSON
        clip_data = self._download_json(
            page_url.replace('/review/', '/review/data/'),
            video_id)['clipData']
        config_url = clip_data['configUrl']
        config = self._download_json(config_url, video_id)
        info_dict = self._parse_config(config, video_id)
        # The original (source) file, when downloadable, is the best format
        source_format = self._extract_original_format(
            page_url + '/action', video_id)
        if source_format:
            info_dict['formats'].append(source_format)
        self._vimeo_sort_formats(info_dict['formats'])
        info_dict['description'] = clean_html(clip_data.get('description'))
        return info_dict
|
||
|
||
|
||
class VimeoWatchLaterIE(VimeoChannelIE):
    """Extractor for the authenticated user's watch-later list."""
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
    _TITLE = 'Watch Later'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': 'https://vimeo.com/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        # The watch-later list is per-account, so login is mandatory
        self._login()

    def _page_url(self, base_url, pagenum):
        # Set the header to get a partial html page with the ids,
        # the normal page doesn't contain them.
        page_request = sanitized_Request('%s/page:%d/' % (base_url, pagenum))
        page_request.add_header('X-Requested-With', 'XMLHttpRequest')
        return page_request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
|
||
|
||
|
||
class VimeoLikesIE(VimeoChannelIE):
    """Playlist extractor for a Vimeo user's liked videos."""
    _VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TESTS = [{
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        'info_dict': {
            'id': 'user755559',
            'title': 'urza’s Likes',
        },
    }, {
        'url': 'https://vimeo.com/stormlapse/likes',
        'only_matching': True,
    }]

    def _page_url(self, base_url, pagenum):
        # Likes pages are paginated directly under /page:N/
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        user_id = self._match_id(url)
        likes_url = 'https://vimeo.com/%s/likes' % user_id
        return self._extract_videos(user_id, likes_url)
|
||
|
||
|
||
class VHXEmbedIE(VimeoBaseInfoExtractor):
    """Embedded VHX (OTT) players, which use Vimeo player configs."""
    IE_NAME = 'vhx:embed'
    _VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player config URL lives inside the window.OTTData JS object
        ott_data = self._parse_json(self._search_regex(
            r'window\.OTTData\s*=\s*({.+})', webpage,
            'ott data'), video_id, js_to_json)
        config = self._download_json(ott_data['config_url'], video_id)
        info_dict = self._parse_config(config, video_id)
        self._vimeo_sort_formats(info_dict['formats'])
        return info_dict
|