# encoding: utf-8
from __future__ import unicode_literals

import json
import re
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    RegexNotFoundError,
    std_headers,
    unsmuggle_url,
    urlencode_postdata,
)


class VimeoBaseInfoExtractor(InfoExtractor):
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
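        # Vimeo's login form requires the "xsrft" token scraped from the login
        # page; it is echoed back both in the POST body and as a cookie.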
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = urlencode_postdata({
            'email': username,
            'password': password,
            'action': 'login',
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, 'Wrong login info')


class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        https?://
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                "upload_date": "20121220",
                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "uploader_id": "user7108434",
                "uploader": "Filippo Valsorda",
                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "duration": 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'description': 'md5:380943ec71b89736ff4bf27183233d09',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
                'description': None,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
            'note': 'Video is freely available via original URL '
                    'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
                'title': 'Key & Peele: Terrorist Interrogation',
                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
                'uploader_id': 'atencio',
                'uploader': 'Peter Atencio',
                'duration': 187,
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'md5': '3363dd6ffebe3784d56f4132317fd446',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
        {
            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
            'url': 'https://player.vimeo.com/video/98044508',
            'note': 'The js code contains assignments to the same variable as the config',
            'info_dict': {
                'id': '98044508',
                'ext': 'mp4',
                'title': 'Pier Solar OUYA Official Trailer',
                'uploader': 'Tulio Gonçalves',
                'uploader_id': 'user28849593',
            },
        },
    ]

    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = compat_urllib_parse.urlencode({
            'password': password,
            'token': token,
        })
        # I didn't manage to use the password with https
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        return self._download_webpage(
            password_request, video_id,
            'Verifying the password', 'Wrong password')

    def _verify_player_video_password(self, url, video_id):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        orig_url = url
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # we try to find out which variable the config dict is assigned to
                m_variable_name = re.search('(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

            if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
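                # "thumbs" maps pixel widths to thumbnail URLs; pick the URL
                # with the largest numeric width (non-numeric keys sort as 0).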
                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description

        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage, 'description', default=None)
        if not video_description:
            video_description = self._html_search_meta(
                'description', webpage, default=None)
        if not video_description and mobj.group('pro'):
            orig_webpage = self._download_webpage(
                orig_url, video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta(
                    'description', orig_webpage, default=None)
        if not video_description and not mobj.group('player'):
            self._downloader.report_warning('Cannot find video description')

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']
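        # The signature and timestamp are the query parameters that sign the
        # play_redirect fallback URLs built in the format loop below.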

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
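                # If the config did not provide a direct URL for this quality,
                # fall back to the generic play_redirect endpoint, using the
                # signature and timestamp extracted above.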
                if video_url is None:
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())
                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }


class VimeoChannelIE(InfoExtractor):
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'http://vimeo.com/channels/tributes',
        'info_dict': {
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _login_list_password(self, page_url, list_id, webpage):
        login_form = self._search_regex(
            r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
            webpage, 'login form', default=None)
        if not login_form:
            return webpage

        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
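        # Replay the hidden <input> fields of the password form, add the
        # xsrft token and the password, and POST them to the form's action URL.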
        fields = dict(re.findall(r'''(?x)<input\s+
            type="hidden"\s+
            name="([^"]+)"\s+
            value="([^"]*)"
            ''', login_form))
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        fields['token'] = token
        fields['password'] = password
        post = compat_urllib_parse.urlencode(fields)
        password_path = self._search_regex(
            r'action="([^"]+)"', login_form, 'password URL')
        password_url = compat_urlparse.urljoin(page_url, password_path)
        password_request = compat_urllib_request.Request(password_url, post)
        password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
        self._set_cookie('vimeo.com', 'xsrft', token)

        return self._download_webpage(
            password_request, list_id,
            'Verifying the password', 'Wrong password')

    def _extract_videos(self, list_id, base_url):
        video_ids = []
        for pagenum in itertools.count(1):
            page_url = self._page_url(base_url, pagenum)
            webpage = self._download_webpage(
                page_url, list_id,
                'Downloading page %s' % pagenum)
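            # The password form, if present, is only handled on the first page;
            # _login_list_password returns the unlocked page in that case.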
            if pagenum == 1:
                webpage = self._login_list_password(page_url, list_id, webpage)

            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)


class VimeoUserIE(VimeoChannelIE):
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'http://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
        },
        'playlist_mincount': 66,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/%s' % name)


class VimeoAlbumIE(VimeoChannelIE):
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'http://vimeo.com/album/2632481',
        'info_dict': {
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }, {
        'note': 'Password-protected album',
        'url': 'https://vimeo.com/album/3253534',
        'info_dict': {
            'title': 'test',
            'id': '3253534',
        },
        'playlist_count': 1,
        'params': {
            'videopassword': 'youtube-dl',
        }
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        album_id = self._match_id(url)
        return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)


class VimeoGroupsIE(VimeoAlbumIE):
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
    _TESTS = [{
        'url': 'http://vimeo.com/groups/rolexawards',
        'info_dict': {
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]

    def _extract_list_title(self, webpage):
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)


class VimeoReviewIE(InfoExtractor):
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'id': '75524534',
            'ext': 'mp4',
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        player_url = 'https://player.vimeo.com/player/' + video_id
        return self.url_result(player_url, 'Vimeo', video_id)


class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
    _LOGIN_REQUIRED = True
    _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
    _TESTS = [{
        'url': 'http://vimeo.com/home/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        url = '%s/page:%d/' % (base_url, pagenum)
        request = compat_urllib_request.Request(url)
        # Set the header to get a partial html page with the ids,
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')


class VimeoLikesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                    .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)
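
        # _get_page lazily yields url results for a single likes page;
        # InAdvancePagedList then exposes the page_count pages (PAGE_SIZE
        # entries each) as one lazily evaluated playlist.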
        def _get_page(idx):
            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
                self.http_scheme(), user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }

        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }