[extractor/GoPlay] Add extractor (#3412)
Replaces the old Vier extractors. Closes https://github.com/yt-dlp/yt-dlp/issues/1546
Based on: https://github.com/ytdl-org/youtube-dl/pull/27815
Authored by: jeroenj, CNugteren, basrieter
This commit is contained in:
parent 46d72cd2c7
commit fada8272b6
3 changed files with 396 additions and 262 deletions
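The new GoPlayIE only serves content to registered users (both of its test cases are skipped for that reason, and it declares _NETRC_MACHINE = 'goplay'), so credentials have to be supplied. A minimal sketch of invoking it through yt-dlp's embedding API, using the URL from the extractor's first test case below and placeholder credentials:

import yt_dlp

ydl_opts = {
    # GoPlay requires a registered account; both values below are placeholders
    'username': 'user@example.com',
    'password': 'hunter2',
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.goplay.be/video/de-container-cup/de-container-cup-s3/de-container-cup-s3-aflevering-2'])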
@@ -649,6 +649,7 @@
 )
 from .googlesearch import GoogleSearchIE
 from .gopro import GoProIE
+from .goplay import GoPlayIE
 from .goshgay import GoshgayIE
 from .gotostage import GoToStageIE
 from .gputechconf import GPUTechConfIE
@@ -2021,7 +2022,6 @@
     VidioLiveIE
 )
 from .vidlii import VidLiiIE
-from .vier import VierIE, VierVideosIE
 from .viewlift import (
     ViewLiftIE,
     ViewLiftEmbedIE,
yt_dlp/extractor/goplay.py (new file, 395 lines)
@@ -0,0 +1,395 @@
import base64
import binascii
import datetime
import hashlib
import hmac
import json
import os

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    traverse_obj,
    unescapeHTML,
)


class GoPlayIE(InfoExtractor):
    _VALID_URL = r'https?://(www\.)?goplay\.be/video/([^/]+/[^/]+/|)(?P<display_id>[^/#]+)'

    _NETRC_MACHINE = 'goplay'

    _TESTS = [{
        'url': 'https://www.goplay.be/video/de-container-cup/de-container-cup-s3/de-container-cup-s3-aflevering-2#autoplay',
        'info_dict': {
            'id': '9c4214b8-e55d-4e4b-a446-f015f6c6f811',
            'ext': 'mp4',
            'title': 'S3 - Aflevering 2',
            'series': 'De Container Cup',
            'season': 'Season 3',
            'season_number': 3,
            'episode': 'Episode 2',
            'episode_number': 2,
        },
        'skip': 'This video is only available for registered users'
    }, {
        'url': 'https://www.goplay.be/video/a-family-for-thr-holidays-s1-aflevering-1#autoplay',
        'info_dict': {
            'id': '74e3ed07-748c-49e4-85a0-393a93337dbf',
            'ext': 'mp4',
            'title': 'A Family for the Holidays',
        },
        'skip': 'This video is only available for registered users'
    }]

    _id_token = None

    def _perform_login(self, username, password):
        self.report_login()
        aws = AwsIdp(ie=self, pool_id='eu-west-1_dViSsKM5Y', client_id='6s1h851s8uplco5h6mqh1jac8m')
        self._id_token, _ = aws.authenticate(username=username, password=password)

    def _real_initialize(self):
        if not self._id_token:
            raise self.raise_login_required(method='password')

    def _real_extract(self, url):
        url, display_id = self._match_valid_url(url).group(0, 'display_id')
        webpage = self._download_webpage(url, display_id)
        video_data_json = self._html_search_regex(r'<div\s+data-hero="([^"]+)"', webpage, 'video_data')
        video_data = self._parse_json(unescapeHTML(video_data_json), display_id).get('data')

        movie = video_data.get('movie')
        if movie:
            video_id = movie['videoUuid']
            info_dict = {
                'title': movie.get('title')
            }
        else:
            episode = traverse_obj(video_data, ('playlists', ..., 'episodes', lambda _, v: v['pageInfo']['url'] == url), get_all=False)
            video_id = episode['videoUuid']
            info_dict = {
                'title': episode.get('episodeTitle'),
                'series': traverse_obj(episode, ('program', 'title')),
                'season_number': episode.get('seasonNumber'),
                'episode_number': episode.get('episodeNumber'),
            }

        api = self._download_json(
            f'https://api.viervijfzes.be/content/{video_id}',
            video_id, headers={'Authorization': self._id_token})

        formats, subs = self._extract_m3u8_formats_and_subtitles(
            api['video']['S'], video_id, ext='mp4', m3u8_id='HLS')
        self._sort_formats(formats)

        info_dict.update({
            'id': video_id,
            'formats': formats,
        })

        return info_dict


# Taken from https://github.com/add-ons/plugin.video.viervijfzes/blob/master/resources/lib/viervijfzes/auth_awsidp.py
# Released into Public domain by https://github.com/michaelarnauts


class InvalidLoginException(ExtractorError):
    """ The login credentials are invalid """


class AuthenticationException(ExtractorError):
    """ Something went wrong while logging in """


class AwsIdp:
    """ AWS Identity Provider """

    def __init__(self, ie, pool_id, client_id):
        """
        :param InfoExtractor ie: The extractor that instantiated this class.
        :param str pool_id: The AWS user pool to connect to (format: <region>_<poolid>).
            E.g.: eu-west-1_aLkOfYN3T
        :param str client_id: The client application ID (the ID of the application connecting)
        """

        self.ie = ie

        self.pool_id = pool_id
        if "_" not in self.pool_id:
            raise ValueError("Invalid pool_id format. Should be <region>_<poolid>.")

        self.client_id = client_id
        self.region = self.pool_id.split("_")[0]
        self.url = "https://cognito-idp.%s.amazonaws.com/" % (self.region,)

        # Initialize the values
        # https://github.com/aws/amazon-cognito-identity-js/blob/master/src/AuthenticationHelper.js#L22
        self.n_hex = 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + \
                     '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + \
                     'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + \
                     'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + \
                     'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + \
                     'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + \
                     '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + \
                     '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + \
                     'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + \
                     'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + \
                     '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + \
                     'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + \
                     'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + \
                     'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + \
                     'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + \
                     '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF'

        # https://github.com/aws/amazon-cognito-identity-js/blob/master/src/AuthenticationHelper.js#L49
        self.g_hex = '2'
        self.info_bits = bytearray('Caldera Derived Key', 'utf-8')

        self.big_n = self.__hex_to_long(self.n_hex)
        self.g = self.__hex_to_long(self.g_hex)
        self.k = self.__hex_to_long(self.__hex_hash('00' + self.n_hex + '0' + self.g_hex))
        self.small_a_value = self.__generate_random_small_a()
        self.large_a_value = self.__calculate_a()

    def authenticate(self, username, password):
        """ Authenticate with a username and password. """
        # Step 1: First initiate an authentication request
        auth_data_dict = self.__get_authentication_request(username)
        auth_data = json.dumps(auth_data_dict).encode("utf-8")
        auth_headers = {
            "X-Amz-Target": "AWSCognitoIdentityProviderService.InitiateAuth",
            "Accept-Encoding": "identity",
            "Content-Type": "application/x-amz-json-1.1"
        }
        auth_response_json = self.ie._download_json(
            self.url, None, data=auth_data, headers=auth_headers,
            note='Authenticating username', errnote='Invalid username')
        challenge_parameters = auth_response_json.get("ChallengeParameters")

        if auth_response_json.get("ChallengeName") != "PASSWORD_VERIFIER":
            raise AuthenticationException(auth_response_json["message"])

        # Step 2: Respond to the Challenge with a valid ChallengeResponse
        challenge_request = self.__get_challenge_response_request(challenge_parameters, password)
        challenge_data = json.dumps(challenge_request).encode("utf-8")
        challenge_headers = {
            "X-Amz-Target": "AWSCognitoIdentityProviderService.RespondToAuthChallenge",
            "Content-Type": "application/x-amz-json-1.1"
        }
        auth_response_json = self.ie._download_json(
            self.url, None, data=challenge_data, headers=challenge_headers,
            note='Authenticating password', errnote='Invalid password')

        if 'message' in auth_response_json:
            raise InvalidLoginException(auth_response_json['message'])
        return (
            auth_response_json['AuthenticationResult']['IdToken'],
            auth_response_json['AuthenticationResult']['RefreshToken']
        )

    def __get_authentication_request(self, username):
        """

        :param str username: The username to use

        :return: A full Authorization request.
        :rtype: dict
        """
        auth_request = {
            "AuthParameters": {
                "USERNAME": username,
                "SRP_A": self.__long_to_hex(self.large_a_value)
            },
            "AuthFlow": "USER_SRP_AUTH",
            "ClientId": self.client_id
        }
        return auth_request

    def __get_challenge_response_request(self, challenge_parameters, password):
        """ Create a Challenge Response Request object.

        :param dict[str,str|int] challenge_parameters: The parameters for the challenge.
        :param str password: The password.

        :return: A valid and full request data object to use as a response for a challenge.
        :rtype: dict
        """
        user_id = challenge_parameters["USERNAME"]
        user_id_for_srp = challenge_parameters["USER_ID_FOR_SRP"]
        srp_b = challenge_parameters["SRP_B"]
        salt = challenge_parameters["SALT"]
        secret_block = challenge_parameters["SECRET_BLOCK"]

        timestamp = self.__get_current_timestamp()

        # Get a HKDF key for the password, SrpB and the Salt
        hkdf = self.__get_hkdf_key_for_password(
            user_id_for_srp,
            password,
            self.__hex_to_long(srp_b),
            salt
        )
        secret_block_bytes = base64.standard_b64decode(secret_block)

        # the message is a combo of the pool_id, provided SRP userId, the Secret and Timestamp
        msg = \
            bytearray(self.pool_id.split('_')[1], 'utf-8') + \
            bytearray(user_id_for_srp, 'utf-8') + \
            bytearray(secret_block_bytes) + \
            bytearray(timestamp, 'utf-8')
        hmac_obj = hmac.new(hkdf, msg, digestmod=hashlib.sha256)
        signature_string = base64.standard_b64encode(hmac_obj.digest()).decode('utf-8')
        challenge_request = {
            "ChallengeResponses": {
                "USERNAME": user_id,
                "TIMESTAMP": timestamp,
                "PASSWORD_CLAIM_SECRET_BLOCK": secret_block,
                "PASSWORD_CLAIM_SIGNATURE": signature_string
            },
            "ChallengeName": "PASSWORD_VERIFIER",
            "ClientId": self.client_id
        }
        return challenge_request

    def __get_hkdf_key_for_password(self, username, password, server_b_value, salt):
        """ Calculates the final hkdf based on computed S value, and computed U value and the key.

        :param str username: Username.
        :param str password: Password.
        :param int server_b_value: Server B value.
        :param int salt: Generated salt.

        :return Computed HKDF value.
        :rtype: object
        """

        u_value = self.__calculate_u(self.large_a_value, server_b_value)
        if u_value == 0:
            raise ValueError('U cannot be zero.')
        username_password = '%s%s:%s' % (self.pool_id.split('_')[1], username, password)
        username_password_hash = self.__hash_sha256(username_password.encode('utf-8'))

        x_value = self.__hex_to_long(self.__hex_hash(self.__pad_hex(salt) + username_password_hash))
        g_mod_pow_xn = pow(self.g, x_value, self.big_n)
        int_value2 = server_b_value - self.k * g_mod_pow_xn
        s_value = pow(int_value2, self.small_a_value + u_value * x_value, self.big_n)
        hkdf = self.__compute_hkdf(
            bytearray.fromhex(self.__pad_hex(s_value)),
            bytearray.fromhex(self.__pad_hex(self.__long_to_hex(u_value)))
        )
        return hkdf

    def __compute_hkdf(self, ikm, salt):
        """ Standard hkdf algorithm

        :param {Buffer} ikm Input key material.
        :param {Buffer} salt Salt value.
        :return {Buffer} Strong key material.
        """

        prk = hmac.new(salt, ikm, hashlib.sha256).digest()
        info_bits_update = self.info_bits + bytearray(chr(1), 'utf-8')
        hmac_hash = hmac.new(prk, info_bits_update, hashlib.sha256).digest()
        return hmac_hash[:16]

    def __calculate_u(self, big_a, big_b):
        """ Calculate the client's value U which is the hash of A and B

        :param int big_a: Large A value.
        :param int big_b: Server B value.

        :return Computed U value.
        :rtype: int
        """

        u_hex_hash = self.__hex_hash(self.__pad_hex(big_a) + self.__pad_hex(big_b))
        return self.__hex_to_long(u_hex_hash)

    def __generate_random_small_a(self):
        """ Helper function to generate a random big integer

        :return a random value.
        :rtype: int
        """
        random_long_int = self.__get_random(128)
        return random_long_int % self.big_n

    def __calculate_a(self):
        """ Calculate the client's public value A = g^a%N with the generated random number a

        :return Computed large A.
        :rtype: int
        """

        big_a = pow(self.g, self.small_a_value, self.big_n)
        # safety check
        if (big_a % self.big_n) == 0:
            raise ValueError('Safety check for A failed')
        return big_a

    @staticmethod
    def __long_to_hex(long_num):
        return '%x' % long_num

    @staticmethod
    def __hex_to_long(hex_string):
        return int(hex_string, 16)

    @staticmethod
    def __hex_hash(hex_string):
        return AwsIdp.__hash_sha256(bytearray.fromhex(hex_string))

    @staticmethod
    def __hash_sha256(buf):
        """AuthenticationHelper.hash"""
        digest = hashlib.sha256(buf).hexdigest()
        return (64 - len(digest)) * '0' + digest

    @staticmethod
    def __pad_hex(long_int):
        """ Converts a Long integer (or hex string) to hex format padded with zeroes for hashing

        :param int|str long_int: Number or string to pad.

        :return Padded hex string.
        :rtype: str
        """

        if not isinstance(long_int, str):
            hash_str = AwsIdp.__long_to_hex(long_int)
        else:
            hash_str = long_int
        if len(hash_str) % 2 == 1:
            hash_str = '0%s' % hash_str
        elif hash_str[0] in '89ABCDEFabcdef':
            hash_str = '00%s' % hash_str
        return hash_str

    @staticmethod
    def __get_random(nbytes):
        random_hex = binascii.hexlify(os.urandom(nbytes))
        return AwsIdp.__hex_to_long(random_hex)

    @staticmethod
    def __get_current_timestamp():
        """ Creates a timestamp with the correct English format.

        :return: timestamp in format 'Sun Jan 27 19:00:04 UTC 2019'
        :rtype: str
        """

        # We need US only data, so we cannot just do a strftime:
        # Sun Jan 27 19:00:04 UTC 2019
        months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

        time_now = datetime.datetime.utcnow()
        format_string = "{} {} {} %H:%M:%S UTC %Y".format(days[time_now.weekday()], months[time_now.month], time_now.day)
        time_string = datetime.datetime.utcnow().strftime(format_string)
        return time_string

    def __str__(self):
        return "AWS IDP Client for:\nRegion: %s\nPoolId: %s\nAppId: %s" % (
            self.region, self.pool_id.split("_")[1], self.client_id
        )
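As a point of reference (not part of the commit), the Cognito SRP helper above can be exercised on its own. A rough sketch that mirrors what GoPlayIE does in _perform_login and _real_extract, assuming `ie` is an already-initialized InfoExtractor and reusing the pool/client IDs and the video UUID that appear above:

# Sketch only: log in via SRP, then fetch content metadata with the returned IdToken
aws = AwsIdp(ie=ie, pool_id='eu-west-1_dViSsKM5Y', client_id='6s1h851s8uplco5h6mqh1jac8m')
id_token, refresh_token = aws.authenticate(username='user@example.com', password='hunter2')
api = ie._download_json(
    'https://api.viervijfzes.be/content/9c4214b8-e55d-4e4b-a446-f015f6c6f811',  # UUID from the first test case
    None, headers={'Authorization': id_token})
hls_url = api['video']['S']  # HLS manifest, as fed to _extract_m3u8_formats_and_subtitles above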
yt_dlp/extractor/vier.py (deleted, 261 lines)
@@ -1,261 +0,0 @@
import re
import itertools

from .common import InfoExtractor
from ..utils import (
    urlencode_postdata,
    int_or_none,
    unified_strdate,
)


class VierIE(InfoExtractor):
    IE_NAME = 'vier'
    IE_DESC = 'vier.be and vijf.be'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?(?P<site>vier|vijf)\.be/
                        (?:
                            (?:
                                [^/]+/videos|
                                video(?:/[^/]+)*
                            )/
                            (?P<display_id>[^/]+)(?:/(?P<id>\d+))?|
                            (?:
                                video/v3/embed|
                                embed/video/public
                            )/(?P<embed_id>\d+)
                        )
                    '''
    _NETRC_MACHINE = 'vier'
    _TESTS = [{
        'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129',
        'md5': 'e4ae2054a6b040ef1e289e20d111b46e',
        'info_dict': {
            'id': '16129',
            'display_id': 'het-wordt-warm-de-moestuin',
            'ext': 'mp4',
            'title': 'Het wordt warm in De Moestuin',
            'description': 'De vele uren werk eisen hun tol. Wim droomt van assistentie...',
            'upload_date': '20121025',
            'series': 'Plan B',
            'tags': ['De Moestuin', 'Moestuin', 'meisjes', 'Tomaat', 'Wim', 'Droom'],
        },
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos/zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas/2561614',
        'info_dict': {
            'id': '2561614',
            'display_id': 'zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas',
            'ext': 'mp4',
            'title': 'md5:84f45fe48b8c1fa296a7f6d208d080a7',
            'description': 'md5:0356d4981e58b8cbee19355cbd51a8fe',
            'upload_date': '20170228',
            'series': 'Temptation Island',
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'Jani gaat naar Tokio - Aflevering 4',
            'description': 'md5:aa8d611541db6ae9e863125704511f88',
            'upload_date': '20170501',
            'series': 'Jani gaat',
            'episode_number': 4,
            'tags': ['Jani Gaat', 'Volledige Aflevering'],
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires account credentials',
    }, {
        # Requires account credentials but bypassed extraction via v3/embed page
        # without metadata
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'jani-gaat-naar-tokio-aflevering-4',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Log in to extract metadata'],
    }, {
        # Without video id in URL
        'url': 'http://www.vier.be/planb/videos/dit-najaar-plan-b',
        'only_matching': True,
    }, {
        'url': 'http://www.vier.be/video/v3/embed/16129',
        'only_matching': True,
    }, {
        'url': 'https://www.vijf.be/embed/video/public/4093',
        'only_matching': True,
    }, {
        'url': 'https://www.vier.be/video/blockbusters/in-juli-en-augustus-summer-classics',
        'only_matching': True,
    }, {
        'url': 'https://www.vier.be/video/achter-de-rug/2017/achter-de-rug-seizoen-1-aflevering-6',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._logged_in = False

    def _login(self, site):
        username, password = self._get_login_info()
        if username is None or password is None:
            return

        login_page = self._download_webpage(
            'http://www.%s.be/user/login' % site,
            None, note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata({
                'form_id': 'user_login',
                'name': username,
                'pass': password,
            }),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        login_error = self._html_search_regex(
            r'(?s)<div class="messages error">\s*<div>\s*<h2.+?</h2>(.+?)<',
            login_page, 'login error', default=None)
        if login_error:
            self.report_warning('Unable to log in: %s' % login_error)
        else:
            self._logged_in = True

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        embed_id = mobj.group('embed_id')
        display_id = mobj.group('display_id') or embed_id
        video_id = mobj.group('id') or embed_id
        site = mobj.group('site')

        if not self._logged_in:
            self._login(site)

        webpage = self._download_webpage(url, display_id)

        if r'id="user-login"' in webpage:
            self.report_warning(
                'Log in to extract metadata', video_id=display_id)
            webpage = self._download_webpage(
                'http://www.%s.be/video/v3/embed/%s' % (site, video_id),
                display_id)

        video_id = self._search_regex(
            [r'data-nid="(\d+)"', r'"nid"\s*:\s*"(\d+)"'],
            webpage, 'video id', default=video_id or display_id)

        playlist_url = self._search_regex(
            r'data-file=(["\'])(?P<url>(?:https?:)?//[^/]+/.+?\.m3u8.*?)\1',
            webpage, 'm3u8 url', default=None, group='url')

        if not playlist_url:
            application = self._search_regex(
                [r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'],
                webpage, 'application', default=site + '_vod')
            filename = self._search_regex(
                [r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'],
                webpage, 'filename')
            playlist_url = 'http://vod.streamcloud.be/%s/_definst_/mp4:%s.mp4/playlist.m3u8' % (application, filename)

        formats = self._extract_wowza_formats(
            playlist_url, display_id, skip_protocols=['dash'])
        self._sort_formats(formats)

        title = self._og_search_title(webpage, default=display_id)
        description = self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-type-text-with-summary\b[^>]*?\1[^>]*>.*?<p>(?P<value>.+?)</p>',
            webpage, 'description', default=None, group='value')
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        upload_date = unified_strdate(self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-name-post-date\b[^>]*?\1[^>]*>.*?(?P<value>\d{2}/\d{2}/\d{4})',
            webpage, 'upload date', default=None, group='value'))

        series = self._search_regex(
            r'data-program=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'series', default=None, group='value')
        episode_number = int_or_none(self._search_regex(
            r'(?i)aflevering (\d+)', title, 'episode number', default=None))
        tags = re.findall(r'<a\b[^>]+\bhref=["\']/tags/[^>]+>([^<]+)<', webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'series': series,
            'episode_number': episode_number,
            'tags': tags,
            'formats': formats,
        }


class VierVideosIE(InfoExtractor):
    IE_NAME = 'vier:videos'
    _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?P<program>[^/]+)/videos(?:\?.*\bpage=(?P<page>\d+)|$)'
    _TESTS = [{
        'url': 'http://www.vier.be/demoestuin/videos',
        'info_dict': {
            'id': 'demoestuin',
        },
        'playlist_mincount': 153,
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos',
        'info_dict': {
            'id': 'temptationisland',
        },
        'playlist_mincount': 159,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=6',
        'info_dict': {
            'id': 'demoestuin-page6',
        },
        'playlist_mincount': 20,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=7',
        'info_dict': {
            'id': 'demoestuin-page7',
        },
        'playlist_mincount': 13,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        program = mobj.group('program')
        site = mobj.group('site')

        page_id = mobj.group('page')
        if page_id:
            page_id = int(page_id)
            start_page = page_id
            playlist_id = '%s-page%d' % (program, page_id)
        else:
            start_page = 0
            playlist_id = program

        entries = []
        for current_page_id in itertools.count(start_page):
            current_page = self._download_webpage(
                'http://www.%s.be/%s/videos?page=%d' % (site, program, current_page_id),
                program,
                'Downloading page %d' % (current_page_id + 1))
            page_entries = [
                self.url_result('http://www.' + site + '.be' + video_url, 'Vier')
                for video_url in re.findall(
                    r'<h[23]><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
            entries.extend(page_entries)
            if page_id or '>Meer<' not in current_page:
                break

        return self.playlist_result(entries, playlist_id)