mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-14 20:38:11 -05:00
b827ee921f
* [scrippsnetworks] Add new extractor(closes #19857)(closes #22981) * [teachable] Improve locked lessons detection (#23528) * [teachable] Fail with error message if no video URL found * [extractors] add missing import for ScrippsNetworksIE * [brightcove] cache brightcove player policy keys * [prosiebensat1] improve geo restriction handling(closes #23571) * [soundcloud] automatically update client id on failing requests * [spankbang] Fix extraction (closes #23307, closes #23423, closes #23444) * [spankbang] Improve removed video detection (#23423) * [brightcove] update policy key on failing requests * [pornhub] Fix extraction and add support for m3u8 formats (closes #22749, closes #23082) * [pornhub] Improve locked videos detection (closes #22449, closes #22780) * [brightcove] invalidate policy key cache on failing requests * [soundcloud] fix client id extraction for non fatal requests * [ChangeLog] Actualize [ci skip] * [devscripts/create-github-release] Switch to using PAT for authentication Basic authentication will be deprecated soon * release 2020.01.01 * [redtube] Detect private videos (#23518) * [vice] improve extraction(closes #23631) * [devscripts/create-github-release] Remove unused import * [wistia] improve format extraction and extract subtitles(closes #22590) * [nrktv:seriebase] Fix extraction (closes #23625) (#23537) * [discovery] fix anonymous token extraction(closes #23650) * [scrippsnetworks] add support for www.discovery.com videos * [scrippsnetworks] correct test case URL * [dctp] fix format extraction(closes #23656) * [pandatv] Remove extractor (#23630) * [naver] improve extraction - improve geo-restriction handling - extract automatic captions - extract uploader metadata - extract VLive HLS formats * [naver] improve metadata extraction * [cloudflarestream] improve extraction - add support for bytehighway.net domain - add support for signed URLs - extract thumbnail * [cloudflarestream] import embed URL extraction * [lego] fix extraction and extract subtitle(closes #23687) * [safari] Fix kaltura session extraction (closes #23679) (#23670) * [orf:fm4] Fix extraction (#23599) * [orf:radio] Clean description and improve extraction * [twitter] add support for promo_video_website cards(closes #23711) * [vodplatform] add support for embed.kwikmotion.com domain * [ndr:base:embed] Improve thumbnails extraction (closes #23731) * [canvas] Add support for new API endpoint and update tests (closes #17680, closes #18629) * [travis] Add flake8 job (#23720) * [yourporn] Fix extraction (closes #21645, closes #22255, closes #23459) * [ChangeLog] Actualize [ci skip] * release 2020.01.15 * [soundcloud] Restore previews extraction (closes #23739) * [orf:tvthek] Improve geo restricted videos detection (closes #23741) * [zype] improve extraction - extract subtitles(closes #21258) - support URLs with alternative keys/tokens(#21258) - extract more metadata * [americastestkitchen] fix extraction * [nbc] add support for nbc multi network URLs(closes #23049) * [ard] improve extraction(closes #23761) - simplify extraction - extract age limit and series - bypass geo-restriction * [ivi:compilation] Fix entries extraction (closes #23770) * [24video] Add support for 24video.vip (closes #23753) * [businessinsider] Fix jwplatform id extraction (closes #22929) (#22954) * [ard] add a missing condition * [azmedien] fix extraction(closes #23783) * [voicerepublic] fix extraction * [stretchinternet] fix extraction(closes #4319) * [youtube] Fix sigfunc name extraction (closes #23819) * [ChangeLog] Actualize [ci skip] * release 2020.01.24 * [soundcloud] imporve private playlist/set tracks extraction https://github.com/ytdl-org/youtube-dl/issues/3707#issuecomment-577873539 * [svt] fix article extraction(closes #22897)(closes #22919) * [svt] fix series extraction(closes #22297) * [viewlift] improve extraction - fix extraction(closes #23851) - add add support for authentication - add support for more domains * [vimeo] fix album extraction(closes #23864) * [tva] Relax _VALID_URL (closes #23903) * [tv5mondeplus] Fix extraction (closes #23907, closes #23911) * [twitch:stream] Lowercase channel id for stream request (closes #23917) * [sportdeutschland] Update to new sportdeutschland API They switched to SSL, but under a different host AND path... Remove the old test cases because these videos have become unavailable. * [popcorntimes] Add extractor (closes #23949) * [thisoldhouse] fix extraction(closes #23951) * [toggle] Add support for mewatch.sg (closes #23895) (#23930) * [compat] Introduce compat_realpath (refs #23991) * [update] Fix updating via symlinks (closes #23991) * [nytimes] improve format sorting(closes #24010) * [abc:iview] Support 720p (#22907) (#22921) * [nova:embed] Fix extraction (closes #23672) * [nova:embed] Improve (closes #23690) * [nova] Improve extraction (refs #23690) * [jpopsuki] Remove extractor (closes #23858) * [YoutubeDL] Fix playlist entry indexing with --playlist-items (closes #10591, closes #10622) * [test_YoutubeDL] Fix get_ids * [test_YoutubeDL] Add tests for #10591 (closes #23873) * [24video] Add support for porn.24video.net (closes #23779, closes #23784) * [npr] Add support for streams (closes #24042) * [ChangeLog] Actualize [ci skip] * release 2020.02.16 * [tv2dk:bornholm:play] Fix extraction (#24076) * [imdb] Fix extraction (closes #23443) * [wistia] Add support for multiple generic embeds (closes #8347, closes #11385) * [teachable] Add support for multiple videos per lecture (closes #24101) * [pornhd] Fix extraction (closes #24128) * [options] Remove duplicate short option -v for --version (#24162) * [extractor/common] Convert ISM manifest to unicode before processing on python 2 (#24152) * [YoutubeDL] Force redirect URL to unicode on python 2 * Remove no longer needed compat_str around geturl * [youjizz] Fix extraction (closes #24181) * [test_subtitles] Remove obsolete test * [zdf:channel] Fix tests * [zapiks] Fix test * [xtube] Fix metadata extraction (closes #21073, closes #22455) * [xtube:user] Fix test * [telecinco] Fix extraction (refs #24195) * [telecinco] Add support for article opening videos * [franceculture] Fix extraction (closes #24204) * [xhamster] Fix extraction (closes #24205) * [ChangeLog] Actualize [ci skip] * release 2020.03.01 * [vimeo] Fix subtitles URLs (#24209) * [servus] Add support for new URL schema (closes #23475, closes #23583, closes #24142) * [youtube:playlist] Fix tests (closes #23872) (#23885) * [peertube] Improve extraction * [peertube] Fix issues and improve extraction (closes #23657) * [pornhub] Improve title extraction (closes #24184) * [vimeo] fix showcase password protected video extraction(closes #24224) * [youtube] Fix age-gated videos support without login (closes #24248) * [youtube] Fix tests * [ChangeLog] Actualize [ci skip] * release 2020.03.06 * [nhk] update API version(closes #24270) * [youtube] Improve extraction in 429 error conditions (closes #24283) * [youtube] Improve age-gated videos extraction in 429 error conditions (refs #24283) * [youtube] Remove outdated code Additional get_video_info requests don't seem to provide any extra itags any longer * [README.md] Clarify 429 error * [pornhub] Add support for pornhubpremium.com (#24288) * [utils] Add support for cookies with spaces used instead of tabs * [ChangeLog] Actualize [ci skip] * release 2020.03.08 * Revert "[utils] Add support for cookies with spaces used instead of tabs" According to [1] TABs must be used as separators between fields. Files produces by some tools with spaces as separators are considered malformed. 1. https://curl.haxx.se/docs/http-cookies.html This reverts commitcff99c91d1
. * [utils] Add reference to cookie file format * Revert "[vimeo] fix showcase password protected video extraction(closes #24224)" This reverts commit12ee431676
. * [nhk] Relax _VALID_URL (#24329) * [nhk] Remove obsolete rtmp formats (closes #24329) * [nhk] Update m3u8 URL and use native hls (#24329) * [ndr] Fix extraction (closes #24326) * [xtube] Fix formats extraction (closes #24348) * [xtube] Fix typo * [hellporno] Fix extraction (closes #24399) * [cbc:watch] Add support for authentication * [cbc:watch] Fix authenticated device token caching (closes #19160) * [soundcloud] fix download url extraction(closes #24394) * [limelight] remove disabled API requests(closes #24255) * [bilibili] Add support for new URL schema with BV ids (closes #24439, closes #24442) * [bilibili] Add support for player.bilibili.com (closes #24402) * [teachable] Extract chapter metadata (closes #24421) * [generic] Look for teachable embeds before wistia * [teachable] Update upskillcourses domain New version does not use teachable platform any longer * [teachable] Update gns3 domain * [teachable] Update test * [ChangeLog] Actualize [ci skip] * [ChangeLog] Actualize [ci skip] * release 2020.03.24 * [spankwire] Fix extraction (closes #18924, closes #20648) * [spankwire] Add support for generic embeds (refs #24633) * [youporn] Add support form generic embeds * [mofosex] Add support for generic embeds (closes #24633) * [tele5] Fix extraction (closes #24553) * [extractor/common] Skip malformed ISM manifest XMLs while extracting ISM formats (#24667) * [tv4] Fix ISM formats extraction (closes #24667) * [twitch:clips] Extend _VALID_URL (closes #24290) (#24642) * [motherless] Fix extraction (closes #24699) * [nova:embed] Fix extraction (closes #24700) * [youtube] Skip broken multifeed videos (closes #24711) * [soundcloud] Extract AAC format * [soundcloud] Improve AAC format extraction (closes #19173, closes #24708) * [thisoldhouse] Fix video id extraction (closes #24548) Added support for: with of without "www." and either ".chorus.build" or ".com" It now validated correctly on older URL's ``` <iframe src="https://thisoldhouse.chorus.build/videos/zype/5e33baec27d2e50001d5f52f ``` and newer ones ``` <iframe src="https://www.thisoldhouse.com/videos/zype/5e2b70e95216cc0001615120 ``` * [thisoldhouse] Improve video id extraction (closes #24549) * [youtube] Fix DRM videos detection (refs #24736) * [options] Clarify doc on --exec command (closes #19087) (#24883) * [prosiebensat1] Improve extraction and remove 7tv.de support (#24948) * [prosiebensat1] Extract series metadata * [tenplay] Relax _VALID_URL (closes #25001) * [tvplay] fix Viafree extraction(closes #15189)(closes #24473)(closes #24789) * [yahoo] fix GYAO Player extraction and relax title URL regex(closes #24178)(closes #24778) * [youtube] Use redirected video id if any (closes #25063) * [youtube] Improve player id extraction and add tests * [extractor/common] Extract multiple JSON-LD entries * [crunchyroll] Fix and improve extraction (closes #25096, closes #25060) * [ChangeLog] Actualize [ci skip] * release 2020.05.03 * [puhutv] Remove no longer available HTTP formats (closes #25124) * [utils] Improve cookie files support + Add support for UTF-8 in cookie files * Skip malformed cookie file entries instead of crashing (invalid entry len, invalid expires at) * [dailymotion] Fix typo * [compat] Introduce compat_cookiejar_Cookie * [extractor/common] Use compat_cookiejar_Cookie for _set_cookie (closes #23256, closes #24776) To always ensure cookie name and value are bytestrings on python 2. * [orf] Add support for more radio stations (closes #24938) (#24968) * [uol] fix extraction(closes #22007) * [downloader/http] Finish downloading once received data length matches expected Always do this if possible, i.e. if Content-Length or expected length is known, not only in test. This will save unnecessary last extra loop trying to read 0 bytes. * [downloader/http] Request last data block of exact remaining size Always request last data block of exact size remaining to download if possible not the current block size. * [iprima] Improve extraction (closes #25138) * [youtube] Improve signature cipher extraction (closes #25188) * [ChangeLog] Actualize [ci skip] * release 2020.05.08 * [spike] fix Bellator mgid extraction(closes #25195) * [bbccouk] PEP8 * [mailru] Fix extraction (closes #24530) (#25239) * [README.md] flake8 HTTPS URL (#25230) * [youtube] Add support for yewtu.be (#25226) * [soundcloud] reduce API playlist page limit(closes #25274) * [vimeo] improve format extraction and sorting(closes #25285) * [redtube] Improve title extraction (#25208) * [indavideo] Switch to HTTPS for API request (#25191) * [utils] Fix file permissions in write_json_file (closes #12471) (#25122) * [redtube] Improve formats extraction and extract m3u8 formats (closes #25311, closes #25321) * [ard] Improve _VALID_URL (closes #25134) (#25198) * [giantbomb] Extend _VALID_URL (#25222) * [postprocessor/ffmpeg] Embed series metadata with --add-metadata * [youtube] Add support for more invidious instances (#25417) * [ard:beta] Extend _VALID_URL (closes #25405) * [ChangeLog] Actualize [ci skip] * release 2020.05.29 * [jwplatform] Improve embeds extraction (closes #25467) * [periscope] Fix untitled broadcasts (#25482) * [twitter:broadcast] Add untitled periscope broadcast test * [malltv] Add support for sk.mall.tv (#25445) * [brightcove] Fix subtitles extraction (closes #25540) * [brightcove] Sort imports * [twitch] Pass v5 accept header and fix thumbnails extraction (closes #25531) * [twitch:stream] Fix extraction (closes #25528) * [twitch:stream] Expect 400 and 410 HTTP errors from API * [tele5] Prefer jwplatform over nexx (closes #25533) * [jwplatform] Add support for bypass geo restriction * [tele5] Bypass geo restriction * [ChangeLog] Actualize [ci skip] * release 2020.06.06 * [kaltura] Add support for multiple embeds on a webpage (closes #25523) * [youtube] Extract chapters from JSON (closes #24819) * [facebook] Support single-video ID links I stumbled upon this at https://www.facebook.com/bwfbadminton/posts/10157127020046316 . No idea how prevalent it is yet. * [youtube] Fix playlist and feed extraction (closes #25675) * [youtube] Fix thumbnails extraction and remove uploader id extraction warning (closes #25676) * [youtube] Fix upload date extraction * [youtube] Improve view count extraction * [youtube] Fix uploader id and uploader URL extraction * [ChangeLog] Actualize [ci skip] * release 2020.06.16 * [youtube] Fix categories and improve tags extraction * [youtube] Force old layout (closes #25682, closes #25683, closes #25680, closes #25686) * [ChangeLog] Actualize [ci skip] * release 2020.06.16.1 * [brightcove] Improve embed detection (closes #25674) * [bellmedia] add support for cp24.com clip URLs(closes #25764) * [youtube:playlists] Extend _VALID_URL (closes #25810) * [youtube] Prevent excess HTTP 301 (#25786) * [wistia] Restrict embed regex (closes #25969) * [youtube] Improve description extraction (closes #25937) (#25980) * [youtube] Fix sigfunc name extraction (closes #26134, closes #26135, closes #26136, closes #26137) * [ChangeLog] Actualize [ci skip] * release 2020.07.28 * [xhamster] Extend _VALID_URL (closes #25789) (#25804) * [xhamster] Fix extraction (closes #26157) (#26254) * [xhamster] Extend _VALID_URL (closes #25927) Co-authored-by: Remita Amine <remitamine@gmail.com> Co-authored-by: Sergey M․ <dstftw@gmail.com> Co-authored-by: nmeum <soeren+github@soeren-tempel.net> Co-authored-by: Roxedus <me@roxedus.dev> Co-authored-by: Singwai Chan <c.singwai@gmail.com> Co-authored-by: cdarlint <cdarlint@users.noreply.github.com> Co-authored-by: Johannes N <31795504+jonolt@users.noreply.github.com> Co-authored-by: jnozsc <jnozsc@gmail.com> Co-authored-by: Moritz Patelscheck <moritz.patelscheck@campus.tu-berlin.de> Co-authored-by: PB <3854688+uno20001@users.noreply.github.com> Co-authored-by: Philipp Hagemeister <phihag@phihag.de> Co-authored-by: Xaver Hellauer <software@hellauer.bayern> Co-authored-by: d2au <d2au.dev@gmail.com> Co-authored-by: Jan 'Yenda' Trmal <jtrmal@gmail.com> Co-authored-by: jxu <7989982+jxu@users.noreply.github.com> Co-authored-by: Martin Ström <name@my-domain.se> Co-authored-by: The Hatsune Daishi <nao20010128@gmail.com> Co-authored-by: tsia <github@tsia.de> Co-authored-by: 3risian <59593325+3risian@users.noreply.github.com> Co-authored-by: Tristan Waddington <tristan.waddington@gmail.com> Co-authored-by: Devon Meunier <devon.meunier@gmail.com> Co-authored-by: Felix Stupp <felix.stupp@outlook.com> Co-authored-by: tom <tomster954@gmail.com> Co-authored-by: AndrewMBL <62922222+AndrewMBL@users.noreply.github.com> Co-authored-by: willbeaufoy <will@willbeaufoy.net> Co-authored-by: Philipp Stehle <anderschwiedu@googlemail.com> Co-authored-by: hh0rva1h <61889859+hh0rva1h@users.noreply.github.com> Co-authored-by: comsomisha <shmelev1996@mail.ru> Co-authored-by: TotalCaesar659 <14265316+TotalCaesar659@users.noreply.github.com> Co-authored-by: Juan Francisco Cantero Hurtado <iam@juanfra.info> Co-authored-by: Dave Loyall <dave@the-good-guys.net> Co-authored-by: tlsssl <63866177+tlsssl@users.noreply.github.com> Co-authored-by: Rob <ankenyr@gmail.com> Co-authored-by: Michael Klein <github@a98shuttle.de> Co-authored-by: JordanWeatherby <47519158+JordanWeatherby@users.noreply.github.com> Co-authored-by: striker.sh <19488257+strikersh@users.noreply.github.com> Co-authored-by: Matej Dujava <mdujava@gmail.com> Co-authored-by: Glenn Slayden <5589855+glenn-slayden@users.noreply.github.com> Co-authored-by: MRWITEK <mrvvitek@gmail.com> Co-authored-by: JChris246 <43832407+JChris246@users.noreply.github.com> Co-authored-by: TheRealDude2 <the.real.dude@gmx.de>
813 lines
29 KiB
Python
813 lines
29 KiB
Python
# coding: utf-8
|
|
from __future__ import unicode_literals
|
|
|
|
import itertools
|
|
import re
|
|
|
|
from .common import (
|
|
InfoExtractor,
|
|
SearchInfoExtractor
|
|
)
|
|
from ..compat import (
|
|
compat_HTTPError,
|
|
compat_kwargs,
|
|
compat_str,
|
|
compat_urlparse,
|
|
)
|
|
from ..utils import (
|
|
error_to_compat_str,
|
|
ExtractorError,
|
|
float_or_none,
|
|
HEADRequest,
|
|
int_or_none,
|
|
KNOWN_EXTENSIONS,
|
|
mimetype2ext,
|
|
str_or_none,
|
|
try_get,
|
|
unified_timestamp,
|
|
update_url_query,
|
|
url_or_none,
|
|
urlhandle_detect_ext,
|
|
)
|
|
|
|
|
|
class SoundcloudEmbedIE(InfoExtractor):
|
|
_VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)'
|
|
_TEST = {
|
|
# from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolammen-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/
|
|
'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey',
|
|
'only_matching': True,
|
|
}
|
|
|
|
@staticmethod
|
|
def _extract_urls(webpage):
|
|
return [m.group('url') for m in re.finditer(
|
|
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
|
|
webpage)]
|
|
|
|
def _real_extract(self, url):
|
|
query = compat_urlparse.parse_qs(
|
|
compat_urlparse.urlparse(url).query)
|
|
api_url = query['url'][0]
|
|
secret_token = query.get('secret_token')
|
|
if secret_token:
|
|
api_url = update_url_query(api_url, {'secret_token': secret_token[0]})
|
|
return self.url_result(api_url)
|
|
|
|
|
|
class SoundcloudIE(InfoExtractor):
|
|
"""Information extractor for soundcloud.com
|
|
To access the media, the uid of the song and a stream token
|
|
must be extracted from the page source and the script must make
|
|
a request to media.soundcloud.com/crossdomain.xml. Then
|
|
the media can be grabbed by requesting from an url composed
|
|
of the stream token and uid
|
|
"""
|
|
|
|
_VALID_URL = r'''(?x)^(?:https?://)?
|
|
(?:(?:(?:www\.|m\.)?soundcloud\.com/
|
|
(?!stations/track)
|
|
(?P<uploader>[\w\d-]+)/
|
|
(?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#]))
|
|
(?P<title>[\w\d-]+)/?
|
|
(?P<token>[^?]+?)?(?:[?].*)?$)
|
|
|(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+)
|
|
(?:/?\?secret_token=(?P<secret_token>[^&]+))?)
|
|
)
|
|
'''
|
|
IE_NAME = 'soundcloud'
|
|
_TESTS = [
|
|
{
|
|
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
|
|
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
|
|
'info_dict': {
|
|
'id': '62986583',
|
|
'ext': 'mp3',
|
|
'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
|
|
'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
|
|
'uploader': 'E.T. ExTerrestrial Music',
|
|
'uploader_id': '1571244',
|
|
'timestamp': 1349920598,
|
|
'upload_date': '20121011',
|
|
'duration': 143.216,
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
}
|
|
},
|
|
# geo-restricted
|
|
{
|
|
'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
|
|
'info_dict': {
|
|
'id': '47127627',
|
|
'ext': 'mp3',
|
|
'title': 'Goldrushed',
|
|
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
|
|
'uploader': 'The Royal Concept',
|
|
'uploader_id': '9615865',
|
|
'timestamp': 1337635207,
|
|
'upload_date': '20120521',
|
|
'duration': 227.155,
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
},
|
|
# private link
|
|
{
|
|
'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
|
|
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
|
|
'info_dict': {
|
|
'id': '123998367',
|
|
'ext': 'mp3',
|
|
'title': 'Youtube - Dl Test Video \'\' Ä↭',
|
|
'description': 'test chars: \"\'/\\ä↭',
|
|
'uploader': 'jaimeMF',
|
|
'uploader_id': '69767071',
|
|
'timestamp': 1386604920,
|
|
'upload_date': '20131209',
|
|
'duration': 9.927,
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
},
|
|
# private link (alt format)
|
|
{
|
|
'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
|
|
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
|
|
'info_dict': {
|
|
'id': '123998367',
|
|
'ext': 'mp3',
|
|
'title': 'Youtube - Dl Test Video \'\' Ä↭',
|
|
'description': 'test chars: \"\'/\\ä↭',
|
|
'uploader': 'jaimeMF',
|
|
'uploader_id': '69767071',
|
|
'timestamp': 1386604920,
|
|
'upload_date': '20131209',
|
|
'duration': 9.927,
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
},
|
|
# downloadable song
|
|
{
|
|
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
|
|
'md5': '7624f2351f8a3b2e7cd51522496e7631',
|
|
'info_dict': {
|
|
'id': '128590877',
|
|
'ext': 'mp3',
|
|
'title': 'Bus Brakes',
|
|
'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
|
|
'uploader': 'oddsamples',
|
|
'uploader_id': '73680509',
|
|
'timestamp': 1389232924,
|
|
'upload_date': '20140109',
|
|
'duration': 17.346,
|
|
'license': 'cc-by-sa',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
},
|
|
# private link, downloadable format
|
|
{
|
|
'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd',
|
|
'md5': '64a60b16e617d41d0bef032b7f55441e',
|
|
'info_dict': {
|
|
'id': '340344461',
|
|
'ext': 'wav',
|
|
'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]',
|
|
'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366',
|
|
'uploader': 'Ori Uplift Music',
|
|
'uploader_id': '12563093',
|
|
'timestamp': 1504206263,
|
|
'upload_date': '20170831',
|
|
'duration': 7449.096,
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
},
|
|
# no album art, use avatar pic for thumbnail
|
|
{
|
|
'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real',
|
|
'md5': '59c7872bc44e5d99b7211891664760c2',
|
|
'info_dict': {
|
|
'id': '309699954',
|
|
'ext': 'mp3',
|
|
'title': 'Sideways (Prod. Mad Real)',
|
|
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
|
|
'uploader': 'garyvee',
|
|
'uploader_id': '2366352',
|
|
'timestamp': 1488152409,
|
|
'upload_date': '20170226',
|
|
'duration': 207.012,
|
|
'thumbnail': r're:https?://.*\.jpg',
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
'params': {
|
|
'skip_download': True,
|
|
},
|
|
},
|
|
{
|
|
'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer',
|
|
'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7',
|
|
'info_dict': {
|
|
'id': '583011102',
|
|
'ext': 'mp3',
|
|
'title': 'Mezzo Valzer',
|
|
'description': 'md5:4138d582f81866a530317bae316e8b61',
|
|
'uploader': 'Micronie',
|
|
'uploader_id': '3352531',
|
|
'timestamp': 1551394171,
|
|
'upload_date': '20190228',
|
|
'duration': 180.157,
|
|
'thumbnail': r're:https?://.*\.jpg',
|
|
'license': 'all-rights-reserved',
|
|
'view_count': int,
|
|
'like_count': int,
|
|
'comment_count': int,
|
|
'repost_count': int,
|
|
},
|
|
},
|
|
{
|
|
# with AAC HQ format available via OAuth token
|
|
'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1',
|
|
'only_matching': True,
|
|
},
|
|
]
|
|
|
|
_API_V2_BASE = 'https://api-v2.soundcloud.com/'
|
|
_BASE_URL = 'https://soundcloud.com/'
|
|
_IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg'
|
|
|
|
_ARTWORK_MAP = {
|
|
'mini': 16,
|
|
'tiny': 20,
|
|
'small': 32,
|
|
'badge': 47,
|
|
't67x67': 67,
|
|
'large': 100,
|
|
't300x300': 300,
|
|
'crop': 400,
|
|
't500x500': 500,
|
|
'original': 0,
|
|
}
|
|
|
|
def _store_client_id(self, client_id):
|
|
self._downloader.cache.store('soundcloud', 'client_id', client_id)
|
|
|
|
def _update_client_id(self):
|
|
webpage = self._download_webpage('https://soundcloud.com/', None)
|
|
for src in reversed(re.findall(r'<script[^>]+src="([^"]+)"', webpage)):
|
|
script = self._download_webpage(src, None, fatal=False)
|
|
if script:
|
|
client_id = self._search_regex(
|
|
r'client_id\s*:\s*"([0-9a-zA-Z]{32})"',
|
|
script, 'client id', default=None)
|
|
if client_id:
|
|
self._CLIENT_ID = client_id
|
|
self._store_client_id(client_id)
|
|
return
|
|
raise ExtractorError('Unable to extract client id')
|
|
|
|
def _download_json(self, *args, **kwargs):
|
|
non_fatal = kwargs.get('fatal') is False
|
|
if non_fatal:
|
|
del kwargs['fatal']
|
|
query = kwargs.get('query', {}).copy()
|
|
for _ in range(2):
|
|
query['client_id'] = self._CLIENT_ID
|
|
kwargs['query'] = query
|
|
try:
|
|
return super(SoundcloudIE, self)._download_json(*args, **compat_kwargs(kwargs))
|
|
except ExtractorError as e:
|
|
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
|
self._store_client_id(None)
|
|
self._update_client_id()
|
|
continue
|
|
elif non_fatal:
|
|
self._downloader.report_warning(error_to_compat_str(e))
|
|
return False
|
|
raise
|
|
|
|
def _real_initialize(self):
|
|
self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk'
|
|
|
|
@classmethod
|
|
def _resolv_url(cls, url):
|
|
return SoundcloudIE._API_V2_BASE + 'resolve?url=' + url
|
|
|
|
def _extract_info_dict(self, info, full_title=None, secret_token=None):
|
|
track_id = compat_str(info['id'])
|
|
title = info['title']
|
|
|
|
format_urls = set()
|
|
formats = []
|
|
query = {'client_id': self._CLIENT_ID}
|
|
if secret_token:
|
|
query['secret_token'] = secret_token
|
|
|
|
if info.get('downloadable') and info.get('has_downloads_left'):
|
|
download_url = update_url_query(
|
|
self._API_V2_BASE + 'tracks/' + track_id + '/download', query)
|
|
redirect_url = (self._download_json(download_url, track_id, fatal=False) or {}).get('redirectUri')
|
|
if redirect_url:
|
|
urlh = self._request_webpage(
|
|
HEADRequest(redirect_url), track_id, fatal=False)
|
|
if urlh:
|
|
format_url = urlh.geturl()
|
|
format_urls.add(format_url)
|
|
formats.append({
|
|
'format_id': 'download',
|
|
'ext': urlhandle_detect_ext(urlh) or 'mp3',
|
|
'filesize': int_or_none(urlh.headers.get('Content-Length')),
|
|
'url': format_url,
|
|
'preference': 10,
|
|
})
|
|
|
|
def invalid_url(url):
|
|
return not url or url in format_urls
|
|
|
|
def add_format(f, protocol, is_preview=False):
|
|
mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url)
|
|
if mobj:
|
|
for k, v in mobj.groupdict().items():
|
|
if not f.get(k):
|
|
f[k] = v
|
|
format_id_list = []
|
|
if protocol:
|
|
format_id_list.append(protocol)
|
|
ext = f.get('ext')
|
|
if ext == 'aac':
|
|
f['abr'] = '256'
|
|
for k in ('ext', 'abr'):
|
|
v = f.get(k)
|
|
if v:
|
|
format_id_list.append(v)
|
|
preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url'])
|
|
if preview:
|
|
format_id_list.append('preview')
|
|
abr = f.get('abr')
|
|
if abr:
|
|
f['abr'] = int(abr)
|
|
if protocol == 'hls':
|
|
protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
|
|
else:
|
|
protocol = 'http'
|
|
f.update({
|
|
'format_id': '_'.join(format_id_list),
|
|
'protocol': protocol,
|
|
'preference': -10 if preview else None,
|
|
})
|
|
formats.append(f)
|
|
|
|
# New API
|
|
transcodings = try_get(
|
|
info, lambda x: x['media']['transcodings'], list) or []
|
|
for t in transcodings:
|
|
if not isinstance(t, dict):
|
|
continue
|
|
format_url = url_or_none(t.get('url'))
|
|
if not format_url:
|
|
continue
|
|
stream = self._download_json(
|
|
format_url, track_id, query=query, fatal=False)
|
|
if not isinstance(stream, dict):
|
|
continue
|
|
stream_url = url_or_none(stream.get('url'))
|
|
if invalid_url(stream_url):
|
|
continue
|
|
format_urls.add(stream_url)
|
|
stream_format = t.get('format') or {}
|
|
protocol = stream_format.get('protocol')
|
|
if protocol != 'hls' and '/hls' in format_url:
|
|
protocol = 'hls'
|
|
ext = None
|
|
preset = str_or_none(t.get('preset'))
|
|
if preset:
|
|
ext = preset.split('_')[0]
|
|
if ext not in KNOWN_EXTENSIONS:
|
|
ext = mimetype2ext(stream_format.get('mime_type'))
|
|
add_format({
|
|
'url': stream_url,
|
|
'ext': ext,
|
|
}, 'http' if protocol == 'progressive' else protocol,
|
|
t.get('snipped') or '/preview/' in format_url)
|
|
|
|
for f in formats:
|
|
f['vcodec'] = 'none'
|
|
|
|
if not formats and info.get('policy') == 'BLOCK':
|
|
self.raise_geo_restricted()
|
|
self._sort_formats(formats)
|
|
|
|
user = info.get('user') or {}
|
|
|
|
thumbnails = []
|
|
artwork_url = info.get('artwork_url')
|
|
thumbnail = artwork_url or user.get('avatar_url')
|
|
if isinstance(thumbnail, compat_str):
|
|
if re.search(self._IMAGE_REPL_RE, thumbnail):
|
|
for image_id, size in self._ARTWORK_MAP.items():
|
|
i = {
|
|
'id': image_id,
|
|
'url': re.sub(self._IMAGE_REPL_RE, '-%s.jpg' % image_id, thumbnail),
|
|
}
|
|
if image_id == 'tiny' and not artwork_url:
|
|
size = 18
|
|
elif image_id == 'original':
|
|
i['preference'] = 10
|
|
if size:
|
|
i.update({
|
|
'width': size,
|
|
'height': size,
|
|
})
|
|
thumbnails.append(i)
|
|
else:
|
|
thumbnails = [{'url': thumbnail}]
|
|
|
|
def extract_count(key):
|
|
return int_or_none(info.get('%s_count' % key))
|
|
|
|
return {
|
|
'id': track_id,
|
|
'uploader': user.get('username'),
|
|
'uploader_id': str_or_none(user.get('id')) or user.get('permalink'),
|
|
'uploader_url': user.get('permalink_url'),
|
|
'timestamp': unified_timestamp(info.get('created_at')),
|
|
'title': title,
|
|
'description': info.get('description'),
|
|
'thumbnails': thumbnails,
|
|
'duration': float_or_none(info.get('duration'), 1000),
|
|
'webpage_url': info.get('permalink_url'),
|
|
'license': info.get('license'),
|
|
'view_count': extract_count('playback'),
|
|
'like_count': extract_count('favoritings') or extract_count('likes'),
|
|
'comment_count': extract_count('comment'),
|
|
'repost_count': extract_count('reposts'),
|
|
'genre': info.get('genre'),
|
|
'formats': formats
|
|
}
|
|
|
|
def _real_extract(self, url):
|
|
mobj = re.match(self._VALID_URL, url)
|
|
|
|
track_id = mobj.group('track_id')
|
|
|
|
query = {}
|
|
if track_id:
|
|
info_json_url = self._API_V2_BASE + 'tracks/' + track_id
|
|
full_title = track_id
|
|
token = mobj.group('secret_token')
|
|
if token:
|
|
query['secret_token'] = token
|
|
else:
|
|
full_title = resolve_title = '%s/%s' % mobj.group('uploader', 'title')
|
|
token = mobj.group('token')
|
|
if token:
|
|
resolve_title += '/%s' % token
|
|
info_json_url = self._resolv_url(self._BASE_URL + resolve_title)
|
|
|
|
info = self._download_json(
|
|
info_json_url, full_title, 'Downloading info JSON', query=query)
|
|
|
|
return self._extract_info_dict(info, full_title, token)
|
|
|
|
|
|
class SoundcloudPlaylistBaseIE(SoundcloudIE):
|
|
def _extract_set(self, playlist, token=None):
|
|
playlist_id = compat_str(playlist['id'])
|
|
tracks = playlist.get('tracks') or []
|
|
if not all([t.get('permalink_url') for t in tracks]) and token:
|
|
tracks = self._download_json(
|
|
self._API_V2_BASE + 'tracks', playlist_id,
|
|
'Downloading tracks', query={
|
|
'ids': ','.join([compat_str(t['id']) for t in tracks]),
|
|
'playlistId': playlist_id,
|
|
'playlistSecretToken': token,
|
|
})
|
|
entries = []
|
|
for track in tracks:
|
|
track_id = str_or_none(track.get('id'))
|
|
url = track.get('permalink_url')
|
|
if not url:
|
|
if not track_id:
|
|
continue
|
|
url = self._API_V2_BASE + 'tracks/' + track_id
|
|
if token:
|
|
url += '?secret_token=' + token
|
|
entries.append(self.url_result(
|
|
url, SoundcloudIE.ie_key(), track_id))
|
|
return self.playlist_result(
|
|
entries, playlist_id,
|
|
playlist.get('title'),
|
|
playlist.get('description'))
|
|
|
|
|
|
class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
|
|
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
|
|
IE_NAME = 'soundcloud:set'
|
|
_TESTS = [{
|
|
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
|
|
'info_dict': {
|
|
'id': '2284613',
|
|
'title': 'The Royal Concept EP',
|
|
'description': 'md5:71d07087c7a449e8941a70a29e34671e',
|
|
},
|
|
'playlist_mincount': 5,
|
|
}, {
|
|
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep/token',
|
|
'only_matching': True,
|
|
}]
|
|
|
|
def _real_extract(self, url):
|
|
mobj = re.match(self._VALID_URL, url)
|
|
|
|
full_title = '%s/sets/%s' % mobj.group('uploader', 'slug_title')
|
|
token = mobj.group('token')
|
|
if token:
|
|
full_title += '/' + token
|
|
|
|
info = self._download_json(self._resolv_url(
|
|
self._BASE_URL + full_title), full_title)
|
|
|
|
if 'errors' in info:
|
|
msgs = (compat_str(err['error_message']) for err in info['errors'])
|
|
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
|
|
|
|
return self._extract_set(info, token)
|
|
|
|
|
|
class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
|
|
def _extract_playlist(self, base_url, playlist_id, playlist_title):
|
|
COMMON_QUERY = {
|
|
'limit': 80000,
|
|
'linked_partitioning': '1',
|
|
}
|
|
|
|
query = COMMON_QUERY.copy()
|
|
query['offset'] = 0
|
|
|
|
next_href = base_url
|
|
|
|
entries = []
|
|
for i in itertools.count():
|
|
response = self._download_json(
|
|
next_href, playlist_id,
|
|
'Downloading track page %s' % (i + 1), query=query)
|
|
|
|
collection = response['collection']
|
|
|
|
if not isinstance(collection, list):
|
|
collection = []
|
|
|
|
# Empty collection may be returned, in this case we proceed
|
|
# straight to next_href
|
|
|
|
def resolve_entry(candidates):
|
|
for cand in candidates:
|
|
if not isinstance(cand, dict):
|
|
continue
|
|
permalink_url = url_or_none(cand.get('permalink_url'))
|
|
if not permalink_url:
|
|
continue
|
|
return self.url_result(
|
|
permalink_url,
|
|
SoundcloudIE.ie_key() if SoundcloudIE.suitable(permalink_url) else None,
|
|
str_or_none(cand.get('id')), cand.get('title'))
|
|
|
|
for e in collection:
|
|
entry = resolve_entry((e, e.get('track'), e.get('playlist')))
|
|
if entry:
|
|
entries.append(entry)
|
|
|
|
next_href = response.get('next_href')
|
|
if not next_href:
|
|
break
|
|
|
|
next_href = response['next_href']
|
|
parsed_next_href = compat_urlparse.urlparse(next_href)
|
|
query = compat_urlparse.parse_qs(parsed_next_href.query)
|
|
query.update(COMMON_QUERY)
|
|
|
|
return {
|
|
'_type': 'playlist',
|
|
'id': playlist_id,
|
|
'title': playlist_title,
|
|
'entries': entries,
|
|
}
|
|
|
|
|
|
class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
|
|
_VALID_URL = r'''(?x)
|
|
https?://
|
|
(?:(?:www|m)\.)?soundcloud\.com/
|
|
(?P<user>[^/]+)
|
|
(?:/
|
|
(?P<rsrc>tracks|albums|sets|reposts|likes|spotlight)
|
|
)?
|
|
/?(?:[?#].*)?$
|
|
'''
|
|
IE_NAME = 'soundcloud:user'
|
|
_TESTS = [{
|
|
'url': 'https://soundcloud.com/soft-cell-official',
|
|
'info_dict': {
|
|
'id': '207965082',
|
|
'title': 'Soft Cell (All)',
|
|
},
|
|
'playlist_mincount': 28,
|
|
}, {
|
|
'url': 'https://soundcloud.com/soft-cell-official/tracks',
|
|
'info_dict': {
|
|
'id': '207965082',
|
|
'title': 'Soft Cell (Tracks)',
|
|
},
|
|
'playlist_mincount': 27,
|
|
}, {
|
|
'url': 'https://soundcloud.com/soft-cell-official/albums',
|
|
'info_dict': {
|
|
'id': '207965082',
|
|
'title': 'Soft Cell (Albums)',
|
|
},
|
|
'playlist_mincount': 1,
|
|
}, {
|
|
'url': 'https://soundcloud.com/jcv246/sets',
|
|
'info_dict': {
|
|
'id': '12982173',
|
|
'title': 'Jordi / cv (Sets)',
|
|
},
|
|
'playlist_mincount': 2,
|
|
}, {
|
|
'url': 'https://soundcloud.com/jcv246/reposts',
|
|
'info_dict': {
|
|
'id': '12982173',
|
|
'title': 'Jordi / cv (Reposts)',
|
|
},
|
|
'playlist_mincount': 6,
|
|
}, {
|
|
'url': 'https://soundcloud.com/clalberg/likes',
|
|
'info_dict': {
|
|
'id': '11817582',
|
|
'title': 'clalberg (Likes)',
|
|
},
|
|
'playlist_mincount': 5,
|
|
}, {
|
|
'url': 'https://soundcloud.com/grynpyret/spotlight',
|
|
'info_dict': {
|
|
'id': '7098329',
|
|
'title': 'Grynpyret (Spotlight)',
|
|
},
|
|
'playlist_mincount': 1,
|
|
}]
|
|
|
|
_BASE_URL_MAP = {
|
|
'all': 'stream/users/%s',
|
|
'tracks': 'users/%s/tracks',
|
|
'albums': 'users/%s/albums',
|
|
'sets': 'users/%s/playlists',
|
|
'reposts': 'stream/users/%s/reposts',
|
|
'likes': 'users/%s/likes',
|
|
'spotlight': 'users/%s/spotlight',
|
|
}
|
|
|
|
def _real_extract(self, url):
|
|
mobj = re.match(self._VALID_URL, url)
|
|
uploader = mobj.group('user')
|
|
|
|
user = self._download_json(
|
|
self._resolv_url(self._BASE_URL + uploader),
|
|
uploader, 'Downloading user info')
|
|
|
|
resource = mobj.group('rsrc') or 'all'
|
|
|
|
return self._extract_playlist(
|
|
self._API_V2_BASE + self._BASE_URL_MAP[resource] % user['id'],
|
|
str_or_none(user.get('id')),
|
|
'%s (%s)' % (user['username'], resource.capitalize()))
|
|
|
|
|
|
class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE):
|
|
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)'
|
|
IE_NAME = 'soundcloud:trackstation'
|
|
_TESTS = [{
|
|
'url': 'https://soundcloud.com/stations/track/officialsundial/your-text',
|
|
'info_dict': {
|
|
'id': '286017854',
|
|
'title': 'Track station: your text',
|
|
},
|
|
'playlist_mincount': 47,
|
|
}]
|
|
|
|
def _real_extract(self, url):
|
|
track_name = self._match_id(url)
|
|
|
|
track = self._download_json(self._resolv_url(url), track_name)
|
|
track_id = self._search_regex(
|
|
r'soundcloud:track-stations:(\d+)', track['id'], 'track id')
|
|
|
|
return self._extract_playlist(
|
|
self._API_V2_BASE + 'stations/%s/tracks' % track['id'],
|
|
track_id, 'Track station: %s' % track['title'])
|
|
|
|
|
|
class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE):
|
|
_VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
|
|
IE_NAME = 'soundcloud:playlist'
|
|
_TESTS = [{
|
|
'url': 'https://api.soundcloud.com/playlists/4110309',
|
|
'info_dict': {
|
|
'id': '4110309',
|
|
'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
|
|
'description': 're:.*?TILT Brass - Bowery Poetry Club',
|
|
},
|
|
'playlist_count': 6,
|
|
}]
|
|
|
|
def _real_extract(self, url):
|
|
mobj = re.match(self._VALID_URL, url)
|
|
playlist_id = mobj.group('id')
|
|
|
|
query = {}
|
|
token = mobj.group('token')
|
|
if token:
|
|
query['secret_token'] = token
|
|
|
|
data = self._download_json(
|
|
self._API_V2_BASE + 'playlists/' + playlist_id,
|
|
playlist_id, 'Downloading playlist', query=query)
|
|
|
|
return self._extract_set(data, token)
|
|
|
|
|
|
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
|
|
IE_NAME = 'soundcloud:search'
|
|
IE_DESC = 'Soundcloud search'
|
|
_MAX_RESULTS = float('inf')
|
|
_TESTS = [{
|
|
'url': 'scsearch15:post-avant jazzcore',
|
|
'info_dict': {
|
|
'title': 'post-avant jazzcore',
|
|
},
|
|
'playlist_count': 15,
|
|
}]
|
|
|
|
_SEARCH_KEY = 'scsearch'
|
|
_MAX_RESULTS_PER_PAGE = 200
|
|
_DEFAULT_RESULTS_PER_PAGE = 50
|
|
|
|
def _get_collection(self, endpoint, collection_id, **query):
|
|
limit = min(
|
|
query.get('limit', self._DEFAULT_RESULTS_PER_PAGE),
|
|
self._MAX_RESULTS_PER_PAGE)
|
|
query.update({
|
|
'limit': limit,
|
|
'linked_partitioning': 1,
|
|
'offset': 0,
|
|
})
|
|
next_url = update_url_query(self._API_V2_BASE + endpoint, query)
|
|
|
|
collected_results = 0
|
|
|
|
for i in itertools.count(1):
|
|
response = self._download_json(
|
|
next_url, collection_id, 'Downloading page {0}'.format(i),
|
|
'Unable to download API page')
|
|
|
|
collection = response.get('collection', [])
|
|
if not collection:
|
|
break
|
|
|
|
collection = list(filter(bool, collection))
|
|
collected_results += len(collection)
|
|
|
|
for item in collection:
|
|
yield self.url_result(item['uri'], SoundcloudIE.ie_key())
|
|
|
|
if not collection or collected_results >= limit:
|
|
break
|
|
|
|
next_url = response.get('next_href')
|
|
if not next_url:
|
|
break
|
|
|
|
def _get_n_results(self, query, n):
|
|
tracks = self._get_collection('search/tracks', query, limit=n, q=query)
|
|
return self.playlist_result(tracks, playlist_title=query)
|