Mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2024-11-21 20:46:36 -05:00
[kuwo] Use single quotes
parent a9684c0dbf
commit cf2c5fda4f
1 changed file with 11 additions and 11 deletions
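
All 11 changed lines apply the same mechanical edit: double-quoted string literals in the kuwo extractor become single-quoted, matching the quoting style already used in the surrounding code (e.g. 'singer name', 'Kuwo'). The edit is behavior-preserving, since both literal forms denote the same str value; a trivial standalone check (not taken from the file):

assert "lrcContent" == 'lrcContent'
assert "Download %s url info" % "flac" == 'Download %s url info' % 'flac'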
@@ -27,9 +27,9 @@ def _get_formats(self, song_id):
         formats = []
         for file_format in self._FORMATS:
             song_url = self._download_webpage(
-                "http://antiserver.kuwo.cn/anti.s?format=%s&br=%s&rid=MUSIC_%s&type=convert_url&response=url" %
+                'http://antiserver.kuwo.cn/anti.s?format=%s&br=%s&rid=MUSIC_%s&type=convert_url&response=url' %
                 (file_format['ext'], file_format.get('br', ''), song_id),
-                song_id, note="Download %s url info" % file_format["format"],
+                song_id, note='Download %s url info' % file_format['format'],
             )
             if song_url.startswith('http://') or song_url.startswith('https://'):
                 formats.append({
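
The hunk above is the format-probing loop: for each entry in self._FORMATS it builds a per-format query for antiserver.kuwo.cn with %-formatting and keeps the response only if it is an absolute http(s) URL. A minimal standalone sketch of that pattern, with urllib standing in for the extractor's _download_webpage helper and an illustrative two-entry format table (the ext/br values are assumptions, not taken from this diff):

from urllib.request import urlopen

FORMATS = [
    {'format': 'ape', 'ext': 'ape'},                       # illustrative entries only
    {'format': 'mp3-320', 'ext': 'mp3', 'br': '320kmp3'},
]

def fetch_format_urls(song_id):
    urls = []
    for file_format in FORMATS:
        query = (
            'http://antiserver.kuwo.cn/anti.s?format=%s&br=%s&rid=MUSIC_%s'
            '&type=convert_url&response=url'
            % (file_format['ext'], file_format.get('br', ''), song_id))
        song_url = urlopen(query).read().decode('utf-8').strip()
        # Same guard as in the hunk: only accept absolute http(s) URLs.
        if song_url.startswith('http://') or song_url.startswith('https://'):
            urls.append(song_url)
    return urls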
@@ -81,7 +81,7 @@ def _real_extract(self, url):
         singer_name = self._html_search_regex(
             r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"',
             webpage, 'singer name', default=None)
-        lrc_content = clean_html(get_element_by_id("lrcContent", webpage))
+        lrc_content = clean_html(get_element_by_id('lrcContent', webpage))
         if lrc_content == '暂无':  # indicates no lyrics
             lrc_content = None

@@ -94,7 +94,7 @@ def _real_extract(self, url):
         publish_time = None
         if album_id is not None:
             album_info_page = self._download_webpage(
-                "http://www.kuwo.cn/album/%s/" % album_id, song_id,
+                'http://www.kuwo.cn/album/%s/' % album_id, song_id,
                 note='Download album detail info',
                 errnote='Unable to get album detail info')

@@ -138,11 +138,11 @@ def _real_extract(self, url):
             r'<div[^>]+class="comm"[^<]+<h1[^>]+title="([^"]+)"', webpage,
             'album name')
         album_intro = remove_start(
-            clean_html(get_element_by_id("intro", webpage)),
+            clean_html(get_element_by_id('intro', webpage)),
             '%s简介:' % album_name)

         entries = [
-            self.url_result("http://www.kuwo.cn/yinyue/%s/" % song_id, 'Kuwo', song_id)
+            self.url_result('http://www.kuwo.cn/yinyue/%s/' % song_id, 'Kuwo', song_id)
             for song_id in re.findall(
                 r'<p[^>]+class="listen"><a[^>]+href="http://www\.kuwo\.cn/yinyue/(\d+)/"',
                 webpage)
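
Note that the regular expressions in this hunk keep their embedded double quotes: those match literal " characters in Kuwo's HTML attributes, so only the outer Python quoting is affected by this commit. A quick standalone check with the 'listen' pattern from the hunk (the sample markup is made up for illustration):

import re

webpage = '<p class="listen"><a href="http://www.kuwo.cn/yinyue/123456/">listen</a></p>'

song_ids = re.findall(
    r'<p[^>]+class="listen"><a[^>]+href="http://www\.kuwo\.cn/yinyue/(\d+)/"',
    webpage)
assert song_ids == ['123456']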
@@ -176,7 +176,7 @@ def _real_extract(self, url):
             r'<p[^>]+class="tabDef">(\d{4}第\d{2}期)</p>', webpage, 'chart desc')

         entries = [
-            self.url_result("http://www.kuwo.cn/yinyue/%s/" % song_id, 'Kuwo', song_id)
+            self.url_result('http://www.kuwo.cn/yinyue/%s/' % song_id, 'Kuwo', song_id)
             for song_id in re.findall(
                 r'<a[^>]+href="http://www\.kuwo\.cn/yinyue/(\d+)/"', webpage)
         ]
@@ -221,7 +221,7 @@ def _real_extract(self, url):
                 errnote='Unable to get song list page #%d' % page_num)

             entries.extend([
-                self.url_result("http://www.kuwo.cn/yinyue/%s/" % song_id, 'Kuwo', song_id)
+                self.url_result('http://www.kuwo.cn/yinyue/%s/' % song_id, 'Kuwo', song_id)
                 for song_id in re.findall(
                     r'<p[^>]+class="m_name"><a[^>]+href="http://www\.kuwo\.cn/yinyue/([0-9]+)/',
                     webpage)
@@ -256,7 +256,7 @@ def _real_extract(self, url):
             r'<h1[^>]+title="([^<>]+?)">[^<>]+?</h1>', webpage, 'category name')

         category_desc = remove_start(
-            get_element_by_id("intro", webpage).strip(),
+            get_element_by_id('intro', webpage).strip(),
             '%s简介:' % category_name)

         jsonm = self._parse_json(self._html_search_regex(
@@ -264,7 +264,7 @@ def _real_extract(self, url):

         entries = [
             self.url_result(
-                "http://www.kuwo.cn/yinyue/%s/" % song['musicrid'],
+                'http://www.kuwo.cn/yinyue/%s/' % song['musicrid'],
                 'Kuwo', song['musicrid'])
             for song in jsonm['musiclist']
         ]
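
The hunk above turns the JSON payload's musiclist entries into playlist items. A standalone sketch of that step, with json.loads standing in for the extractor's _parse_json helper and a made-up payload shaped after the field names in the diff:

import json

jsonm = json.loads('{"musiclist": [{"musicrid": "6446136"}, {"musicrid": "228927"}]}')

entries = [
    'http://www.kuwo.cn/yinyue/%s/' % song['musicrid']
    for song in jsonm['musiclist']
]
assert entries == [
    'http://www.kuwo.cn/yinyue/6446136/',
    'http://www.kuwo.cn/yinyue/228927/',
]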
@@ -301,7 +301,7 @@ def _real_extract(self, url):
             song_name = mobj.group('song')
             singer_name = mobj.group('singer')
         else:
-            raise ExtractorError("Unable to find song or singer names")
+            raise ExtractorError('Unable to find song or singer names')

         formats = self._get_formats(song_id)
