Mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2024-11-21 20:46:36 -05:00
fix some not important codesnips
parent fdf01663d1
commit 98c3806b15
1 changed file with 6 additions and 8 deletions
@@ -1,15 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import json
 import base64
 
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse,
-    compat_cookiejar,
-    compat_cookies,
-    compat_urllib_request,
     compat_ord,
 )
 from ..utils import (
@@ -89,7 +85,7 @@ def yk_t(s1, s2):
         ).decode('ascii').split('_')
 
         # get oip
-        oip = data2['security']['ip']
+        oip = data1['security']['ip']
 
         # get fileid
         string_ls = list(
@@ -136,7 +132,8 @@ def generate_ep(format, n):
             format = stream.get('stream_type')
             video_urls = []
             for dt in stream['segs']:
-                n = str(int(dt['size']))
+                #n = str(int(dt['size']))
+                n = str(stream['segs'].index(dt))
                 param = {
                     'K': dt['key'],
                     'hd': self.get_hd(format),
@@ -177,6 +174,8 @@ def parse_ext_l(self, fm):
         ext_dict = {
             'flv': 'flv',
             'mp4': 'mp4',
+            'mp4hd': 'mp4',
+            'mp4hd2': 'mp4',
             'hd2': 'flv',
             'hd3': 'flv',
             '3gp': 'flv',
@@ -214,7 +213,6 @@ def retrieve_data(req_url, note):
                 req.add_header('Ytdl-request-proxy', cn_verification_proxy)
 
             raw_data = self._download_json(req, video_id, note=note)
-            jsonDumpIn = json.dumps(raw_data,indent = 1)
 
             return raw_data['data']
 
@@ -260,7 +258,7 @@ def retrieve_data(req_url, note):
             'formats': [],
             # some formats are not available for all parts, we have to detect
             # which one has all
-        } for i in range(max(len(v) for v in data1['stream']))]
+        } for i in range(max(len(v.get('segs')) for v in data1['stream']))]
         for stream in data1['stream']:
             fm = stream.get('stream_type')
             video_urls = video_urls_dict[fm]
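
The most substantive change here is how the per-segment value n is computed: it is now the segment's position in stream['segs'] rather than the segment's byte size. The following standalone sketch illustrates the before/after behaviour only; the stream dict in it is made-up sample data, not taken from the extractor or from Youku's API.

# Sketch of the change to `n` in the segment loop (hunk @@ -136,7 +132,8 @@).
# The `stream` dict below is hypothetical sample data for illustration.
stream = {
    'stream_type': 'mp4hd',
    'segs': [
        {'key': 'key0', 'size': 1048576},
        {'key': 'key1', 'size': 2097152},
    ],
}

for dt in stream['segs']:
    old_n = str(int(dt['size']))           # before: n was the segment's byte size
    new_n = str(stream['segs'].index(dt))  # after: n is the segment's index ('0', '1', ...)
    print(old_n, '->', new_n)

Note that stream['segs'].index(dt) re-scans the list on every iteration; enumerate() over stream['segs'] would yield the same index directly.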