import concurrent.futures
import contextlib
import http.client
import json
import math
import os
import struct
import time
import urllib.error

from .common import FileDownloader
from .http import HttpFD
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_os_name
from ..utils import (
    DownloadError,
    encodeFilename,
    error_to_compat_str,
    sanitized_Request,
    traverse_obj,
)


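# A quiet variant of the plain HTTP downloader, used internally to fetch
# individual fragments: per-request console output is suppressed so that only
# the aggregate fragment progress is reported.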
class HttpQuietDownloader(HttpFD):
    def to_screen(self, *args, **kargs):
        pass

    to_console_title = to_screen


class FragmentFD(FileDownloader):
    """
    A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).

    Available options:

    fragment_retries:   Number of times to retry a fragment on HTTP error (DASH
                        and hlsnative only)
    skip_unavailable_fragments:
                        Skip unavailable fragments (DASH and hlsnative only)
    keep_fragments:     Keep downloaded fragments on disk after downloading is
                        finished
    concurrent_fragment_downloads:  The number of threads to use for native hls and dash downloads
    _no_ytdl_file:      Don't use .ytdl file

    For each incomplete fragment download, yt-dlp keeps on disk a special
    bookkeeping file with download state and metadata (in the future such files
    will be used for any incomplete download handled by yt-dlp). This file is
    used to properly handle resuming, check download file consistency and detect
    potential errors. The file has a .ytdl extension and is a standard
    JSON file of the following format:

    extractor:
        Dictionary of extractor-related data. TBD.

    downloader:
        Dictionary of downloader-related data. May contain the following data:
            current_fragment:
                Dictionary with data on the fragment currently being downloaded:
                index:  0-based index of the current fragment among all fragments
            fragment_count:
                Total count of fragments

    This feature is experimental and the file format may change in the future.
    """

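    # For illustration, a minimal .ytdl file in the format described above
    # might look like this (values are hypothetical):
    #   {"downloader": {"current_fragment": {"index": 17}, "fragment_count": 120}}
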
    def report_retry_fragment(self, err, frag_index, count, retries):
        self.to_screen(
            '\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
            % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
        self.sleep_retry('fragment', count)

    def report_skip_fragment(self, frag_index, err=None):
        err = f' {err};' if err else ''
        self.to_screen(f'[download]{err} Skipping fragment {frag_index:d} ...')

    def _prepare_url(self, info_dict, url):
        headers = info_dict.get('http_headers')
        return sanitized_Request(url, None, headers) if headers else url

    def _prepare_and_start_frag_download(self, ctx, info_dict):
        self._prepare_frag_download(ctx)
        self._start_frag_download(ctx, info_dict)

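    # The .ytdl bookkeeping file is only used for non-live downloads that go
    # to a real file, and can be disabled entirely via _no_ytdl_file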
    def __do_ytdl_file(self, ctx):
        return ctx['live'] is not True and ctx['tmpfilename'] != '-' and not self.params.get('_no_ytdl_file')

    def _read_ytdl_file(self, ctx):
        assert 'ytdl_corrupt' not in ctx
        stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
        try:
            ytdl_data = json.loads(stream.read())
            ctx['fragment_index'] = ytdl_data['downloader']['current_fragment']['index']
            if 'extra_state' in ytdl_data['downloader']:
                ctx['extra_state'] = ytdl_data['downloader']['extra_state']
        except Exception:
            # Malformed or truncated .ytdl file; flag it so the caller can
            # restart the download from the beginning
            ctx['ytdl_corrupt'] = True
        finally:
            stream.close()

    def _write_ytdl_file(self, ctx):
        frag_index_stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        try:
            downloader = {
                'current_fragment': {
                    'index': ctx['fragment_index'],
                },
            }
            if 'extra_state' in ctx:
                downloader['extra_state'] = ctx['extra_state']
            if ctx.get('fragment_count') is not None:
                downloader['fragment_count'] = ctx['fragment_count']
            frag_index_stream.write(json.dumps({'downloader': downloader}))
        finally:
            frag_index_stream.close()

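    # Download a single fragment to '<tmpfilename>-Frag<index>' via the quiet
    # HTTP downloader, recording the fragment's filename and mtime in ctx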
    def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
        fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
        fragment_info_dict = {
            'url': frag_url,
            'http_headers': headers or info_dict.get('http_headers'),
            'request_data': request_data,
            'ctx_id': ctx.get('ctx_id'),
        }
        success, _ = ctx['dl'].download(fragment_filename, fragment_info_dict)
        if not success:
            return False
        if fragment_info_dict.get('filetime'):
            ctx['fragment_filetime'] = fragment_info_dict.get('filetime')
        ctx['fragment_filename_sanitized'] = fragment_filename
        return True

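    # Read back the fragment that was just downloaded; returns None if nothing
    # was downloaded, or if a live fragment's file has already disappeared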
    def _read_fragment(self, ctx):
        if not ctx.get('fragment_filename_sanitized'):
            return None
        try:
            down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb')
        except FileNotFoundError:
            if ctx.get('live'):
                return None
            raise
        ctx['fragment_filename_sanitized'] = frag_sanitized
        frag_content = down.read()
        down.close()
        return frag_content

    def _append_fragment(self, ctx, frag_content):
        try:
            ctx['dest_stream'].write(frag_content)
            ctx['dest_stream'].flush()
        finally:
            if self.__do_ytdl_file(ctx):
                self._write_ytdl_file(ctx)
            if not self.params.get('keep_fragments', False):
                self.try_remove(encodeFilename(ctx['fragment_filename_sanitized']))
            del ctx['fragment_filename_sanitized']

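    # Open the destination stream and initialize ctx, restoring resume state
    # from an existing temp file and .ytdl bookkeeping file when present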
    def _prepare_frag_download(self, ctx):
        if 'live' not in ctx:
            ctx['live'] = False
        if not ctx['live']:
            total_frags_str = '%d' % ctx['total_frags']
            ad_frags = ctx.get('ad_frags', 0)
            if ad_frags:
                total_frags_str += ' (not including %d ad)' % ad_frags
        else:
            total_frags_str = 'unknown (live)'
        self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
        self.report_destination(ctx['filename'])
        dl = HttpQuietDownloader(self.ydl, {
            **self.params,
            'noprogress': True,
            'test': False,
        })
        tmpfilename = self.temp_name(ctx['filename'])
        open_mode = 'wb'
        resume_len = 0

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            open_mode = 'ab'
            resume_len = os.path.getsize(encodeFilename(tmpfilename))

        # Should be initialized before ytdl file check
        ctx.update({
            'tmpfilename': tmpfilename,
            'fragment_index': 0,
        })

        if self.__do_ytdl_file(ctx):
            if os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))):
                self._read_ytdl_file(ctx)
                is_corrupt = ctx.get('ytdl_corrupt') is True
                is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0
                if is_corrupt or is_inconsistent:
                    message = (
                        '.ytdl file is corrupt' if is_corrupt else
                        'Inconsistent state of incomplete fragment download')
                    self.report_warning(
                        '%s. Restarting from the beginning ...' % message)
                    ctx['fragment_index'] = resume_len = 0
                    if 'ytdl_corrupt' in ctx:
                        del ctx['ytdl_corrupt']
                    self._write_ytdl_file(ctx)
            else:
                self._write_ytdl_file(ctx)
                assert ctx['fragment_index'] == 0

        dest_stream, tmpfilename = self.sanitize_open(tmpfilename, open_mode)

        ctx.update({
            'dl': dl,
            'dest_stream': dest_stream,
            'tmpfilename': tmpfilename,
            # Total complete fragments downloaded so far in bytes
            'complete_frags_downloaded_bytes': resume_len,
        })

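    # Install a progress hook that folds per-fragment progress into the
    # overall download state; returns the download start timestamp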
    def _start_frag_download(self, ctx, info_dict):
        resume_len = ctx['complete_frags_downloaded_bytes']
        total_frags = ctx['total_frags']
        ctx_id = ctx.get('ctx_id')
        # This dict stores the download progress; it's updated by the
        # progress hook
        state = {
            'status': 'downloading',
            'downloaded_bytes': resume_len,
            'fragment_index': ctx['fragment_index'],
            'fragment_count': total_frags,
            'filename': ctx['filename'],
            'tmpfilename': ctx['tmpfilename'],
        }

        start = time.time()
        ctx.update({
            'started': start,
            'fragment_started': start,
            # Amount of the fragment's bytes downloaded by the time of the
            # previous frag progress hook invocation
            'prev_frag_downloaded_bytes': 0,
        })

        def frag_progress_hook(s):
            if s['status'] not in ('downloading', 'finished'):
                return

            if not total_frags and ctx.get('fragment_count'):
                state['fragment_count'] = ctx['fragment_count']

            if ctx_id is not None and s.get('ctx_id') != ctx_id:
                return

            state['max_progress'] = ctx.get('max_progress')
            state['progress_idx'] = ctx.get('progress_idx')

            time_now = time.time()
            state['elapsed'] = time_now - start
            frag_total_bytes = s.get('total_bytes') or 0
            s['fragment_info_dict'] = s.pop('info_dict', {})
            if not ctx['live']:
                # Linear extrapolation: the average size of the fragments seen
                # so far (including the one in flight) times the fragment count
                estimated_size = (
                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
                    / (state['fragment_index'] + 1) * total_frags)
                state['total_bytes_estimate'] = estimated_size

            if s['status'] == 'finished':
                state['fragment_index'] += 1
                ctx['fragment_index'] = state['fragment_index']
                state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
                ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
                ctx['speed'] = state['speed'] = self.calc_speed(
                    ctx['fragment_started'], time_now, frag_total_bytes)
                ctx['fragment_started'] = time.time()
                ctx['prev_frag_downloaded_bytes'] = 0
            else:
                frag_downloaded_bytes = s['downloaded_bytes']
                state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
                if not ctx['live']:
                    state['eta'] = self.calc_eta(
                        start, time_now, estimated_size - resume_len,
                        state['downloaded_bytes'] - resume_len)
                ctx['speed'] = state['speed'] = self.calc_speed(
                    ctx['fragment_started'], time_now, frag_downloaded_bytes)
                ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
            self._hook_progress(state, info_dict)

        ctx['dl'].add_progress_hook(frag_progress_hook)

        return start

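    # Close the output, drop the .ytdl bookkeeping file, move the temp file
    # into place and emit the final 'finished' progress event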
    def _finish_frag_download(self, ctx, info_dict):
        ctx['dest_stream'].close()
        if self.__do_ytdl_file(ctx):
            ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename']))
            if os.path.isfile(ytdl_filename):
                self.try_remove(ytdl_filename)
        elapsed = time.time() - ctx['started']

        if ctx['tmpfilename'] == '-':
            downloaded_bytes = ctx['complete_frags_downloaded_bytes']
        else:
            self.try_rename(ctx['tmpfilename'], ctx['filename'])
            if self.params.get('updatetime', True):
                filetime = ctx.get('fragment_filetime')
                if filetime:
                    with contextlib.suppress(Exception):
                        os.utime(ctx['filename'], (time.time(), filetime))
            downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename']))

        self._hook_progress({
            'downloaded_bytes': downloaded_bytes,
            'total_bytes': downloaded_bytes,
            'filename': ctx['filename'],
            'status': 'finished',
            'elapsed': elapsed,
            'ctx_id': ctx.get('ctx_id'),
            'max_progress': ctx.get('max_progress'),
            'progress_idx': ctx.get('progress_idx'),
        }, info_dict)

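    # Like _prepare_frag_download, but only announces the fragment count and
    # initializes ctx; no destination stream or .ytdl file is opened, since an
    # external downloader writes the output itself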
    def _prepare_external_frag_download(self, ctx):
        if 'live' not in ctx:
            ctx['live'] = False
        if not ctx['live']:
            total_frags_str = '%d' % ctx['total_frags']
            ad_frags = ctx.get('ad_frags', 0)
            if ad_frags:
                total_frags_str += ' (not including %d ad)' % ad_frags
        else:
            total_frags_str = 'unknown (live)'
        self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')

        tmpfilename = self.temp_name(ctx['filename'])

        # Should be initialized before ytdl file check
        ctx.update({
            'tmpfilename': tmpfilename,
            'fragment_index': 0,
        })

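    # Build a decrypt_fragment(fragment, frag_content) callable for HLS
    # AES-128 fragments; decryption keys are fetched lazily and cached per
    # key URL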
    def decrypter(self, info_dict):
        _key_cache = {}

        def _get_key(url):
            if url not in _key_cache:
                _key_cache[url] = self.ydl.urlopen(self._prepare_url(info_dict, url)).read()
            return _key_cache[url]

        def decrypt_fragment(fragment, frag_content):
            decrypt_info = fragment.get('decrypt_info')
            if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
                return frag_content
            # Per the HLS spec, when no IV is given, the media sequence
            # number padded to 16 big-endian bytes is used
            iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence'])
            decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
            # Don't decrypt the content in tests, since the data is explicitly truncated, and not to a valid
            # block size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the
            # correct data was downloaded, not what it decrypts to.
            if self.params.get('test', False):
                return frag_content
            return unpad_pkcs7(aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv))

        return decrypt_fragment

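    # Download several fragmented formats concurrently: each (ctx, fragments,
    # info_dict) tuple gets its own thread pool and progress line, with the
    # configured number of workers divided between the pools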
    def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
        '''
        @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
        each arg must be a tuple or list
        '''
        interrupt_trigger = [True]
        max_progress = len(args)
        if max_progress == 1:
            return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
        max_workers = self.params.get('concurrent_fragment_downloads', 1)
        if max_progress > 1:
            self._prepare_multiline_status(max_progress)
        is_live = any(traverse_obj(args, (..., 2, 'is_live'), default=[]))

        def thread_func(idx, ctx, fragments, info_dict, tpe):
            ctx['max_progress'] = max_progress
            ctx['progress_idx'] = idx
            return self.download_and_append_fragments(
                ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func,
                tpe=tpe, interrupt_trigger=interrupt_trigger)

        class FTPE(concurrent.futures.ThreadPoolExecutor):
            # The default __exit__ would call shutdown(wait=True) from within
            # one of this executor's own worker threads and deadlock, so it
            # has to be suppressed
            def __exit__(self, exc_type, exc_val, exc_tb):
                pass

        if compat_os_name == 'nt':
            def future_result(future):
                # Poll with a short timeout so that KeyboardInterrupt is
                # delivered promptly on Windows
                while True:
                    try:
                        return future.result(0.1)
                    except KeyboardInterrupt:
                        raise
                    except concurrent.futures.TimeoutError:
                        continue
        else:
            def future_result(future):
                return future.result()

        def interrupt_trigger_iter(fg):
            for f in fg:
                if not interrupt_trigger[0]:
                    break
                yield f

        spins = []
        for idx, (ctx, fragments, info_dict) in enumerate(args):
            tpe = FTPE(math.ceil(max_workers / max_progress))
            job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe)
            spins.append((tpe, job))

        result = True
        for tpe, job in spins:
            try:
                result = result and future_result(job)
            except KeyboardInterrupt:
                interrupt_trigger[0] = False
            finally:
                tpe.shutdown(wait=True)
        if not interrupt_trigger[0] and not is_live:
            raise KeyboardInterrupt()
        # On interrupted live downloads we assume the user wants to stop but
        # DOES want the preceding postprocessors to run, so an intermediate
        # result is returned here instead of raising KeyboardInterrupt
        return result

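    # Download, decrypt and append every fragment of a single format.
    # pack_func post-processes each fragment's bytes before they are appended;
    # finish_func, if given, supplies trailing data once all fragments are done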
    def download_and_append_fragments(
            self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None,
            tpe=None, interrupt_trigger=None):
        if not interrupt_trigger:
            interrupt_trigger = (True, )

        fragment_retries = self.params.get('fragment_retries', 0)
        is_fatal = (
            ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0))
            if self.params.get('skip_unavailable_fragments', True) else (lambda _: True))

        if not pack_func:
            pack_func = lambda frag_content, _: frag_content

        def download_fragment(fragment, ctx):
            if not interrupt_trigger[0]:
                return

            frag_index = ctx['fragment_index'] = fragment['frag_index']
            ctx['last_error'] = None
            headers = info_dict.get('http_headers', {}).copy()
            byte_range = fragment.get('byte_range')
            if byte_range:
                headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)

            # Never skip the first fragment
            fatal, count = is_fatal(fragment.get('index') or (frag_index - 1)), 0
            while count <= fragment_retries:
                try:
                    ctx['fragment_count'] = fragment.get('fragment_count')
                    if self._download_fragment(ctx, fragment['url'], info_dict, headers):
                        break
                    return
                except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
                    # Unavailable (possibly temporary) fragments may be served.
                    # First we retry, then either skip or abort.
                    # See https://github.com/ytdl-org/youtube-dl/issues/10165 and
                    # https://github.com/ytdl-org/youtube-dl/issues/10448.
                    count += 1
                    ctx['last_error'] = err
                    if count <= fragment_retries:
                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
                except DownloadError:
                    # Don't retry a fragment if the error occurred during HTTP
                    # downloading itself, since that has its own retry settings
                    if not fatal:
                        break
                    raise

            if count > fragment_retries and fatal:
                ctx['dest_stream'].close()
                self.report_error('Giving up after %s fragment retries' % fragment_retries)

        def append_fragment(frag_content, frag_index, ctx):
            if frag_content:
                self._append_fragment(ctx, pack_func(frag_content, frag_index))
            elif not is_fatal(frag_index - 1):
                self.report_skip_fragment(frag_index, 'fragment not found')
            else:
                ctx['dest_stream'].close()
                self.report_error(f'fragment {frag_index} not found, unable to continue')
                return False
            return True

        decrypt_fragment = self.decrypter(info_dict)

        max_workers = math.ceil(
            self.params.get('concurrent_fragment_downloads', 1) / ctx.get('max_progress', 1))
        if max_workers > 1:
            def _download_fragment(fragment):
                ctx_copy = ctx.copy()
                download_fragment(fragment, ctx_copy)
                return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized')

            self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
            with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
                try:
                    for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments):
                        ctx.update({
                            'fragment_filename_sanitized': frag_filename,
                            'fragment_index': frag_index,
                        })
                        if not append_fragment(decrypt_fragment(fragment, self._read_fragment(ctx)), frag_index, ctx):
                            return False
                except KeyboardInterrupt:
                    self._finish_multiline_status()
                    self.report_error(
                        'Interrupted by user. Waiting for all threads to shutdown...', is_error=False, tb=False)
                    pool.shutdown(wait=False)
                    raise
        else:
            for fragment in fragments:
                if not interrupt_trigger[0]:
                    break
                try:
                    download_fragment(fragment, ctx)
                    result = append_fragment(
                        decrypt_fragment(fragment, self._read_fragment(ctx)), fragment['frag_index'], ctx)
                except KeyboardInterrupt:
                    if info_dict.get('is_live'):
                        break
                    raise
                if not result:
                    return False

        if finish_func is not None:
            ctx['dest_stream'].write(finish_func())
            ctx['dest_stream'].flush()
        self._finish_frag_download(ctx, info_dict)
        return True
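

# A typical caller (e.g. the hlsnative downloader) is expected to drive
# FragmentFD roughly as follows. This is only a sketch, assuming the subclass
# has built a `fragments` list of {'frag_index': ..., 'url': ...} dicts from
# the manifest and has `filename` and `info_dict` in scope:
#
#     ctx = {'filename': filename, 'total_frags': len(fragments)}
#     self._prepare_and_start_frag_download(ctx, info_dict)
#     return self.download_and_append_fragments(ctx, fragments, info_dict)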
|