Mirror of https://github.com/yt-dlp/yt-dlp.git
[ie] Do not test truth value of xml.etree.ElementTree.Element (#8582)

Testing the truthiness of an xml.etree.ElementTree.Element instance is deprecated in py3.12.

Authored by: bashonly
parent 87264d4fda
commit d4f14a72dc

5 changed files with 21 additions and 12 deletions
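The deprecation in question: an xml.etree.ElementTree.Element whose root has no children evaluates as False, and from Python 3.12 onward any such implicit truth test emits a DeprecationWarning (the ElementTree documentation recommends an explicit "elem is not None" or "len(elem)" test instead). Below is a minimal, self-contained illustration of both the warning and the isinstance guard this commit switches to; the sample XML is made up.

import warnings
import xml.etree.ElementTree as ET

# A perfectly valid document whose root element has no children
root = ET.fromstring('<manifest duration="42"/>')

with warnings.catch_warnings():
    warnings.simplefilter('error', DeprecationWarning)
    try:
        if not root:  # bool(Element) is deprecated on py3.12; False for a childless root
            print('valid document wrongly treated as missing')
    except DeprecationWarning as warning:
        print(f'py3.12: {warning}')

# The guard used throughout this commit never touches bool(Element)
if isinstance(root, ET.Element):
    print(root.get('duration'))  # -> 42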
@@ -1,8 +1,9 @@
-import re
-import json
 import base64
+import json
+import re
 import time
 import urllib.parse
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..compat import (

@@ -387,7 +388,7 @@ def _find_secret_formats(self, formats, video_id):
         url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)
 
         secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
-        if not secret_xml:
+        if not isinstance(secret_xml, xml.etree.ElementTree.Element):
             return
 
         for child in secret_xml:
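Some context for the _find_secret_formats hunk above: with fatal=False, the extractor's XML download helper hands back a non-Element sentinel (False) when the request or the parse fails, so the old "if not secret_xml:" both relied on the deprecated truth test and conflated "download failed" with "root element has no children". A rough sketch of why the isinstance guard covers both cases; download_xml_fatal_false is a hypothetical stand-in for the real helper.

import xml.etree.ElementTree as ET

def download_xml_fatal_false(payload):
    # Hypothetical stand-in for the extractor's _download_xml(..., fatal=False):
    # an Element on success, a non-Element sentinel (False) on failure.
    try:
        return ET.fromstring(payload)
    except ET.ParseError:
        return False

for payload in ('<secret><format>hls</format></secret>', 'not xml at all'):
    secret_xml = download_xml_fatal_false(payload)
    # Old guard "if not secret_xml:" used the deprecated bool(Element) and also
    # bailed out on a successfully parsed root that simply has no children.
    if not isinstance(secret_xml, ET.Element):
        continue
    for child in secret_xml:
        print(child.tag)  # -> format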
@@ -2225,7 +2225,9 @@ def _extract_mpd_vod_duration(
             mpd_url, video_id,
             note='Downloading MPD VOD manifest' if note is None else note,
             errnote='Failed to download VOD manifest' if errnote is None else errnote,
-            fatal=False, data=data, headers=headers, query=query) or {}
+            fatal=False, data=data, headers=headers, query=query)
+        if not isinstance(mpd_doc, xml.etree.ElementTree.Element):
+            return None
         return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))
 
     @staticmethod
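In the _extract_mpd_vod_duration hunk, the "or {}" fallback disappears along with the truth test: "mpd_doc or {}" has to evaluate bool(mpd_doc), which is exactly the deprecated operation, and for a manifest whose root happened to have no children it would silently replace the real document with an empty dict. A small sketch of the difference, using a made-up MPD attribute:

import xml.etree.ElementTree as ET

# Made-up manifest: the root carries the attribute we want but has no children
mpd_doc = ET.fromstring('<MPD mediaPresentationDuration="PT30S"/>')

# Old style: "or {}" triggers the deprecated bool(Element) and, because the
# childless root is falsy, the real document is swapped for the fallback dict
print((mpd_doc or {}).get('mediaPresentationDuration'))  # -> None

# New style: reject non-Elements explicitly, keep the document otherwise
if not isinstance(mpd_doc, ET.Element):
    duration = None
else:
    duration = mpd_doc.get('mediaPresentationDuration')
print(duration)  # -> PT30S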
@@ -1,4 +1,5 @@
 import re
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..compat import compat_str

@@ -137,7 +138,7 @@ def _get_video_info(self, itemdoc, use_hls=True):
         mediagen_doc = self._download_xml(
             mediagen_url, video_id, 'Downloading video urls', fatal=False)
 
-        if mediagen_doc is False:
+        if not isinstance(mediagen_doc, xml.etree.ElementTree.Element):
             return None
 
         item = mediagen_doc.find('./video/item')
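The _get_video_info hunk is a slightly different case: the old code already avoided bool(Element) by comparing against the failure sentinel with "is False", but that test is narrow (any other non-Element result would slip through), so it is replaced with the same isinstance guard used elsewhere in the commit. Roughly:

import xml.etree.ElementTree as ET

for mediagen_doc in (ET.fromstring('<package/>'), False, None):
    old_check = mediagen_doc is not False              # None would slip through
    new_check = isinstance(mediagen_doc, ET.Element)   # only a real document passes
    print(type(mediagen_doc).__name__, old_check, new_check)
# Element True True
# bool False False
# NoneType True False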
@@ -1,6 +1,7 @@
 import base64
 import json
 import re
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from .theplatform import ThePlatformIE, default_ns

@@ -803,8 +804,10 @@ def _real_extract(self, url):
         smil = self._download_xml(
             f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
             note='Downloading SMIL data', query=query, fatal=is_live)
-        subtitles = self._parse_smil_subtitles(smil, default_ns) if smil else {}
-        for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil else []:
+        if not isinstance(smil, xml.etree.ElementTree.Element):
+            smil = None
+        subtitles = self._parse_smil_subtitles(smil, default_ns) if smil is not None else {}
+        for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil is not None else []:
             info['duration'] = float_or_none(remove_end(video.get('dur'), 'ms'), 1000)
             video_src_url = video.get('src')
             ext = mimetype2ext(video.get('type'), default=determine_ext(video_src_url))
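The SMIL hunk above shows the second idiom used in this commit: when the value is still needed inside conditional expressions, anything that is not an Element is normalized to None once, and the later expressions test "smil is not None", which is the comparison the xml.etree.ElementTree documentation recommends over the implicit truth value. A compact sketch with a made-up SMIL-like payload:

import xml.etree.ElementTree as ET

smil = ET.fromstring('<smil><video dur="1000ms" src="https://example.com/a.mp4"/></smil>')

# Normalize once: any non-Element failure sentinel becomes None ...
if not isinstance(smil, ET.Element):
    smil = None

# ... so later conditionals can use "is not None" and never touch bool(Element)
for video in smil.findall('.//video') if smil is not None else []:
    print(video.get('src'))  # -> https://example.com/a.mp4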
@@ -1,5 +1,6 @@
 import re
 import urllib.parse
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..utils import (

@@ -469,8 +470,9 @@ def _real_extract(self, url):
         slides = self._download_xml(
             player_info['slides_xml_url'], video_id, fatal=False,
             note='Downloading slides XML', errnote='Failed to download slides info')
-        slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
-        for slide_id, slide in enumerate(slides.findall('./slide') if slides else [], 1):
-            slides_info.append((
-                slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
-                int_or_none(xpath_text(slide, './timeSec', 'time'))))
+        if isinstance(slides, xml.etree.ElementTree.Element):
+            slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
+            for slide_id, slide in enumerate(slides.findall('./slide')):
+                slides_info.append((
+                    slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
+                    int_or_none(xpath_text(slide, './timeSec', 'time'))))
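The final hunk restructures rather than normalizes: instead of repeating "if slides else []" in the loop header (another implicit truth test), the whole slide-parsing block is wrapped in a single isinstance guard. The two idioms in this commit amount to the same thing; which one fits depends on whether the element is consumed in one place (guard block or early return) or referenced from several expressions (normalize to None, then test "is not None"). A last minimal contrast, with made-up element names:

import xml.etree.ElementTree as ET

doc = ET.fromstring('<slides><slide name="intro"/></slides>')

# Guard-block style (one consumer): everything lives under the isinstance check
if isinstance(doc, ET.Element):
    for slide in doc.findall('./slide'):
        print(slide.get('name'))  # -> intro

# Normalize-to-None style (several consumers): test "is not None" afterwards
doc = doc if isinstance(doc, ET.Element) else None
names = [s.get('name') for s in doc.findall('./slide')] if doc is not None else []
print(names)  # -> ['intro']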