[cleanup] Misc fixes and cleanup
Closes #3780, Closes #3853, Closes #3850
parent 8246f8402b
commit 8a82af3511
Makefile (2 changes)

@@ -129,7 +129,7 @@ completions/fish/yt-dlp.fish: yt_dlp/*.py yt_dlp/*/*.py devscripts/fish-completi
 	mkdir -p completions/fish
 	$(PYTHON) devscripts/fish-completion.py
 
-_EXTRACTOR_FILES = $(shell find yt_dlp/extractor -iname '*.py' -and -not -iname 'lazy_extractors.py')
+_EXTRACTOR_FILES = $(shell find yt_dlp/extractor -name '*.py' -and -not -name 'lazy_extractors.py')
 yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
 	$(PYTHON) devscripts/make_lazy_extractors.py $@
 
README.md (10 changes)

@@ -111,7 +111,7 @@ # NEW FEATURES
 
 * **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`
 
-* **Other new options**: Many new options have been added such as `--concat-playlist`, `--print`, `--wait-for-video`, `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc
+* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc
 
 * **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc
 

@@ -151,7 +151,7 @@ ### Differences in default behavior
 
 For ease of use, a few more compat options are available:
 
-* `--compat-options all`: Use all compat options
+* `--compat-options all`: Use all compat options (Do NOT use)
 * `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams`
 * `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect`
 

@@ -1003,9 +1003,9 @@ ## Post-Processing Options:
     --no-remove-chapters            Do not remove any chapters from the file
                                     (default)
     --force-keyframes-at-cuts       Force keyframes around chapters when
-                                    removing/splitting them. The resulting video
-                                    may have fewer artifacts around the cuts,
-                                    but is very slow due to needing a re-encode
+                                    removing/splitting them. This is slow due to
+                                    needing a re-encode, but the resulting video
+                                    may have fewer artifacts around the cuts
     --no-force-keyframes-at-cuts    Do not force keyframes around the chapters
                                     when cutting/splitting (default)
     --use-postprocessor NAME[:ARGS]
@@ -12,6 +12,8 @@
 OPTIONS_END = 'CONFIGURATION'
 EPILOG_START = 'See full documentation'
 
+DISABLE_PATCH = object()
+
 
 def take_section(text, start=None, end=None, *, shift=0):
     return text[

@@ -21,7 +23,7 @@ def take_section(text, start=None, end=None, *, shift=0):
 
 
 def apply_patch(text, patch):
-    return re.sub(*patch, text)
+    return text if patch[0] is DISABLE_PATCH else re.sub(*patch, text)
 
 
 options = take_section(sys.stdin.read(), f'\n {OPTIONS_START}', f'\n{EPILOG_START}', shift=1)

@@ -38,11 +40,15 @@ def apply_patch(text, patch):
         rf'({delim[:-1]})? (?P<label>\[\S+\] )?(?P<url>https?({delim})?:({delim})?/({delim})?/(({delim})?\S+)+)\s',
         lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n'))
     ),
-    # This creates issues with prepare_manpage
-    # (  # Avoid newline when a space is available b/w switch and description
-    #     r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),
-    #     r'\1 '
-    # ),
+    (  # Do not split "words"
+        rf'(?m)({delim}\S+)+$',
+        lambda mobj: ''.join((delim, mobj.group(0).replace(delim, '')))
+    ),
+    (  # Avoid newline when a space is available b/w switch and description
+        DISABLE_PATCH,  # This creates issues with prepare_manpage
+        r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),
+        r'\1 '
+    ),
 )
 
 with open(README_FILE, encoding='utf-8') as f:
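The DISABLE_PATCH change above uses a sentinel object to keep a problematic patch listed but inert: apply_patch() returns the text untouched whenever a patch tuple leads with the sentinel. A minimal, self-contained sketch of the pattern (the PATCHES data and sample text below are made up for illustration, not the script's real patch list):

    import re

    DISABLE_PATCH = object()  # unique sentinel; an identity check can never match real patch data


    def apply_patch(text, patch):
        # A patch whose first element is the sentinel stays documented but is never applied
        return text if patch[0] is DISABLE_PATCH else re.sub(*patch, text)


    PATCHES = (
        (r'colour', 'color'),             # active patch
        (DISABLE_PATCH, r'\s+$', ''),     # disabled: would strip trailing whitespace
    )

    text = 'colour   '
    for patch in PATCHES:
        text = apply_patch(text, patch)
    print(repr(text))  # 'color   ' -- only the first patch ran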
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env sh
 
 if [ -z $1 ]; then
     test_set='test'
@@ -1,17 +1,21 @@
 [wheel]
 universal = true
 
+
 [flake8]
-exclude = build,venv,.tox,.git
+exclude = build,venv,.tox,.git,.pytest_cache
 ignore = E402,E501,E731,E741,W503
+max_line_length = 120
 per_file_ignores =
-    ./devscripts/lazy_load_template.py: F401
+    devscripts/lazy_load_template.py: F401
 
+
 [tool:pytest]
 addopts = -ra -v --strict-markers
 markers =
     download
 
+
 [tox:tox]
 skipsdist = true
 envlist = py{36,37,38,39,310},pypy{36,37,38,39}

@@ -25,6 +29,7 @@ passenv = HOME # For test_compat_expanduser
 setenv =
     # PYTHONWARNINGS = error # Catches PIP's warnings too
 
+
 [isort]
 py_version = 36
 multi_line_output = VERTICAL_HANGING_INDENT
@@ -1,2 +1,2 @@
-#!/bin/sh
+#!/usr/bin/env sh
 exec "${PYTHON:-python3}" -bb -Werror -Xdev "$(dirname "$(realpath "$0")")/yt_dlp/__main__.py" "$@"
@@ -27,6 +27,7 @@
 
 from .cache import Cache
 from .compat import (
+    HAS_LEGACY as compat_has_legacy,
     compat_get_terminal_size,
     compat_os_name,
     compat_shlex_quote,

@@ -591,7 +592,10 @@ def check_deprecated(param, option, suggestion):
         for msg in self.params.get('_deprecation_warnings', []):
             self.deprecation_warning(msg)
 
-        if 'list-formats' in self.params.get('compat_opts', []):
+        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
+        if not compat_has_legacy:
+            self.params['compat_opts'].add('no-compat-legacy')
+        if 'list-formats' in self.params['compat_opts']:
             self.params['listformats_table'] = False
 
         if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
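The hunk above normalizes compat_opts exactly once during initialization, so every later check can index self.params['compat_opts'] directly instead of repeating .get() with a default list. A rough standalone sketch of the idea (the params dict, the option names and the HAS_LEGACY stand-in are illustrative only):

    HAS_LEGACY = True  # stand-in for the flag exported by the compat package

    params = {'compat_opts': ['list-formats']}  # hypothetical user-supplied options

    # Normalize once: a missing key or any iterable becomes a set
    params['compat_opts'] = set(params.get('compat_opts', ()))
    if not HAS_LEGACY:
        params['compat_opts'].add('no-compat-legacy')

    # Later call sites can now use plain membership tests on the set
    if 'list-formats' in params['compat_opts']:
        params['listformats_table'] = False

    print(params)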
@@ -788,9 +792,9 @@ def to_stdout(self, message, skip_eol=False, quiet=None):
         """Print message to stdout"""
         if quiet is not None:
             self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
-        self._write_string(
-            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
-            self._out_files.out)
+        if skip_eol is not False:
+            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
+        self._write_string(self._bidi_workaround(message), self._out_files.out)
 
     def to_screen(self, message, skip_eol=False, quiet=None):
         """Print message to screen if not in quiet mode"""

@@ -942,7 +946,7 @@ def write_debug(self, message, only_once=False):
         '''Log debug message or Print message to stderr'''
         if not self.params.get('verbose', False):
             return
-        message = '[debug] %s' % message
+        message = f'[debug] {message}'
         if self.params.get('logger'):
             self.params['logger'].debug(message)
         else:

@@ -1136,7 +1140,7 @@ def get_value(mdict):
         def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
             return sanitize_filename(str(value), restricted=restricted, is_id=(
                 bool(re.search(r'(^|[_.])id(\.|$)', key))
-                if 'filename-sanitization' in self.params.get('compat_opts', [])
+                if 'filename-sanitization' in self.params['compat_opts']
                 else NO_DEFAULT))
 
         sanitizer = sanitize if callable(sanitize) else filename_sanitizer

@@ -1775,7 +1779,7 @@ def get_entry(i):
         max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
         for i, entry_tuple in enumerate(entries, 1):
             playlist_index, entry = entry_tuple
-            if 'playlist-index' in self.params.get('compat_opts', []):
+            if 'playlist-index' in self.params['compat_opts']:
                 playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
             self.to_screen('[download] Downloading video %s of %s' % (
                 self._format_screen(i, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

@@ -1906,7 +1910,7 @@ def _check_formats(self, formats):
             temp_file.close()
             try:
                 success, _ = self.dl(temp_file.name, f, test=True)
-            except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
+            except (DownloadError, OSError, ValueError) + network_exceptions:
                 success = False
             finally:
                 if os.path.exists(temp_file.name):

@@ -1935,7 +1939,7 @@ def can_merge():
         compat = (
             prefer_best
             or self.params.get('allow_multiple_audio_streams', False)
-            or 'format-spec' in self.params.get('compat_opts', []))
+            or 'format-spec' in self.params['compat_opts'])
 
         return (
             'best/bestvideo+bestaudio' if prefer_best

@@ -3652,8 +3656,8 @@ def get_encoding(stream):
         write_debug('Plugins: %s' % [
             '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
             for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
-        if self.params.get('compat_opts'):
-            write_debug('Compatibility options: %s' % ', '.join(self.params.get('compat_opts')))
+        if self.params['compat_opts']:
+            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
 
         if source == 'source':
             try:
@@ -9,8 +9,13 @@
 
 
 # XXX: Implement this the same way as other DeprecationWarnings without circular import
-passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn(
-    DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=2))
+try:
+    passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn(
+        DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=2))
+    HAS_LEGACY = True
+except ModuleNotFoundError:
+    # Keep working even without _legacy module
+    HAS_LEGACY = False
 del passthrough_module
 
 
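The try/except above is the usual optional-submodule pattern: wire up the deprecated passthrough if the legacy shim exists, record the outcome in a module-level flag, and keep working when the shim has been stripped from the build. A minimal, self-contained sketch of the same idea using a PEP 562 module-level __getattr__ (the package and module names are placeholders, not yt-dlp's real layout):

    import importlib
    import warnings

    try:
        # Hypothetical optional shim; raises ModuleNotFoundError if it was stripped out
        _legacy = importlib.import_module('mypackage._legacy')
        HAS_LEGACY = True
    except ModuleNotFoundError:
        _legacy = None
        HAS_LEGACY = False  # callers can branch on this, e.g. to force a 'no-compat-legacy' option


    def __getattr__(name):
        # Module-level fallback: forward unknown names to the shim with a deprecation warning
        if _legacy is not None and hasattr(_legacy, name):
            warnings.warn(DeprecationWarning(f'{__name__}.{name} is deprecated'), stacklevel=2)
            return getattr(_legacy, name)
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')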
@@ -1,4 +1,3 @@
-from ..compat import compat_str
 from ..utils import NO_DEFAULT, determine_protocol
 
 

@@ -91,7 +90,7 @@ def _get_suitable_downloader(info_dict, protocol, params, default):
     info_dict['protocol'] = protocol
     downloaders = params.get('external_downloader')
     external_downloader = (
-        downloaders if isinstance(downloaders, compat_str) or downloaders is None
+        downloaders if isinstance(downloaders, str) or downloaders is None
         else downloaders.get(shorten_protocol_name(protocol, True), downloaders.get('default')))
 
     if external_downloader is None:
@@ -610,8 +610,7 @@ def _initialize_geo_bypass(self, geo_bypass_context):
 
         if ip_block:
             self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
-            self._downloader.write_debug(
-                '[debug] Using fake IP %s as X-Forwarded-For' % self._x_forwarded_for_ip)
+            self.write_debug(f'Using fake IP {self._x_forwarded_for_ip} as X-Forwarded-For')
             return
 
         # Path 2: bypassing based on country code
@@ -27,7 +27,7 @@ def _call_api(self, path, video_id, query=None):
         auth_cookie = self._get_cookies('https://curiositystream.com').get('auth_token')
         if auth_cookie:
             self.write_debug('Obtained auth_token cookie')
-            self._auth_token = cookie.value
+            self._auth_token = auth_cookie.value
         if self._auth_token:
             headers['X-Auth-Token'] = self._auth_token
         result = self._download_json(
@@ -1,9 +1,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-)
+from ..compat import compat_parse_qs
 from ..dependencies import websockets
 from ..utils import (
     ExtractorError,

@@ -209,7 +207,7 @@ def _real_extract(self, url):
                 'User-Agent': self.get_param('http_headers')['User-Agent'],
             })
 
-            self.write_debug('[debug] Sending HLS server request')
+            self.write_debug('Sending HLS server request')
 
             while True:
                 recv = ws.recv()

@@ -231,13 +229,10 @@ def _real_extract(self, url):
                 if not data or not isinstance(data, dict):
                     continue
                 if data.get('name') == '_response_' and data.get('id') == 1:
-                    self.write_debug('[debug] Goodbye.')
+                    self.write_debug('Goodbye')
                     playlist_data = data
                     break
-                elif self._downloader.params.get('verbose', False):
-                    if len(recv) > 100:
-                        recv = recv[:100] + '...'
-                    self.to_screen('[debug] Server said: %s' % recv)
+                self.write_debug('Server said: %s%s' % (recv[:100], '...' if len(recv) > 100 else ''))
 
             if not playlist_data:
                 raise ExtractorError('Unable to fetch HLS playlist info via WebSocket')
@@ -1634,8 +1634,8 @@ def _alias_callback(option, opt_str, value, parser, opts, nargs):
         action='store_true', dest='force_keyframes_at_cuts', default=False,
         help=(
             'Force keyframes around chapters when removing/splitting them. '
-            'The resulting video may have fewer artifacts around the cuts, '
-            'but is very slow due to needing a re-encode'))
+            'This is slow due to needing a re-encode, but '
+            'the resulting video may have fewer artifacts around the cuts'))
     postproc.add_option(
         '--no-force-keyframes-at-cuts',
         action='store_false', dest='force_keyframes_at_cuts',
@@ -176,6 +176,8 @@ def add_progress_hook(self, ph):
 
     def report_progress(self, s):
         s['_default_template'] = '%(postprocessor)s %(status)s' % s
+        if not self._downloader:
+            return
 
         progress_dict = s.copy()
         progress_dict.pop('info_dict')

@@ -184,7 +186,8 @@ def report_progress(self, s):
         progress_template = self.get_param('progress_template', {})
         tmpl = progress_template.get('postprocess')
         if tmpl:
-            self._downloader.to_stdout(self._downloader.evaluate_outtmpl(tmpl, progress_dict))
+            self._downloader.to_screen(
+                self._downloader.evaluate_outtmpl(tmpl, progress_dict), skip_eol=True, quiet=False)
 
         self._downloader.to_console_title(self._downloader.evaluate_outtmpl(
             progress_template.get('postprocess-title') or 'yt-dlp %(progress._default_template)s',
@@ -66,15 +66,6 @@ def __init__(self, downloader=None):
         self._prefer_ffmpeg = self.get_param('prefer_ffmpeg', True)
         self._paths = self._determine_executables()
 
-    def check_version(self):
-        if not self.available:
-            raise FFmpegPostProcessorError('ffmpeg not found. Please install or provide the path using --ffmpeg-location')
-
-        required_version = '10-0' if self.basename == 'avconv' else '1.0'
-        if is_outdated_version(self._version, required_version):
-            self.report_warning(f'Your copy of {self.basename} is outdated, update {self.basename} '
-                                f'to version {required_version} or newer if you encounter any errors')
-
     @staticmethod
     def get_versions_and_features(downloader=None):
         pp = FFmpegPostProcessor(downloader)

@@ -205,6 +196,15 @@ def stream_copy_opts(copy=True, *, ext=None):
         if ext in ('mp4', 'mov', 'm4a'):
             yield from ('-c:s', 'mov_text')
 
+    def check_version(self):
+        if not self.available:
+            raise FFmpegPostProcessorError('ffmpeg not found. Please install or provide the path using --ffmpeg-location')
+
+        required_version = '10-0' if self.basename == 'avconv' else '1.0'
+        if is_outdated_version(self._version, required_version):
+            self.report_warning(f'Your copy of {self.basename} is outdated, update {self.basename} '
+                                f'to version {required_version} or newer if you encounter any errors')
+
     def get_audio_codec(self, path):
         if not self.probe_available and not self.available:
             raise PostProcessingError('ffprobe and ffmpeg not found. Please install or provide the path using --ffmpeg-location')
@@ -619,9 +619,9 @@ def sanitize_open(filename, open_mode):
                     # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                     raise LockingUnsupportedError()
                 stream = locked_file(filename, open_mode, block=False).__enter__()
-            except LockingUnsupportedError:
+            except OSError:
                 stream = open(filename, open_mode)
-            return (stream, filename)
+            return stream, filename
         except OSError as err:
             if attempt or err.errno in (errno.EACCES,):
                 raise
@@ -815,12 +815,9 @@ def escapeHTML(text):
 
 
 def process_communicate_or_kill(p, *args, **kwargs):
-    try:
-        return p.communicate(*args, **kwargs)
-    except BaseException:  # Including KeyboardInterrupt
-        p.kill()
-        p.wait()
-        raise
+    write_string('DeprecationWarning: yt_dlp.utils.process_communicate_or_kill is deprecated '
+                 'and may be removed in a future version. Use yt_dlp.utils.Popen.communicate_or_kill instead')
+    return Popen.communicate_or_kill(p, *args, **kwargs)
 
 
 class Popen(subprocess.Popen):

@@ -834,7 +831,12 @@ def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs, startupinfo=self._startupinfo)
 
     def communicate_or_kill(self, *args, **kwargs):
-        return process_communicate_or_kill(self, *args, **kwargs)
+        try:
+            return self.communicate(*args, **kwargs)
+        except BaseException:  # Including KeyboardInterrupt
+            self.kill()
+            self.wait()
+            raise
 
 
 def get_subprocess_encoding():
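The refactor above inverts the old relationship: the kill-on-exception logic now lives on the Popen subclass, and the module-level helper is reduced to a deprecation shim that delegates to it. A cut-down runnable sketch of the pattern (the child command is an arbitrary example):

    import subprocess
    import sys


    class Popen(subprocess.Popen):
        def communicate_or_kill(self, *args, **kwargs):
            try:
                return self.communicate(*args, **kwargs)
            except BaseException:  # Including KeyboardInterrupt
                # Never leave the child running if communicate() is interrupted
                self.kill()
                self.wait()
                raise


    proc = Popen([sys.executable, '-c', 'print("hello")'], stdout=subprocess.PIPE, text=True)
    stdout, _ = proc.communicate_or_kill()
    print(stdout.strip())  # hello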
@@ -921,22 +923,23 @@ def make_HTTPS_handler(params, **kwargs):
         context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
         # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
         context.set_ciphers('DEFAULT')
 
     context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
     if opts_check_certificate:
         if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
             context.load_verify_locations(cafile=certifi.where())
-        else:
-            try:
-                context.load_default_certs()
-                # Work around the issue in load_default_certs when there are bad certificates. See:
-                # https://github.com/yt-dlp/yt-dlp/issues/1060,
-                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
-            except ssl.SSLError:
-                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
-                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
-                    for storename in ('CA', 'ROOT'):
-                        _ssl_load_windows_store_certs(context, storename)
-                context.set_default_verify_paths()
+        try:
+            context.load_default_certs()
+            # Work around the issue in load_default_certs when there are bad certificates. See:
+            # https://github.com/yt-dlp/yt-dlp/issues/1060,
+            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
+        except ssl.SSLError:
+            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
+            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
+                for storename in ('CA', 'ROOT'):
+                    _ssl_load_windows_store_certs(context, storename)
+            context.set_default_verify_paths()
 
     client_certfile = params.get('client_certificate')
     if client_certfile:
         try:
@@ -1885,11 +1888,11 @@ def platform_name():
 
 @functools.cache
 def get_windows_version():
-    ''' Get Windows version. None if it's not running on Windows '''
+    ''' Get Windows version. returns () if it's not running on Windows '''
     if compat_os_name == 'nt':
         return version_tuple(platform.win32_ver()[1])
     else:
-        return None
+        return ()
 
 
 def write_string(s, out=None, encoding=None):
@@ -1899,14 +1902,14 @@ def write_string(s, out=None, encoding=None):
     if compat_os_name == 'nt' and supports_terminal_sequences(out):
         s = re.sub(r'([\r\n]+)', r' \1', s)
 
-    enc = None
+    enc, buffer = None, out
     if 'b' in getattr(out, 'mode', ''):
         enc = encoding or preferredencoding()
     elif hasattr(out, 'buffer'):
-        out = out.buffer
+        buffer = out.buffer
         enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
-    out.write(s.encode(enc, 'ignore') if enc else s)
+    buffer.write(s.encode(enc, 'ignore') if enc else s)
     out.flush()
 
 
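The write_string change keeps the stream handed in by the caller and adds a separate buffer alias: encoded bytes go to out.buffer when the stream is text-mode, but flush() is still called on the original object. A reduced standalone sketch (preferredencoding() is approximated with the locale module here):

    import locale
    import sys


    def write_string(s, out=None, encoding=None):
        out = out or sys.stderr
        enc, buffer = None, out
        if 'b' in getattr(out, 'mode', ''):      # caller already gave us a binary stream
            enc = encoding or locale.getpreferredencoding()
        elif hasattr(out, 'buffer'):             # text stream wrapping a binary buffer
            buffer = out.buffer                  # write the encoded bytes here ...
            enc = encoding or getattr(out, 'encoding', None) or locale.getpreferredencoding()
        buffer.write(s.encode(enc, 'ignore') if enc else s)
        out.flush()                              # ... but flush the stream we were given


    write_string('hello\n', sys.stdout)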
@@ -1925,7 +1928,7 @@ def intlist_to_bytes(xs):
     return compat_struct_pack('%dB' % len(xs), *xs)
 
 
-class LockingUnsupportedError(IOError):
+class LockingUnsupportedError(OSError):
     msg = 'File locking is not supported on this platform'
 
     def __init__(self):
@@ -5089,7 +5092,7 @@ def jwt_decode_hs256(jwt):
 @functools.cache
 def supports_terminal_sequences(stream):
     if compat_os_name == 'nt':
-        if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
+        if not WINDOWS_VT_MODE:
             return False
     elif not os.getenv('TERM'):
         return False
@@ -5100,7 +5103,7 @@ def supports_terminal_sequences(stream):
 
 
 def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.python.org/issue30075
-    if compat_os_name != 'nt':
+    if get_windows_version() < (10, 0, 10586):
         return
     global WINDOWS_VT_MODE
     startupinfo = subprocess.STARTUPINFO()
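Returning an empty tuple instead of None (see the get_windows_version hunk above) is what lets windows_enable_vt_mode use get_windows_version() < (10, 0, 10586) directly: comparing two tuples is always defined, and () sorts before any non-empty version, while None < (10, 0, 10586) would raise TypeError. A toy demonstration with made-up version values:

    def get_windows_version(simulate=None):
        # Pretend to be on Windows only when a version tuple is supplied
        return simulate if simulate is not None else ()


    for ver in ((), (6, 1, 7601), (10, 0, 19045)):
        too_old = get_windows_version(ver or None) < (10, 0, 10586)
        print(ver, 'skip VT mode' if too_old else 'enable VT mode')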