[cleanup] Add more ruff rules (#10149)

Authored by: seproDev
Reviewed-by: bashonly <88596187+bashonly@users.noreply.github.com>
Reviewed-by: Simon Sawicki <contact@grub4k.xyz>

parent: db50f19d76
commit: add96eb9f8
@@ -266,7 +266,7 @@ ## Adding support for a new site
        $ hatch fmt --check
        ```

-    You can use `hatch fmt` to automatically fix problems.
+    You can use `hatch fmt` to automatically fix problems. Rules that the linter/formatter enforces should not be disabled with `# noqa` unless a maintainer requests it. The only exception allowed is for old/printf-style string formatting in GraphQL query templates (use `# noqa: UP031`).

 1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.8 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
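For illustration, a minimal hypothetical extractor snippet showing the one permitted suppression: printf-style formatting inside a GraphQL query template, silenced with `# noqa: UP031` (the template and function names below are invented, not from the codebase):

```python
_GRAPHQL_QUERY_TEMPLATE = '''{
    video(id: "%s") {
        title
        duration
    }
}'''


def _build_query(video_id):
    # %-formatting is kept here because str.format/f-strings would clash with
    # GraphQL's own braces, so the pyupgrade rule is silenced explicitly
    return _GRAPHQL_QUERY_TEMPLATE % video_id  # noqa: UP031
```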
@@ -44,7 +44,7 @@ def main():
            'Cryptodome',
            # requests >=2.32.0 breaks py2exe builds due to certifi dependency
            'requests',
-           'urllib3'
+           'urllib3',
        ],
        'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
        # Modules that are only imported dynamically must be added here
@@ -68,7 +68,7 @@ def exe(onedir):
        'dist/',
        onedir and f'{name}/',
        name,
-       OS_NAME == 'win32' and '.exe'
+       OS_NAME == 'win32' and '.exe',
    )))


@@ -113,7 +113,7 @@ def windows_set_version(exe, version):
            ),
            kids=[
                StringFileInfo([StringTable('040904B0', [
-                   StringStruct('Comments', 'yt-dlp%s Command Line Interface' % suffix),
+                   StringStruct('Comments', f'yt-dlp{suffix} Command Line Interface'),
                    StringStruct('CompanyName', 'https://github.com/yt-dlp'),
                    StringStruct('FileDescription', 'yt-dlp%s' % (MACHINE and f' ({MACHINE})')),
                    StringStruct('FileVersion', version),
@@ -123,8 +123,8 @@ def windows_set_version(exe, version):
                    StringStruct('ProductName', f'yt-dlp{suffix}'),
                    StringStruct(
                        'ProductVersion', f'{version}{suffix} on Python {platform.python_version()}'),
-               ])]), VarFileInfo([VarStruct('Translation', [0, 1200])])
-           ]
+               ])]), VarFileInfo([VarStruct('Translation', [0, 1200])]),
+           ],
        ))
@@ -9,8 +9,8 @@

 import yt_dlp

-BASH_COMPLETION_FILE = "completions/bash/yt-dlp"
-BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
+BASH_COMPLETION_FILE = 'completions/bash/yt-dlp'
+BASH_COMPLETION_TEMPLATE = 'devscripts/bash-completion.in'


 def build_completion(opt_parser):
@@ -21,9 +21,9 @@ def build_completion(opt_parser):
            opts_flag.append(option.get_opt_string())
    with open(BASH_COMPLETION_TEMPLATE) as f:
        template = f.read()
-   with open(BASH_COMPLETION_FILE, "w") as f:
+   with open(BASH_COMPLETION_FILE, 'w') as f:
        # just using the special char
-       filled_template = template.replace("{{flags}}", " ".join(opts_flag))
+       filled_template = template.replace('{{flags}}', ' '.join(opts_flag))
        f.write(filled_template)
@@ -223,10 +223,10 @@ def format_single_change(self, info: CommitInfo):

        return message if not sep else f'{message}{sep}{rest}'

-   def _format_message_link(self, message, hash):
-       assert message or hash, 'Improperly defined commit message or override'
-       message = message if message else hash[:HASH_LENGTH]
-       return f'[{message}]({self.repo_url}/commit/{hash})' if hash else message
+   def _format_message_link(self, message, commit_hash):
+       assert message or commit_hash, 'Improperly defined commit message or override'
+       message = message if message else commit_hash[:HASH_LENGTH]
+       return f'[{message}]({self.repo_url}/commit/{commit_hash})' if commit_hash else message

    def _format_issues(self, issues):
        return ', '.join(f'[#{issue}]({self.repo_url}/issues/{issue})' for issue in issues)
@@ -356,7 +356,7 @@ def apply_overrides(self, overrides):
                logger.info(f'CHANGE {self._commits[commit.hash]} -> {commit}')
                self._commits[commit.hash] = commit

-       self._commits = {key: value for key, value in reversed(self._commits.items())}
+       self._commits = dict(reversed(self._commits.items()))

    def groups(self):
        group_dict = defaultdict(list)
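The `dict(reversed(...))` rewrite above is the kind of simplification flake8-comprehensions (`C4`, here the unnecessary-comprehension check) suggests. A quick standalone sanity check with invented values that both spellings build the same mapping:

```python
commits = {'c1': 'first', 'c2': 'second', 'c3': 'third'}

# Identity comprehension over reversed items vs. passing the iterator to dict()
old_style = {key: value for key, value in reversed(commits.items())}
new_style = dict(reversed(commits.items()))

assert old_style == new_style
assert list(new_style) == ['c3', 'c2', 'c1']  # insertion order is reversed
```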
@@ -51,7 +51,7 @@ def apply_patch(text, patch):
    ),
    (  # Headings
        r'(?m)^ (\w.+\n)( (?=\w))?',
-       r'## \1'
+       r'## \1',
    ),
    (  # Fixup `--date` formatting
        rf'(?m)( --date DATE.+({delim}[^\[]+)*)\[.+({delim}.+)*$',
@@ -61,26 +61,26 @@ def apply_patch(text, patch):
    ),
    (  # Do not split URLs
        rf'({delim[:-1]})? (?P<label>\[\S+\] )?(?P<url>https?({delim})?:({delim})?/({delim})?/(({delim})?\S+)+)\s',
-       lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n'))
+       lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n')),
    ),
    (  # Do not split "words"
        rf'(?m)({delim}\S+)+$',
-       lambda mobj: ''.join((delim, mobj.group(0).replace(delim, '')))
+       lambda mobj: ''.join((delim, mobj.group(0).replace(delim, ''))),
    ),
    (  # Allow overshooting last line
        rf'(?m)^(?P<prev>.+)${delim}(?P<current>.+)$(?!{delim})',
        lambda mobj: (mobj.group().replace(delim, ' ')
                      if len(mobj.group()) - len(delim) + 1 <= max_width + ALLOWED_OVERSHOOT
-                     else mobj.group())
+                     else mobj.group()),
    ),
    (  # Avoid newline when a space is available b/w switch and description
        DISABLE_PATCH,  # This creates issues with prepare_manpage
        r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),
-       r'\1 '
+       r'\1 ',
    ),
    (  # Replace brackets with a Markdown link
        r'SponsorBlock API \((http.+)\)',
-       r'[SponsorBlock API](\1)'
+       r'[SponsorBlock API](\1)',
    ),
 )
@@ -30,7 +30,7 @@ def property_setter(name, value):
    opts = parse_options()
    transform = compose_functions(
        property_setter('VARIANT', opts.variant),
-       property_setter('UPDATE_HINT', opts.update_message)
+       property_setter('UPDATE_HINT', opts.update_message),
    )

    write_file(VERSION_FILE, transform(read_file(VERSION_FILE)))
@@ -24,7 +24,7 @@ def get_new_version(version, revision):
    else:
        old_version = read_version().split('.')
        if version.split('.') == old_version[:3]:
-           revision = str(int((old_version + [0])[3]) + 1)
+           revision = str(int(([*old_version, 0])[3]) + 1)

    return f'{version}.{revision}' if revision else version
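The `[*old_version, 0]` spelling is the unpacking form Ruff's `RUF005` (collection-literal-concatenation) prefers over `old_version + [0]`; both build the same list. A quick standalone check with an invented version string:

```python
old_version = '2024.05.27.123456'.split('.')

# Concatenation and unpacking produce identical lists; index [3] then reads the
# existing revision, or the padded 0 when the version only has three parts
assert old_version + [0] == [*old_version, 0]

revision = str(int(([*old_version, 0])[3]) + 1)
assert revision == '123457'
```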
@@ -9,15 +9,15 @@

 import yt_dlp

-ZSH_COMPLETION_FILE = "completions/zsh/_yt-dlp"
-ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+ZSH_COMPLETION_FILE = 'completions/zsh/_yt-dlp'
+ZSH_COMPLETION_TEMPLATE = 'devscripts/zsh-completion.in'


 def build_completion(opt_parser):
    opts = [opt for group in opt_parser.option_groups
            for opt in group.option_list]
-   opts_file = [opt for opt in opts if opt.metavar == "FILE"]
-   opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+   opts_file = [opt for opt in opts if opt.metavar == 'FILE']
+   opts_dir = [opt for opt in opts if opt.metavar == 'DIR']

    fileopts = []
    for opt in opts_file:
@@ -38,11 +38,11 @@ def build_completion(opt_parser):
    with open(ZSH_COMPLETION_TEMPLATE) as f:
        template = f.read()

-   template = template.replace("{{fileopts}}", "|".join(fileopts))
-   template = template.replace("{{diropts}}", "|".join(diropts))
-   template = template.replace("{{flags}}", " ".join(flags))
+   template = template.replace('{{fileopts}}', '|'.join(fileopts))
+   template = template.replace('{{diropts}}', '|'.join(diropts))
+   template = template.replace('{{flags}}', ' '.join(flags))

-   with open(ZSH_COMPLETION_FILE, "w") as f:
+   with open(ZSH_COMPLETION_FILE, 'w') as f:
        f.write(template)
pyproject.toml (127 changed lines)

@@ -183,21 +183,84 @@ line-length = 120

 [tool.ruff.lint]
 ignore = [
-    "E402",    # module level import not at top of file
-    "E501",    # line too long
-    "E731",    # do not assign a lambda expression, use a def
-    "E741",    # ambiguous variable name
+    "E402",    # module-import-not-at-top-of-file
+    "E501",    # line-too-long
+    "E731",    # lambda-assignment
+    "E741",    # ambiguous-variable-name
+    "UP036",   # outdated-version-block
+    "B006",    # mutable-argument-default
+    "B008",    # function-call-in-default-argument
+    "B011",    # assert-false
+    "B017",    # assert-raises-exception
+    "B023",    # function-uses-loop-variable (false positives)
+    "B028",    # no-explicit-stacklevel
+    "B904",    # raise-without-from-inside-except
+    "C401",    # unnecessary-generator-set
+    "C402",    # unnecessary-generator-dict
+    "PIE790",  # unnecessary-placeholder
+    "SIM102",  # collapsible-if
+    "SIM108",  # if-else-block-instead-of-if-exp
+    "SIM112",  # uncapitalized-environment-variables
+    "SIM113",  # enumerate-for-loop
+    "SIM114",  # if-with-same-arms
+    "SIM115",  # open-file-with-context-handler
+    "SIM117",  # multiple-with-statements
+    "SIM223",  # expr-and-false
+    "SIM300",  # yoda-conditions
+    "TD001",   # invalid-todo-tag
+    "TD002",   # missing-todo-author
+    "TD003",   # missing-todo-link
+    "PLE0604", # invalid-all-object (false positives)
+    "PLW0603", # global-statement
+    "PLW1510", # subprocess-run-without-check
+    "PLW2901", # redefined-loop-name
+    "RUF001",  # ambiguous-unicode-character-string
+    "RUF012",  # mutable-class-default
+    "RUF100",  # unused-noqa (flake8 has slightly different behavior)
 ]
 select = [
-    "E",      # pycodestyle errors
-    "W",      # pycodestyle warnings
-    "F",      # pyflakes
-    "I",      # import order
+    "E",      # pycodestyle Error
+    "W",      # pycodestyle Warning
+    "F",      # Pyflakes
+    "I",      # isort
+    "Q",      # flake8-quotes
+    "N803",   # invalid-argument-name
+    "N804",   # invalid-first-argument-name-for-class-method
+    "UP",     # pyupgrade
+    "B",      # flake8-bugbear
+    "A",      # flake8-builtins
+    "COM",    # flake8-commas
+    "C4",     # flake8-comprehensions
+    "FA",     # flake8-future-annotations
+    "ISC",    # flake8-implicit-str-concat
+    "ICN003", # banned-import-from
+    "PIE",    # flake8-pie
+    "T20",    # flake8-print
+    "RSE",    # flake8-raise
+    "RET504", # unnecessary-assign
+    "SIM",    # flake8-simplify
+    "TID251", # banned-api
+    "TD",     # flake8-todos
+    "PLC",    # Pylint Convention
+    "PLE",    # Pylint Error
+    "PLW",    # Pylint Warning
+    "RUF",    # Ruff-specific rules
 ]

 [tool.ruff.lint.per-file-ignores]
-"devscripts/lazy_load_template.py" = ["F401"]
-"!yt_dlp/extractor/**.py" = ["I"]
+"devscripts/lazy_load_template.py" = [
+    "F401",   # unused-import
+]
+"!yt_dlp/extractor/**.py" = [
+    "I",      # isort
+    "ICN003", # banned-import-from
+    "T20",    # flake8-print
+    "A002",   # builtin-argument-shadowing
+    "C408",   # unnecessary-collection-call
+]
+"yt_dlp/jsinterp.py" = [
+    "UP031",  # printf-string-formatting
+]

 [tool.ruff.lint.isort]
 known-first-party = [
@@ -207,6 +270,50 @@ known-first-party = [
 ]
 relative-imports-order = "closest-to-furthest"

+[tool.ruff.lint.flake8-quotes]
+docstring-quotes = "double"
+multiline-quotes = "single"
+inline-quotes = "single"
+avoid-escape = false
+
+[tool.ruff.lint.pep8-naming]
+classmethod-decorators = [
+    "yt_dlp.utils.classproperty",
+]
+
+[tool.ruff.lint.flake8-import-conventions]
+banned-from = [
+    "base64",
+    "datetime",
+    "functools",
+    "glob",
+    "hashlib",
+    "itertools",
+    "json",
+    "math",
+    "os",
+    "pathlib",
+    "random",
+    "re",
+    "string",
+    "sys",
+    "time",
+    "urllib",
+    "uuid",
+    "xml",
+]
+
+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"yt_dlp.compat.compat_str".msg = "Use `str` instead."
+"yt_dlp.compat.compat_b64decode".msg = "Use `base64.b64decode` instead."
+"yt_dlp.compat.compat_urlparse".msg = "Use `urllib.parse` instead."
+"yt_dlp.compat.compat_parse_qs".msg = "Use `urllib.parse.parse_qs` instead."
+"yt_dlp.compat.compat_urllib_parse_unquote".msg = "Use `urllib.parse.unquote` instead."
+"yt_dlp.compat.compat_urllib_parse_urlencode".msg = "Use `urllib.parse.urlencode` instead."
+"yt_dlp.compat.compat_urllib_parse_urlparse".msg = "Use `urllib.parse.urlparse` instead."
+"yt_dlp.compat.compat_shlex_quote".msg = "Use `yt_dlp.utils.shell_quote` instead."
+"yt_dlp.utils.error_to_compat_str".msg = "Use `str` instead."
+
 [tool.autopep8]
 max_line_length = 120
 recursive = true
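To illustrate what the expanded selection enforces in practice (the snippet below is invented, not taken from the diff): flake8-quotes (`Q`) wants single-quoted inline strings, flake8-commas (`COM`) wants trailing commas in multi-line literals, and the `TID251` banned-api table above redirects `yt_dlp.compat` aliases to their stdlib equivalents.

```python
# Flagged under the new configuration:
#     from yt_dlp.compat import compat_urlparse   # TID251: Use `urllib.parse` instead.
#     title = "Example"                            # Q000: prefer single quotes
#     fields = [
#         'id',
#         'title'                                  # COM812: missing trailing comma
#     ]

# Conforming version:
import urllib.parse

title = 'Example'
fields = [
    'id',
    'title',
]
parsed = urllib.parse.urlparse('https://example.com/watch?v=abc')
```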
@@ -22,8 +22,8 @@ def handler(request):
    class HandlerWrapper(handler):
        RH_KEY = handler.RH_KEY

-       def __init__(self, *args, **kwargs):
-           super().__init__(logger=FakeLogger, *args, **kwargs)
+       def __init__(self, **kwargs):
+           super().__init__(logger=FakeLogger, **kwargs)

    return HandlerWrapper

@@ -54,11 +54,11 @@ def skip_handlers_if(request, handler):

 def pytest_configure(config):
    config.addinivalue_line(
-       "markers", "skip_handler(handler): skip test for the given handler",
+       'markers', 'skip_handler(handler): skip test for the given handler',
    )
    config.addinivalue_line(
-       "markers", "skip_handler_if(handler): skip test for the given handler if condition is true"
+       'markers', 'skip_handler_if(handler): skip test for the given handler if condition is true',
    )
    config.addinivalue_line(
-       "markers", "skip_handlers_if(handler): skip test for handlers when the condition is true"
+       'markers', 'skip_handlers_if(handler): skip test for handlers when the condition is true',
    )
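The dropped `*args` above is the kind of call flake8-bugbear's B026 (star-arg-unpacking-after-keyword-argument) flags now that the `B` rules are selected: unpacking positional arguments after a keyword argument in a call is rejected. A tiny standalone illustration with invented names:

```python
class Base:
    def __init__(self, logger, retries=0):
        self.logger = logger
        self.retries = retries


class Wrapper(Base):
    # Flagged (B026): positional unpacking after the `logger=` keyword
    #     def __init__(self, *args, **kwargs):
    #         super().__init__(logger=print, *args, **kwargs)

    # Accepted: forward keyword arguments only
    def __init__(self, **kwargs):
        super().__init__(logger=print, **kwargs)


wrapper = Wrapper(retries=3)
assert wrapper.retries == 3
```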
@ -16,8 +16,8 @@
|
||||
import pytest
|
||||
is_download_test = pytest.mark.download
|
||||
else:
|
||||
def is_download_test(testClass):
|
||||
return testClass
|
||||
def is_download_test(test_class):
|
||||
return test_class
|
||||
|
||||
|
||||
def get_params(override=None):
|
||||
@ -45,10 +45,10 @@ def try_rm(filename):
|
||||
|
||||
|
||||
def report_warning(message, *args, **kwargs):
|
||||
'''
|
||||
"""
|
||||
Print the message to stderr, it will be prefixed with 'WARNING:'
|
||||
If stderr is a tty file the 'WARNING:' will be colored
|
||||
'''
|
||||
"""
|
||||
if sys.stderr.isatty() and compat_os_name != 'nt':
|
||||
_msg_header = '\033[0;33mWARNING:\033[0m'
|
||||
else:
|
||||
@ -138,15 +138,14 @@ def expect_value(self, got, expected, field):
|
||||
elif isinstance(expected, list) and isinstance(got, list):
|
||||
self.assertEqual(
|
||||
len(expected), len(got),
|
||||
'Expect a list of length %d, but got a list of length %d for field %s' % (
|
||||
len(expected), len(got), field))
|
||||
f'Expect a list of length {len(expected)}, but got a list of length {len(got)} for field {field}')
|
||||
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
|
||||
type_got = type(item_got)
|
||||
type_expected = type(item_expected)
|
||||
self.assertEqual(
|
||||
type_expected, type_got,
|
||||
'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
|
||||
index, field, type_expected, type_got))
|
||||
f'Type mismatch for list item at index {index} for field {field}, '
|
||||
f'expected {type_expected!r}, got {type_got!r}')
|
||||
expect_value(self, item_got, item_expected, field)
|
||||
else:
|
||||
if isinstance(expected, str) and expected.startswith('md5:'):
|
||||
@ -224,7 +223,7 @@ def sanitize(key, value):
|
||||
test_info_dict.pop('display_id')
|
||||
|
||||
# Remove deprecated fields
|
||||
for old in YoutubeDL._deprecated_multivalue_fields.keys():
|
||||
for old in YoutubeDL._deprecated_multivalue_fields:
|
||||
test_info_dict.pop(old, None)
|
||||
|
||||
# release_year may be generated from release_date
|
||||
@ -246,11 +245,11 @@ def expect_info_dict(self, got_dict, expected_dict):
|
||||
if expected_dict.get('ext'):
|
||||
mandatory_fields.extend(('url', 'ext'))
|
||||
for key in mandatory_fields:
|
||||
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
|
||||
self.assertTrue(got_dict.get(key), f'Missing mandatory field {key}')
|
||||
# Check for mandatory fields that are automatically set by YoutubeDL
|
||||
if got_dict.get('_type', 'video') == 'video':
|
||||
for key in ['webpage_url', 'extractor', 'extractor_key']:
|
||||
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
|
||||
self.assertTrue(got_dict.get(key), f'Missing field: {key}')
|
||||
|
||||
test_info_dict = sanitize_got_info_dict(got_dict)
|
||||
|
||||
@ -258,7 +257,7 @@ def expect_info_dict(self, got_dict, expected_dict):
|
||||
if missing_keys:
|
||||
def _repr(v):
|
||||
if isinstance(v, str):
|
||||
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
|
||||
return "'{}'".format(v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n'))
|
||||
elif isinstance(v, type):
|
||||
return v.__name__
|
||||
else:
|
||||
@ -275,8 +274,7 @@ def _repr(v):
|
||||
write_string(info_dict_str.replace('\n', '\n '), out=sys.stderr)
|
||||
self.assertFalse(
|
||||
missing_keys,
|
||||
'Missing keys in test definition: %s' % (
|
||||
', '.join(sorted(missing_keys))))
|
||||
'Missing keys in test definition: {}'.format(', '.join(sorted(missing_keys))))
|
||||
|
||||
|
||||
def assertRegexpMatches(self, text, regexp, msg=None):
|
||||
@ -285,9 +283,9 @@ def assertRegexpMatches(self, text, regexp, msg=None):
|
||||
else:
|
||||
m = re.match(regexp, text)
|
||||
if not m:
|
||||
note = 'Regexp didn\'t match: %r not found' % (regexp)
|
||||
note = f'Regexp didn\'t match: {regexp!r} not found'
|
||||
if len(text) < 1000:
|
||||
note += ' in %r' % text
|
||||
note += f' in {text!r}'
|
||||
if msg is None:
|
||||
msg = note
|
||||
else:
|
||||
@ -310,7 +308,7 @@ def assertLessEqual(self, got, expected, msg=None):
|
||||
|
||||
|
||||
def assertEqual(self, got, expected, msg=None):
|
||||
if not (got == expected):
|
||||
if got != expected:
|
||||
if msg is None:
|
||||
msg = f'{got!r} not equal to {expected!r}'
|
||||
self.assertTrue(got == expected, msg)
|
||||
|
@ -262,19 +262,19 @@ def test_search_json_ld_realworld(self):
|
||||
''',
|
||||
{
|
||||
'chapters': [
|
||||
{"title": "Explosie Turnhout", "start_time": 70, "end_time": 440},
|
||||
{"title": "Jaarwisseling", "start_time": 440, "end_time": 1179},
|
||||
{"title": "Natuurbranden Colorado", "start_time": 1179, "end_time": 1263},
|
||||
{"title": "Klimaatverandering", "start_time": 1263, "end_time": 1367},
|
||||
{"title": "Zacht weer", "start_time": 1367, "end_time": 1383},
|
||||
{"title": "Financiële balans", "start_time": 1383, "end_time": 1484},
|
||||
{"title": "Club Brugge", "start_time": 1484, "end_time": 1575},
|
||||
{"title": "Mentale gezondheid bij topsporters", "start_time": 1575, "end_time": 1728},
|
||||
{"title": "Olympische Winterspelen", "start_time": 1728, "end_time": 1873},
|
||||
{"title": "Sober oudjaar in Nederland", "start_time": 1873, "end_time": 2079.23}
|
||||
{'title': 'Explosie Turnhout', 'start_time': 70, 'end_time': 440},
|
||||
{'title': 'Jaarwisseling', 'start_time': 440, 'end_time': 1179},
|
||||
{'title': 'Natuurbranden Colorado', 'start_time': 1179, 'end_time': 1263},
|
||||
{'title': 'Klimaatverandering', 'start_time': 1263, 'end_time': 1367},
|
||||
{'title': 'Zacht weer', 'start_time': 1367, 'end_time': 1383},
|
||||
{'title': 'Financiële balans', 'start_time': 1383, 'end_time': 1484},
|
||||
{'title': 'Club Brugge', 'start_time': 1484, 'end_time': 1575},
|
||||
{'title': 'Mentale gezondheid bij topsporters', 'start_time': 1575, 'end_time': 1728},
|
||||
{'title': 'Olympische Winterspelen', 'start_time': 1728, 'end_time': 1873},
|
||||
{'title': 'Sober oudjaar in Nederland', 'start_time': 1873, 'end_time': 2079.23},
|
||||
],
|
||||
'title': 'Het journaal - Aflevering 365 (Seizoen 2021)'
|
||||
}, {}
|
||||
'title': 'Het journaal - Aflevering 365 (Seizoen 2021)',
|
||||
}, {},
|
||||
),
|
||||
(
|
||||
# test multiple thumbnails in a list
|
||||
@ -301,13 +301,13 @@ def test_search_json_ld_realworld(self):
|
||||
'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}],
|
||||
},
|
||||
{},
|
||||
)
|
||||
),
|
||||
]
|
||||
for html, expected_dict, search_json_ld_kwargs in _TESTS:
|
||||
expect_dict(
|
||||
self,
|
||||
self.ie._search_json_ld(html, None, **search_json_ld_kwargs),
|
||||
expected_dict
|
||||
expected_dict,
|
||||
)
|
||||
|
||||
def test_download_json(self):
|
||||
@ -366,7 +366,7 @@ def test_parse_html5_media_entries(self):
|
||||
'height': 740,
|
||||
'tbr': 1500,
|
||||
}],
|
||||
'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg'
|
||||
'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg',
|
||||
})
|
||||
|
||||
# from https://www.csfd.cz/
|
||||
@ -419,9 +419,9 @@ def test_parse_html5_media_entries(self):
|
||||
'height': 1080,
|
||||
}],
|
||||
'subtitles': {
|
||||
'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}]
|
||||
'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}],
|
||||
},
|
||||
'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360'
|
||||
'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360',
|
||||
})
|
||||
|
||||
# from https://tamasha.com/v/Kkdjw
|
||||
@ -452,7 +452,7 @@ def test_parse_html5_media_entries(self):
|
||||
'ext': 'mp4',
|
||||
'format_id': '144p',
|
||||
'height': 144,
|
||||
}]
|
||||
}],
|
||||
})
|
||||
|
||||
# from https://www.directvnow.com
|
||||
@ -470,7 +470,7 @@ def test_parse_html5_media_entries(self):
|
||||
'formats': [{
|
||||
'ext': 'mp4',
|
||||
'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
|
||||
}]
|
||||
}],
|
||||
})
|
||||
|
||||
# from https://www.directvnow.com
|
||||
@ -488,7 +488,7 @@ def test_parse_html5_media_entries(self):
|
||||
'formats': [{
|
||||
'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
|
||||
'ext': 'mp4',
|
||||
}]
|
||||
}],
|
||||
})
|
||||
|
||||
# from https://www.klarna.com/uk/
|
||||
@ -547,8 +547,8 @@ def test_extract_jwplayer_data_realworld(self):
|
||||
'id': 'XEgvuql4',
|
||||
'formats': [{
|
||||
'url': 'rtmp://192.138.214.154/live/sjclive',
|
||||
'ext': 'flv'
|
||||
}]
|
||||
'ext': 'flv',
|
||||
}],
|
||||
})
|
||||
|
||||
# from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/
|
||||
@ -588,8 +588,8 @@ def test_extract_jwplayer_data_realworld(self):
|
||||
'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg',
|
||||
'formats': [{
|
||||
'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv',
|
||||
'ext': 'flv'
|
||||
}]
|
||||
'ext': 'flv',
|
||||
}],
|
||||
})
|
||||
|
||||
# from http://www.indiedb.com/games/king-machine/videos
|
||||
@ -610,12 +610,12 @@ def test_extract_jwplayer_data_realworld(self):
|
||||
'formats': [{
|
||||
'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4',
|
||||
'height': 360,
|
||||
'ext': 'mp4'
|
||||
'ext': 'mp4',
|
||||
}, {
|
||||
'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4',
|
||||
'height': 720,
|
||||
'ext': 'mp4'
|
||||
}]
|
||||
'ext': 'mp4',
|
||||
}],
|
||||
})
|
||||
|
||||
def test_parse_m3u8_formats(self):
|
||||
@ -866,7 +866,7 @@ def test_parse_m3u8_formats(self):
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
}],
|
||||
{}
|
||||
{},
|
||||
),
|
||||
(
|
||||
'bipbop_16x9',
|
||||
@ -990,45 +990,45 @@ def test_parse_m3u8_formats(self):
|
||||
'en': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}],
|
||||
'fr': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}],
|
||||
'es': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}],
|
||||
'ja': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
'protocol': 'm3u8_native',
|
||||
}],
|
||||
}
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
for m3u8_file, m3u8_url, expected_formats, expected_subs in _TEST_CASES:
|
||||
with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file, encoding='utf-8') as f:
|
||||
with open(f'./test/testdata/m3u8/{m3u8_file}.m3u8', encoding='utf-8') as f:
|
||||
formats, subs = self.ie._parse_m3u8_formats_and_subtitles(
|
||||
f.read(), m3u8_url, ext='mp4')
|
||||
self.ie._sort_formats(formats)
|
||||
@ -1366,14 +1366,14 @@ def test_parse_mpd_formats(self):
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
}
|
||||
]
|
||||
},
|
||||
],
|
||||
},
|
||||
)
|
||||
),
|
||||
]
|
||||
|
||||
for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
|
||||
with open('./test/testdata/mpd/%s.mpd' % mpd_file, encoding='utf-8') as f:
|
||||
with open(f'./test/testdata/mpd/{mpd_file}.mpd', encoding='utf-8') as f:
|
||||
formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
|
||||
compat_etree_fromstring(f.read().encode()),
|
||||
mpd_base_url=mpd_base_url, mpd_url=mpd_url)
|
||||
@ -1408,7 +1408,7 @@ def test_parse_ism_formats(self):
|
||||
'sampling_rate': 48000,
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video-100',
|
||||
@ -1431,7 +1431,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401FDA0544EFFC2D002CBC40000003004000000C03C60CA80000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video-326',
|
||||
@ -1454,7 +1454,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401FDA0241FE23FFC3BC83BA44000003000400000300C03C60CA800000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video-698',
|
||||
@ -1477,7 +1477,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401FDA0350BFB97FF06AF06AD1000003000100000300300F1832A00000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video-1493',
|
||||
@ -1500,7 +1500,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401FDA011C3DE6FFF0D890D871000003000100000300300F1832A00000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video-4482',
|
||||
@ -1523,7 +1523,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401FDA01A816F97FFC1ABC1AB440000003004000000C03C60CA80000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}],
|
||||
{
|
||||
@ -1538,10 +1538,10 @@ def test_parse_ism_formats(self):
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'fourcc': 'TTML',
|
||||
'codec_private_data': ''
|
||||
}
|
||||
}
|
||||
]
|
||||
'codec_private_data': '',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
@ -1571,7 +1571,7 @@ def test_parse_ism_formats(self):
|
||||
'sampling_rate': 48000,
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'audio_deu_1-224',
|
||||
@ -1597,7 +1597,7 @@ def test_parse_ism_formats(self):
|
||||
'sampling_rate': 48000,
|
||||
'channels': 6,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-23',
|
||||
@ -1622,7 +1622,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '000000016742C00CDB06077E5C05A808080A00000300020000030009C0C02EE0177CC6300F142AE00000000168CA8DC8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-403',
|
||||
@ -1647,7 +1647,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D4014E98323B602D4040405000003000100000300320F1429380000000168EAECF2',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-680',
|
||||
@ -1672,7 +1672,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-1253',
|
||||
@ -1698,7 +1698,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-2121',
|
||||
@ -1723,7 +1723,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D401EECA0601BD80B50101014000003000400000300C83C58B6580000000168E93B3C80',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-3275',
|
||||
@ -1748,7 +1748,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D4020ECA02802DD80B501010140000003004000000C83C60C65800000000168E93B3C80',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-5300',
|
||||
@ -1773,7 +1773,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}, {
|
||||
'format_id': 'video_deu-8079',
|
||||
@ -1798,7 +1798,7 @@ def test_parse_ism_formats(self):
|
||||
'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
'nal_unit_length_field': 4,
|
||||
},
|
||||
}],
|
||||
{},
|
||||
@ -1806,7 +1806,7 @@ def test_parse_ism_formats(self):
|
||||
]
|
||||
|
||||
for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:
|
||||
with open('./test/testdata/ism/%s.Manifest' % ism_file, encoding='utf-8') as f:
|
||||
with open(f'./test/testdata/ism/{ism_file}.Manifest', encoding='utf-8') as f:
|
||||
formats, subtitles = self.ie._parse_ism_formats_and_subtitles(
|
||||
compat_etree_fromstring(f.read().encode()), ism_url=ism_url)
|
||||
self.ie._sort_formats(formats)
|
||||
@ -1827,12 +1827,12 @@ def test_parse_f4m_formats(self):
|
||||
'tbr': 2148,
|
||||
'width': 1280,
|
||||
'height': 720,
|
||||
}]
|
||||
}],
|
||||
),
|
||||
]
|
||||
|
||||
for f4m_file, f4m_url, expected_formats in _TEST_CASES:
|
||||
with open('./test/testdata/f4m/%s.f4m' % f4m_file, encoding='utf-8') as f:
|
||||
with open(f'./test/testdata/f4m/{f4m_file}.f4m', encoding='utf-8') as f:
|
||||
formats = self.ie._parse_f4m_formats(
|
||||
compat_etree_fromstring(f.read().encode()),
|
||||
f4m_url, None)
|
||||
@ -1873,13 +1873,13 @@ def test_parse_xspf(self):
|
||||
}, {
|
||||
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
|
||||
'url': 'https://example.com/track3.mp3',
|
||||
}]
|
||||
}]
|
||||
}],
|
||||
}],
|
||||
),
|
||||
]
|
||||
|
||||
for xspf_file, xspf_url, expected_entries in _TEST_CASES:
|
||||
with open('./test/testdata/xspf/%s.xspf' % xspf_file, encoding='utf-8') as f:
|
||||
with open(f'./test/testdata/xspf/{xspf_file}.xspf', encoding='utf-8') as f:
|
||||
entries = self.ie._parse_xspf(
|
||||
compat_etree_fromstring(f.read().encode()),
|
||||
xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
|
||||
@ -1902,7 +1902,7 @@ def test_response_with_expected_status_returns_content(self):
|
||||
server_thread.start()
|
||||
|
||||
(content, urlh) = self.ie._download_webpage_handle(
|
||||
'http://127.0.0.1:%d/teapot' % port, None,
|
||||
f'http://127.0.0.1:{port}/teapot', None,
|
||||
expected_status=TEAPOT_RESPONSE_STATUS)
|
||||
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
import contextlib
|
||||
import copy
|
||||
import json
|
||||
|
||||
@ -129,8 +130,8 @@ def test(inp, *expected, multi=False):
|
||||
'allow_multiple_audio_streams': multi,
|
||||
})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = map(lambda x: x['format_id'], ydl.downloaded_info_dicts)
|
||||
self.assertEqual(list(downloaded), list(expected))
|
||||
downloaded = [x['format_id'] for x in ydl.downloaded_info_dicts]
|
||||
self.assertEqual(downloaded, list(expected))
|
||||
|
||||
test('20/47', '47')
|
||||
test('20/71/worst', '35')
|
||||
@ -515,10 +516,8 @@ def test_format_filtering(self):
|
||||
self.assertEqual(downloaded_ids, ['D', 'C', 'B'])
|
||||
|
||||
ydl = YDL({'format': 'best[height<40]'})
|
||||
try:
|
||||
with contextlib.suppress(ExtractorError):
|
||||
ydl.process_ie_result(info_dict)
|
||||
except ExtractorError:
|
||||
pass
|
||||
self.assertEqual(ydl.downloaded_info_dicts, [])
|
||||
|
||||
def test_default_format_spec(self):
|
||||
@ -652,8 +651,8 @@ def test_add_extra_info(self):
|
||||
'formats': [
|
||||
{'id': 'id 1', 'height': 1080, 'width': 1920},
|
||||
{'id': 'id 2', 'height': 720},
|
||||
{'id': 'id 3'}
|
||||
]
|
||||
{'id': 'id 3'},
|
||||
],
|
||||
}
|
||||
|
||||
def test_prepare_outtmpl_and_filename(self):
|
||||
@ -773,7 +772,7 @@ def expect_same_infodict(out):
|
||||
test('%(formats)j', (json.dumps(FORMATS), None))
|
||||
test('%(formats)#j', (
|
||||
json.dumps(FORMATS, indent=4),
|
||||
json.dumps(FORMATS, indent=4).replace(':', ':').replace('"', """).replace('\n', ' ')
|
||||
json.dumps(FORMATS, indent=4).replace(':', ':').replace('"', '"').replace('\n', ' '),
|
||||
))
|
||||
test('%(title5).3B', 'á')
|
||||
test('%(title5)U', 'áéí 𝐀')
|
||||
@ -843,8 +842,8 @@ def gen():
|
||||
|
||||
# Empty filename
|
||||
test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
|
||||
# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
|
||||
# test('%(foo|)s', ('', '_')) # fixme
|
||||
# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # FIXME: ?
|
||||
# test('%(foo|)s', ('', '_')) # FIXME: ?
|
||||
|
||||
# Environment variable expansion for prepare_filename
|
||||
os.environ['__yt_dlp_var'] = 'expanded'
|
||||
@ -861,7 +860,7 @@ def gen():
|
||||
test('Hello %(title1)s', 'Hello $PATH')
|
||||
test('Hello %(title2)s', 'Hello %PATH%')
|
||||
test('%(title3)s', ('foo/bar\\test', 'foo⧸bar⧹test'))
|
||||
test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoo⧸bar⧹test' % os.path.sep))
|
||||
test('folder/%(title3)s', ('folder/foo/bar\\test', f'folder{os.path.sep}foo⧸bar⧹test'))
|
||||
|
||||
def test_format_note(self):
|
||||
ydl = YoutubeDL()
|
||||
@ -883,22 +882,22 @@ def run(self, info):
|
||||
f.write('EXAMPLE')
|
||||
return [info['filepath']], info
|
||||
|
||||
def run_pp(params, PP):
|
||||
def run_pp(params, pp):
|
||||
with open(filename, 'w') as f:
|
||||
f.write('EXAMPLE')
|
||||
ydl = YoutubeDL(params)
|
||||
ydl.add_post_processor(PP())
|
||||
ydl.add_post_processor(pp())
|
||||
ydl.post_process(filename, {'filepath': filename})
|
||||
|
||||
run_pp({'keepvideo': True}, SimplePP)
|
||||
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
|
||||
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
|
||||
self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
|
||||
self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
|
||||
os.unlink(filename)
|
||||
os.unlink(audiofile)
|
||||
|
||||
run_pp({'keepvideo': False}, SimplePP)
|
||||
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
|
||||
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
|
||||
self.assertFalse(os.path.exists(filename), f'{filename} exists')
|
||||
self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
|
||||
os.unlink(audiofile)
|
||||
|
||||
class ModifierPP(PostProcessor):
|
||||
@ -908,7 +907,7 @@ def run(self, info):
|
||||
return [], info
|
||||
|
||||
run_pp({'keepvideo': False}, ModifierPP)
|
||||
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
|
||||
self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
|
||||
os.unlink(filename)
|
||||
|
||||
def test_match_filter(self):
|
||||
@ -920,7 +919,7 @@ def test_match_filter(self):
|
||||
'duration': 30,
|
||||
'filesize': 10 * 1024,
|
||||
'playlist_id': '42',
|
||||
'uploader': "變態妍字幕版 太妍 тест",
|
||||
'uploader': '變態妍字幕版 太妍 тест',
|
||||
'creator': "тест ' 123 ' тест--",
|
||||
'webpage_url': 'http://example.com/watch?v=shenanigans',
|
||||
}
|
||||
@ -933,7 +932,7 @@ def test_match_filter(self):
|
||||
'description': 'foo',
|
||||
'filesize': 5 * 1024,
|
||||
'playlist_id': '43',
|
||||
'uploader': "тест 123",
|
||||
'uploader': 'тест 123',
|
||||
'webpage_url': 'http://example.com/watch?v=SHENANIGANS',
|
||||
}
|
||||
videos = [first, second]
|
||||
@ -1180,7 +1179,7 @@ def _real_extract(self, url):
|
||||
})
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': 'Video %s' % video_id,
|
||||
'title': f'Video {video_id}',
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
@ -1194,8 +1193,8 @@ def _entries(self):
|
||||
'_type': 'url_transparent',
|
||||
'ie_key': VideoIE.ie_key(),
|
||||
'id': video_id,
|
||||
'url': 'video:%s' % video_id,
|
||||
'title': 'Video Transparent %s' % video_id,
|
||||
'url': f'video:{video_id}',
|
||||
'title': f'Video Transparent {video_id}',
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
@ -87,7 +87,7 @@ def test_decrypt_text(self):
|
||||
password = intlist_to_bytes(self.key).decode()
|
||||
encrypted = base64.b64encode(
|
||||
intlist_to_bytes(self.iv[:8])
|
||||
+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
|
||||
+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae',
|
||||
).decode()
|
||||
decrypted = (aes_decrypt_text(encrypted, password, 16))
|
||||
self.assertEqual(decrypted, self.secret_msg)
|
||||
@ -95,7 +95,7 @@ def test_decrypt_text(self):
|
||||
password = intlist_to_bytes(self.key).decode()
|
||||
encrypted = base64.b64encode(
|
||||
intlist_to_bytes(self.iv[:8])
|
||||
+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
|
||||
+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83',
|
||||
).decode()
|
||||
decrypted = (aes_decrypt_text(encrypted, password, 32))
|
||||
self.assertEqual(decrypted, self.secret_msg)
|
||||
@ -132,16 +132,16 @@ def test_pad_block(self):
|
||||
block = [0x21, 0xA0, 0x43, 0xFF]
|
||||
|
||||
self.assertEqual(pad_block(block, 'pkcs7'),
|
||||
block + [0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C])
|
||||
[*block, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C])
|
||||
|
||||
self.assertEqual(pad_block(block, 'iso7816'),
|
||||
block + [0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
|
||||
[*block, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
|
||||
|
||||
self.assertEqual(pad_block(block, 'whitespace'),
|
||||
block + [0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20])
|
||||
[*block, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20])
|
||||
|
||||
self.assertEqual(pad_block(block, 'zero'),
|
||||
block + [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
|
||||
[*block, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
|
||||
|
||||
block = list(range(16))
|
||||
for mode in ('pkcs7', 'iso7816', 'whitespace', 'zero'):
|
||||
|
@ -15,8 +15,8 @@
|
||||
from yt_dlp.compat import (
|
||||
compat_etree_fromstring,
|
||||
compat_expanduser,
|
||||
compat_urllib_parse_unquote,
|
||||
compat_urllib_parse_urlencode,
|
||||
compat_urllib_parse_unquote, # noqa: TID251
|
||||
compat_urllib_parse_urlencode, # noqa: TID251
|
||||
)
|
||||
from yt_dlp.compat.urllib.request import getproxies
|
||||
|
||||
@ -24,15 +24,15 @@
|
||||
class TestCompat(unittest.TestCase):
|
||||
def test_compat_passthrough(self):
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
compat.compat_basestring
|
||||
_ = compat.compat_basestring
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
compat.WINDOWS_VT_MODE
|
||||
_ = compat.WINDOWS_VT_MODE
|
||||
|
||||
self.assertEqual(urllib.request.getproxies, getproxies)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
compat.compat_pycrypto_AES # Must not raise error
|
||||
_ = compat.compat_pycrypto_AES # Must not raise error
|
||||
|
||||
def test_compat_expanduser(self):
|
||||
old_home = os.environ.get('HOME')
|
||||
|
@ -71,7 +71,7 @@ def _generate_expected_groups():
|
||||
Path('/etc/yt-dlp.conf'),
|
||||
Path('/etc/yt-dlp/config'),
|
||||
Path('/etc/yt-dlp/config.txt'),
|
||||
]
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
|
@ -106,7 +106,7 @@ def test_chrome_cookie_decryptor_linux_v11(self):
|
||||
|
||||
def test_chrome_cookie_decryptor_windows_v10(self):
|
||||
with MonkeyPatch(cookies, {
|
||||
'_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&'
|
||||
'_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&',
|
||||
}):
|
||||
encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
|
||||
value = '32101439'
|
||||
@ -121,17 +121,17 @@ def test_chrome_cookie_decryptor_mac_v10(self):
|
||||
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
||||
|
||||
def test_safari_cookie_parsing(self):
|
||||
cookies = \
|
||||
b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y' \
|
||||
b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H' \
|
||||
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A' \
|
||||
b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01' \
|
||||
b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00' \
|
||||
b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00('
|
||||
cookies = (
|
||||
b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y'
|
||||
b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H'
|
||||
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A'
|
||||
b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01'
|
||||
b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00'
|
||||
b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(')
|
||||
|
||||
jar = parse_safari_cookies(cookies)
|
||||
self.assertEqual(len(jar), 1)
|
||||
cookie = list(jar)[0]
|
||||
cookie = next(iter(jar))
|
||||
self.assertEqual(cookie.domain, 'localhost')
|
||||
self.assertEqual(cookie.port, None)
|
||||
self.assertEqual(cookie.path, '/')
|
||||
@ -164,7 +164,7 @@ def _run_tests(self, *cases):
|
||||
attributes = {
|
||||
key: value
|
||||
for key, value in dict(morsel).items()
|
||||
if value != ""
|
||||
if value != ''
|
||||
}
|
||||
self.assertEqual(attributes, expected_attributes, message)
|
||||
|
||||
@ -174,133 +174,133 @@ def test_parsing(self):
|
||||
self._run_tests(
|
||||
# Copied from https://github.com/python/cpython/blob/v3.10.7/Lib/test/test_http_cookies.py
|
||||
(
|
||||
"Test basic cookie",
|
||||
"chips=ahoy; vienna=finger",
|
||||
{"chips": "ahoy", "vienna": "finger"},
|
||||
'Test basic cookie',
|
||||
'chips=ahoy; vienna=finger',
|
||||
{'chips': 'ahoy', 'vienna': 'finger'},
|
||||
),
|
||||
(
|
||||
"Test quoted cookie",
|
||||
'Test quoted cookie',
|
||||
'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
|
||||
{"keebler": 'E=mc2; L="Loves"; fudge=\012;'},
|
||||
{'keebler': 'E=mc2; L="Loves"; fudge=\012;'},
|
||||
),
|
||||
(
|
||||
"Allow '=' in an unquoted value",
|
||||
"keebler=E=mc2",
|
||||
{"keebler": "E=mc2"},
|
||||
'keebler=E=mc2',
|
||||
{'keebler': 'E=mc2'},
|
||||
),
|
||||
(
|
||||
"Allow cookies with ':' in their name",
|
||||
"key:term=value:term",
|
||||
{"key:term": "value:term"},
|
||||
'key:term=value:term',
|
||||
{'key:term': 'value:term'},
|
||||
),
|
||||
(
|
||||
"Allow '[' and ']' in cookie values",
|
||||
"a=b; c=[; d=r; f=h",
|
||||
{"a": "b", "c": "[", "d": "r", "f": "h"},
|
||||
'a=b; c=[; d=r; f=h',
|
||||
{'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'},
|
||||
),
|
||||
(
|
||||
"Test basic cookie attributes",
|
||||
'Test basic cookie attributes',
|
||||
'Customer="WILE_E_COYOTE"; Version=1; Path=/acme',
|
||||
{"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})},
|
||||
{'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
|
||||
),
|
||||
(
|
||||
"Test flag only cookie attributes",
|
||||
'Test flag only cookie attributes',
|
||||
'Customer="WILE_E_COYOTE"; HttpOnly; Secure',
|
||||
{"Customer": ("WILE_E_COYOTE", {"httponly": True, "secure": True})},
|
||||
{'Customer': ('WILE_E_COYOTE', {'httponly': True, 'secure': True})},
|
||||
),
|
||||
(
|
||||
"Test flag only attribute with values",
|
||||
"eggs=scrambled; httponly=foo; secure=bar; Path=/bacon",
|
||||
{"eggs": ("scrambled", {"httponly": "foo", "secure": "bar", "path": "/bacon"})},
|
||||
'Test flag only attribute with values',
|
||||
'eggs=scrambled; httponly=foo; secure=bar; Path=/bacon',
|
||||
{'eggs': ('scrambled', {'httponly': 'foo', 'secure': 'bar', 'path': '/bacon'})},
|
||||
),
|
||||
(
|
||||
"Test special case for 'expires' attribute, 4 digit year",
|
||||
'Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT',
|
||||
{"Customer": ("W", {"expires": "Wed, 01 Jan 2010 00:00:00 GMT"})},
|
||||
{'Customer': ('W', {'expires': 'Wed, 01 Jan 2010 00:00:00 GMT'})},
|
||||
),
|
||||
(
|
||||
"Test special case for 'expires' attribute, 2 digit year",
|
||||
'Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT',
|
||||
{"Customer": ("W", {"expires": "Wed, 01 Jan 98 00:00:00 GMT"})},
|
||||
{'Customer': ('W', {'expires': 'Wed, 01 Jan 98 00:00:00 GMT'})},
|
||||
),
|
||||
(
|
||||
"Test extra spaces in keys and values",
|
||||
"eggs = scrambled ; secure ; path = bar ; foo=foo ",
|
||||
{"eggs": ("scrambled", {"secure": True, "path": "bar"}), "foo": "foo"},
|
||||
'Test extra spaces in keys and values',
|
||||
'eggs = scrambled ; secure ; path = bar ; foo=foo ',
|
||||
{'eggs': ('scrambled', {'secure': True, 'path': 'bar'}), 'foo': 'foo'},
|
||||
),
|
||||
(
|
||||
"Test quoted attributes",
|
||||
'Test quoted attributes',
|
||||
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"',
|
||||
{"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})}
|
||||
{'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
|
||||
),
|
||||
# Our own tests that CPython passes
|
||||
(
|
||||
"Allow ';' in quoted value",
|
||||
'chips="a;hoy"; vienna=finger',
|
||||
{"chips": "a;hoy", "vienna": "finger"},
|
||||
{'chips': 'a;hoy', 'vienna': 'finger'},
|
||||
),
|
||||
(
|
||||
"Keep only the last set value",
|
||||
"a=c; a=b",
|
||||
{"a": "b"},
|
||||
'Keep only the last set value',
|
||||
'a=c; a=b',
|
||||
{'a': 'b'},
|
||||
),
|
||||
)
|
||||
|
||||
def test_lenient_parsing(self):
|
||||
self._run_tests(
|
||||
(
|
||||
"Ignore and try to skip invalid cookies",
|
||||
'Ignore and try to skip invalid cookies',
|
||||
'chips={"ahoy;": 1}; vienna="finger;"',
|
||||
{"vienna": "finger;"},
|
||||
{'vienna': 'finger;'},
|
||||
),
|
||||
(
|
||||
"Ignore cookies without a name",
|
||||
"a=b; unnamed; c=d",
|
||||
{"a": "b", "c": "d"},
|
||||
'Ignore cookies without a name',
|
||||
'a=b; unnamed; c=d',
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Ignore '\"' cookie without name",
|
||||
'a=b; "; c=d',
|
||||
{"a": "b", "c": "d"},
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Skip all space separated values",
|
||||
"x a=b c=d x; e=f",
|
||||
{"a": "b", "c": "d", "e": "f"},
|
||||
'Skip all space separated values',
|
||||
'x a=b c=d x; e=f',
|
||||
{'a': 'b', 'c': 'd', 'e': 'f'},
|
||||
),
|
||||
(
|
||||
"Skip all space separated values",
|
||||
'Skip all space separated values',
|
||||
'x a=b; data={"complex": "json", "with": "key=value"}; x c=d x',
|
||||
{"a": "b", "c": "d"},
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Expect quote mending",
|
||||
'Expect quote mending',
|
||||
'a=b; invalid="; c=d',
|
||||
{"a": "b", "c": "d"},
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Reset morsel after invalid to not capture attributes",
|
||||
"a=b; invalid; Version=1; c=d",
|
||||
{"a": "b", "c": "d"},
|
||||
'Reset morsel after invalid to not capture attributes',
|
||||
'a=b; invalid; Version=1; c=d',
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Reset morsel after invalid to not capture attributes",
|
||||
"a=b; $invalid; $Version=1; c=d",
|
||||
{"a": "b", "c": "d"},
|
||||
'Reset morsel after invalid to not capture attributes',
|
||||
'a=b; $invalid; $Version=1; c=d',
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Continue after non-flag attribute without value",
|
||||
"a=b; path; Version=1; c=d",
|
||||
{"a": "b", "c": "d"},
|
||||
'Continue after non-flag attribute without value',
|
||||
'a=b; path; Version=1; c=d',
|
||||
{'a': 'b', 'c': 'd'},
|
||||
),
|
||||
(
|
||||
"Allow cookie attributes with `$` prefix",
|
||||
'Allow cookie attributes with `$` prefix',
|
||||
'Customer="WILE_E_COYOTE"; $Version=1; $Secure; $Path=/acme',
|
||||
{"Customer": ("WILE_E_COYOTE", {"version": "1", "secure": True, "path": "/acme"})},
|
||||
{'Customer': ('WILE_E_COYOTE', {'version': '1', 'secure': True, 'path': '/acme'})},
|
||||
),
|
||||
(
|
||||
"Invalid Morsel keys should not result in an error",
|
||||
"Key=Value; [Invalid]=Value; Another=Value",
|
||||
{"Key": "Value", "Another": "Value"},
|
||||
'Invalid Morsel keys should not result in an error',
|
||||
'Key=Value; [Invalid]=Value; Another=Value',
|
||||
{'Key': 'Value', 'Another': 'Value'},
|
||||
),
|
||||
)
|
||||
|
@ -94,7 +94,7 @@ def test_template(self):
|
||||
'playlist', [] if is_playlist else [test_case])
|
||||
|
||||
def print_skipping(reason):
|
||||
print('Skipping %s: %s' % (test_case['name'], reason))
|
||||
print('Skipping {}: {}'.format(test_case['name'], reason))
|
||||
self.skipTest(reason)
|
||||
|
||||
if not ie.working():
|
||||
@ -117,7 +117,7 @@ def print_skipping(reason):
|
||||
|
||||
for other_ie in other_ies:
|
||||
if not other_ie.working():
|
||||
print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
|
||||
print_skipping(f'test depends on {other_ie.ie_key()}IE, marked as not WORKING')
|
||||
|
||||
params = get_params(test_case.get('params', {}))
|
||||
params['outtmpl'] = tname + '_' + params['outtmpl']
|
||||
@ -148,10 +148,7 @@ def match_exception(err):
|
||||
return False
|
||||
if err.__class__.__name__ == expected_exception:
|
||||
return True
|
||||
for exc in err.exc_info:
|
||||
if exc.__class__.__name__ == expected_exception:
|
||||
return True
|
||||
return False
|
||||
return any(exc.__class__.__name__ == expected_exception for exc in err.exc_info)
|
||||
|
||||
def try_rm_tcs_files(tcs=None):
|
||||
if tcs is None:
|
||||
@ -181,7 +178,7 @@ def try_rm_tcs_files(tcs=None):
|
||||
raise
|
||||
|
||||
if try_num == RETRIES:
|
||||
report_warning('%s failed due to network errors, skipping...' % tname)
|
||||
report_warning(f'{tname} failed due to network errors, skipping...')
|
||||
return
|
||||
|
||||
print(f'Retrying: {try_num} failed tries\n\n##########\n\n')
|
||||
@ -244,9 +241,8 @@ def try_rm_tcs_files(tcs=None):
|
||||
got_fsize = os.path.getsize(tc_filename)
|
||||
assertGreaterEqual(
|
||||
self, got_fsize, expected_minsize,
|
||||
'Expected %s to be at least %s, but it\'s only %s ' %
|
||||
(tc_filename, format_bytes(expected_minsize),
|
||||
format_bytes(got_fsize)))
|
||||
f'Expected {tc_filename} to be at least {format_bytes(expected_minsize)}, '
|
||||
f'but it\'s only {format_bytes(got_fsize)} ')
|
||||
if 'md5' in tc:
|
||||
md5_for_file = _file_md5(tc_filename)
|
||||
self.assertEqual(tc['md5'], md5_for_file)
|
||||
@ -255,7 +251,7 @@ def try_rm_tcs_files(tcs=None):
|
||||
info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
self.assertTrue(
os.path.exists(info_json_fn),
'Missing info file %s' % info_json_fn)
f'Missing info file {info_json_fn}')
with open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof)
expect_info_dict(self, info_dict, tc.get('info_dict', {}))
@ -38,9 +38,9 @@ def send_content_range(self, total=None):
end = int(mobj.group(2))
valid_range = start is not None and end is not None
if valid_range:
content_range = 'bytes %d-%d' % (start, end)
content_range = f'bytes {start}-{end}'
if total:
content_range += '/%d' % total
content_range += f'/{total}'
self.send_header('Content-Range', content_range)
return (end - start + 1) if valid_range else total
@ -84,7 +84,7 @@ def download(self, params, ep):
filename = 'testfile.mp4'
try_rm(encodeFilename(filename))
self.assertTrue(downloader.real_download(filename, {
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
'url': f'http://127.0.0.1:{self.port}/{ep}',
}), ep)
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
try_rm(encodeFilename(filename))
@ -105,7 +105,7 @@ def __init__(self, socket, ssl_context, server_hostname=None, suppress_ragged_eo
self.incoming,
self.outgoing,
server_hostname=server_hostname,
server_side=server_side
server_side=server_side,
)
self._ssl_io_loop(self.sslobj.do_handshake)
@ -333,7 +333,7 @@ def test_http_connect_auth(self, handler, ctx):
@pytest.mark.skip_handler(
'Requests',
'bug in urllib3 causes unclosed socket: https://github.com/urllib3/urllib3/issues/3374'
'bug in urllib3 causes unclosed socket: https://github.com/urllib3/urllib3/issues/3374',
)
def test_http_connect_bad_auth(self, handler, ctx):
with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
@ -29,11 +29,11 @@ def error(self, msg):
@is_download_test
class TestIqiyiSDKInterpreter(unittest.TestCase):
def test_iqiyi_sdk_interpreter(self):
'''
"""
Test the functionality of IqiyiSDKInterpreter by trying to log in
If `sign` is incorrect, /validate call throws an HTTP 556 error
'''
"""
logger = WarningLogger()
ie = IqiyiIE(FakeYDL({'logger': logger}))
ie._perform_login('foo', 'bar')
@ -21,7 +21,7 @@ def test_netrc_present(self):
continue
self.assertTrue(
ie._NETRC_MACHINE,
'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME)
f'Extractor {ie.IE_NAME} supports login, but is missing a _NETRC_MACHINE property')
if __name__ == '__main__':
@ -375,10 +375,10 @@ def test_raise_http_error(self, handler):
with handler() as rh:
for bad_status in (400, 500, 599, 302):
with pytest.raises(HTTPError):
validate_and_send(rh, Request('http://127.0.0.1:%d/gen_%d' % (self.http_port, bad_status)))
validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_{bad_status}'))
# Should not raise an error
validate_and_send(rh, Request('http://127.0.0.1:%d/gen_200' % self.http_port)).close()
validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200')).close()
def test_response_url(self, handler):
with handler() as rh:
@ -472,7 +472,7 @@ def test_redirect_loop(self, handler):
def test_incompleteread(self, handler):
with handler(timeout=2) as rh:
with pytest.raises(IncompleteRead, match='13 bytes read, 234221 more expected'):
validate_and_send(rh, Request('http://127.0.0.1:%d/incompleteread' % self.http_port)).read()
validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/incompleteread')).read()
def test_cookies(self, handler):
cookiejar = YoutubeDLCookieJar()
@ -740,7 +740,7 @@ class TestRequestHandlerMisc:
@pytest.mark.parametrize('handler,logger_name', [
('Requests', 'urllib3'),
('Websockets', 'websockets.client'),
('Websockets', 'websockets.server')
('Websockets', 'websockets.server'),
], indirect=['handler'])
def test_remove_logging_handler(self, handler, logger_name):
# Ensure any logging handlers, which may contain a YoutubeDL instance,
@ -794,7 +794,7 @@ def test_verify_cert_error_text(self, handler):
with handler() as rh:
with pytest.raises(
CertificateVerifyError,
match=r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed: self.signed certificate'
match=r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed: self.signed certificate',
):
validate_and_send(rh, Request(f'https://127.0.0.1:{self.https_port}/headers'))
@ -804,14 +804,14 @@ def test_verify_cert_error_text(self, handler):
(
Request('http://127.0.0.1', method='GET\n'),
'method can\'t contain control characters',
lambda v: v < (3, 7, 9) or (3, 8, 0) <= v < (3, 8, 5)
lambda v: v < (3, 7, 9) or (3, 8, 0) <= v < (3, 8, 5),
),
# https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1265
# bpo-38576: Check implemented in 3.7.8+, 3.8.3+
(
Request('http://127.0.0. 1', method='GET'),
'URL can\'t contain control characters',
lambda v: v < (3, 7, 8) or (3, 8, 0) <= v < (3, 8, 3)
lambda v: v < (3, 7, 8) or (3, 8, 0) <= v < (3, 8, 3),
),
# https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1288C31-L1288C50
(Request('http://127.0.0.1', headers={'foo\n': 'bar'}), 'Invalid header name', None),
@ -840,7 +840,7 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
(lambda: requests.exceptions.InvalidHeader(), RequestError),
# catch-all: https://github.com/psf/requests/blob/main/src/requests/adapters.py#L535
(lambda: urllib3.exceptions.HTTPError(), TransportError),
(lambda: requests.exceptions.RequestException(), RequestError)
(lambda: requests.exceptions.RequestException(), RequestError),
# (lambda: requests.exceptions.TooManyRedirects(), HTTPError) - Needs a response object
])
def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
@ -868,12 +868,12 @@ def request(self, *args, **kwargs):
(
lambda: urllib3.exceptions.ProtocolError('error', http.client.IncompleteRead(partial=b'abc', expected=4)),
IncompleteRead,
'3 bytes read, 4 more expected'
'3 bytes read, 4 more expected',
),
(
lambda: urllib3.exceptions.ProtocolError('error', urllib3.exceptions.IncompleteRead(partial=3, expected=5)),
IncompleteRead,
'3 bytes read, 5 more expected'
'3 bytes read, 5 more expected',
),
])
def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
@ -1125,7 +1125,7 @@ class HTTPSupportedRH(ValidationRH):
('https', False, {}),
]),
(NoCheckRH, [('http', False, {})]),
(ValidationRH, [('http', UnsupportedRequest, {})])
(ValidationRH, [('http', UnsupportedRequest, {})]),
]
PROXY_SCHEME_TESTS = [
@ -1219,7 +1219,7 @@ class HTTPSupportedRH(ValidationRH):
({'impersonate': ImpersonateTarget('chrome', None, None, None)}, False),
({'impersonate': ImpersonateTarget(None, None, None, None)}, False),
({'impersonate': ImpersonateTarget()}, False),
({'impersonate': 'chrome'}, AssertionError)
({'impersonate': 'chrome'}, AssertionError),
]),
(NoCheckRH, 'http', [
({'cookiejar': 'notacookiejar'}, False),
@ -1235,7 +1235,7 @@ class HTTPSupportedRH(ValidationRH):
('Urllib', False, 'http'),
('Requests', False, 'http'),
('CurlCFFI', False, 'http'),
('Websockets', False, 'ws')
('Websockets', False, 'ws'),
], indirect=['handler'])
def test_no_proxy(self, handler, fail, scheme):
run_validation(handler, fail, Request(f'{scheme}://', proxies={'no': '127.0.0.1,github.com'}))
@ -1246,7 +1246,7 @@ def test_no_proxy(self, handler, fail, scheme):
(HTTPSupportedRH, 'http'),
('Requests', 'http'),
('CurlCFFI', 'http'),
('Websockets', 'ws')
('Websockets', 'ws'),
], indirect=['handler'])
def test_empty_proxy(self, handler, scheme):
run_validation(handler, False, Request(f'{scheme}://', proxies={scheme: None}))
@ -1258,7 +1258,7 @@ def test_empty_proxy(self, handler, scheme):
(HTTPSupportedRH, 'http'),
('Requests', 'http'),
('CurlCFFI', 'http'),
('Websockets', 'ws')
('Websockets', 'ws'),
], indirect=['handler'])
def test_invalid_proxy_url(self, handler, scheme, proxy_url):
run_validation(handler, UnsupportedRequest, Request(f'{scheme}://', proxies={scheme: proxy_url}))
@ -1474,7 +1474,7 @@ def test_compat_opener(self):
|
||||
@pytest.mark.parametrize('proxy,expected', [
|
||||
('http://127.0.0.1:8080', {'all': 'http://127.0.0.1:8080'}),
|
||||
('', {'all': '__noproxy__'}),
|
||||
(None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'}) # env, set https
|
||||
(None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'}), # env, set https
|
||||
])
|
||||
def test_proxy(self, proxy, expected, monkeypatch):
|
||||
monkeypatch.setenv('HTTP_PROXY', 'http://127.0.0.1:8081')
|
||||
@ -1546,7 +1546,7 @@ def _send(self, request: Request):
|
||||
with FakeImpersonationRHYDL() as ydl:
|
||||
with pytest.raises(
|
||||
RequestError,
|
||||
match=r'Impersonate target "test" is not available'
|
||||
match=r'Impersonate target "test" is not available',
|
||||
):
|
||||
ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
|
||||
|
||||
@ -1558,7 +1558,7 @@ def _send(self, request: Request):
|
||||
pass
|
||||
|
||||
_SUPPORTED_URL_SCHEMES = ('http',)
|
||||
_SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc',): 'test'}
|
||||
_SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc'): 'test'}
|
||||
_SUPPORTED_PROXY_SCHEMES = None
|
||||
|
||||
super().__init__(*args, **kwargs)
|
||||
@ -1567,14 +1567,14 @@ def _send(self, request: Request):
|
||||
with FakeHTTPRHYDL() as ydl:
|
||||
with pytest.raises(
|
||||
RequestError,
|
||||
match=r'Impersonate target "test" is not available'
|
||||
match=r'Impersonate target "test" is not available',
|
||||
):
|
||||
ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
|
||||
|
||||
def test_raise_impersonate_error(self):
|
||||
with pytest.raises(
|
||||
YoutubeDLError,
|
||||
match=r'Impersonate target "test" is not available'
|
||||
match=r'Impersonate target "test" is not available',
|
||||
):
|
||||
FakeYDL({'impersonate': ImpersonateTarget('test', None, None, None)})
|
||||
|
||||
@ -1592,7 +1592,7 @@ def _send(self, request: Request):
|
||||
monkeypatch.setattr(FakeYDL, 'build_request_director', lambda cls, handlers, preferences=None: brh(cls, handlers=[IRH]))
|
||||
|
||||
with FakeYDL({
|
||||
'impersonate': ImpersonateTarget('abc', None, None, None)
|
||||
'impersonate': ImpersonateTarget('abc', None, None, None),
|
||||
}) as ydl:
|
||||
rh = self.build_handler(ydl, IRH)
|
||||
assert rh.impersonate == ImpersonateTarget('abc', None, None, None)
|
||||
@ -1604,7 +1604,7 @@ class TestRH(ImpersonateRequestHandler):
|
||||
def _send(self, request: Request):
|
||||
pass
|
||||
_SUPPORTED_URL_SCHEMES = ('http',)
|
||||
_SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client,): 'test'}
|
||||
_SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client): 'test'}
|
||||
RH_KEY = target_client
|
||||
RH_NAME = target_client
|
||||
handlers.append(TestRH)
|
||||
@ -1614,7 +1614,7 @@ def _send(self, request: Request):
|
||||
assert set(ydl._get_available_impersonate_targets()) == {
|
||||
(ImpersonateTarget('xyz'), 'xyz'),
|
||||
(ImpersonateTarget('abc'), 'abc'),
|
||||
(ImpersonateTarget('asd'), 'asd')
|
||||
(ImpersonateTarget('asd'), 'asd'),
|
||||
}
|
||||
assert ydl._impersonate_target_available(ImpersonateTarget('abc'))
|
||||
assert ydl._impersonate_target_available(ImpersonateTarget())
|
||||
@ -1837,7 +1837,7 @@ def test_copy(self):
|
||||
extensions={'cookiejar': CookieJar()},
|
||||
headers={'Accept-Encoding': 'br'},
|
||||
proxies={'http': 'http://127.0.0.1'},
|
||||
data=[b'123']
|
||||
data=[b'123'],
|
||||
)
|
||||
req_copy = req.copy()
|
||||
assert req_copy is not req
|
||||
@ -1863,7 +1863,7 @@ class AnotherRequest(Request):
|
||||
assert isinstance(req.copy(), AnotherRequest)
|
||||
|
||||
def test_url(self):
|
||||
req = Request(url='https://фtest.example.com/ some spaceв?ä=c',)
|
||||
req = Request(url='https://фtest.example.com/ some spaceв?ä=c')
|
||||
assert req.url == 'https://xn--test-z6d.example.com/%20some%20space%D0%B2?%C3%A4=c'
|
||||
|
||||
assert Request(url='//example.com').url == 'http://example.com'
|
||||
@ -1878,7 +1878,7 @@ class TestResponse:
|
||||
('custom', 200, 'custom'),
|
||||
(None, 404, 'Not Found'), # fallback status
|
||||
('', 403, 'Forbidden'),
|
||||
(None, 999, None)
|
||||
(None, 999, None),
|
||||
])
|
||||
def test_reason(self, reason, status, expected):
|
||||
res = Response(io.BytesIO(b''), url='test://', headers={}, status=status, reason=reason)
|
||||
@ -1933,7 +1933,7 @@ def test_target_from_str(self, target_str, expected):
|
||||
|
||||
@pytest.mark.parametrize('target_str', [
|
||||
'-120', ':-12.0', '-12:-12', '-:-',
|
||||
'::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:'
|
||||
'::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:',
|
||||
])
|
||||
def test_target_from_invalid_str(self, target_str):
|
||||
with pytest.raises(ValueError):
|
||||
@ -1949,7 +1949,7 @@ def test_target_from_invalid_str(self, target_str):
|
||||
(ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
|
||||
(ImpersonateTarget('abc', None, 'xyz'), 'abc:xyz'),
|
||||
(ImpersonateTarget(None, None, 'xyz', '6.5'), ':xyz-6.5'),
|
||||
(ImpersonateTarget('abc', ), 'abc'),
|
||||
(ImpersonateTarget('abc'), 'abc'),
|
||||
(ImpersonateTarget(None, None, None, None), ''),
|
||||
])
|
||||
def test_str(self, target, expected):
|
||||
|
@ -39,7 +39,7 @@ def test_select_proxy(self):
|
||||
proxies = {
|
||||
'all': 'socks5://example.com',
|
||||
'http': 'http://example.com:1080',
|
||||
'no': 'bypass.example.com,yt-dl.org'
|
||||
'no': 'bypass.example.com,yt-dl.org',
|
||||
}
|
||||
|
||||
assert select_proxy('https://example.com', proxies) == proxies['all']
|
||||
@ -54,7 +54,7 @@ def test_select_proxy(self):
|
||||
'port': 1080,
|
||||
'rdns': True,
|
||||
'username': None,
|
||||
'password': None
|
||||
'password': None,
|
||||
}),
|
||||
('socks5://user:@example.com:5555', {
|
||||
'proxytype': ProxyType.SOCKS5,
|
||||
@ -62,7 +62,7 @@ def test_select_proxy(self):
|
||||
'port': 5555,
|
||||
'rdns': False,
|
||||
'username': 'user',
|
||||
'password': ''
|
||||
'password': '',
|
||||
}),
|
||||
('socks4://u%40ser:pa%20ss@127.0.0.1:1080', {
|
||||
'proxytype': ProxyType.SOCKS4,
|
||||
@ -70,7 +70,7 @@ def test_select_proxy(self):
|
||||
'port': 1080,
|
||||
'rdns': False,
|
||||
'username': 'u@ser',
|
||||
'password': 'pa ss'
|
||||
'password': 'pa ss',
|
||||
}),
|
||||
('socks4a://:pa%20ss@127.0.0.1', {
|
||||
'proxytype': ProxyType.SOCKS4A,
|
||||
@ -78,8 +78,8 @@ def test_select_proxy(self):
|
||||
'port': 1080,
|
||||
'rdns': True,
|
||||
'username': '',
|
||||
'password': 'pa ss'
|
||||
})
|
||||
'password': 'pa ss',
|
||||
}),
|
||||
])
|
||||
def test_make_socks_proxy_opts(self, socks_proxy, expected):
|
||||
assert make_socks_proxy_opts(socks_proxy) == expected
|
||||
|
@ -27,7 +27,7 @@ def test_default_overwrites(self):
|
||||
[
|
||||
sys.executable, 'yt_dlp/__main__.py',
|
||||
'-o', 'test.webm',
|
||||
'https://www.youtube.com/watch?v=jNQXAC9IVRw'
|
||||
'https://www.youtube.com/watch?v=jNQXAC9IVRw',
|
||||
], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
sout, serr = outp.communicate()
|
||||
self.assertTrue(b'has already been downloaded' in sout)
|
||||
@ -39,7 +39,7 @@ def test_yes_overwrites(self):
|
||||
[
|
||||
sys.executable, 'yt_dlp/__main__.py', '--yes-overwrites',
|
||||
'-o', 'test.webm',
|
||||
'https://www.youtube.com/watch?v=jNQXAC9IVRw'
|
||||
'https://www.youtube.com/watch?v=jNQXAC9IVRw',
|
||||
], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
sout, serr = outp.communicate()
|
||||
self.assertTrue(b'has already been downloaded' not in sout)
|
||||
|
@ -31,7 +31,7 @@ def test_extractor_classes(self):
|
||||
|
||||
# don't load modules with underscore prefix
|
||||
self.assertFalse(
|
||||
f'{PACKAGE_NAME}.extractor._ignore' in sys.modules.keys(),
|
||||
f'{PACKAGE_NAME}.extractor._ignore' in sys.modules,
|
||||
'loaded module beginning with underscore')
|
||||
self.assertNotIn('IgnorePluginIE', plugins_ie.keys())
|
||||
|
||||
|
@ -59,7 +59,7 @@ def hook_two(self, filename):
|
||||
|
||||
def hook_three(self, filename):
|
||||
self.files.append(filename)
|
||||
raise Exception('Test exception for \'%s\'' % filename)
|
||||
raise Exception(f'Test exception for \'{filename}\'')
|
||||
|
||||
def tearDown(self):
|
||||
for f in self.files:
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
|
||||
from yt_dlp import YoutubeDL
|
||||
from yt_dlp.compat import compat_shlex_quote
|
||||
from yt_dlp.utils import shell_quote
|
||||
from yt_dlp.postprocessor import (
|
||||
ExecPP,
|
||||
FFmpegThumbnailsConvertorPP,
|
||||
@ -65,7 +65,7 @@ class TestExec(unittest.TestCase):
|
||||
def test_parse_cmd(self):
|
||||
pp = ExecPP(YoutubeDL(), '')
|
||||
info = {'filepath': 'file name'}
|
||||
cmd = 'echo %s' % compat_shlex_quote(info['filepath'])
|
||||
cmd = 'echo {}'.format(shell_quote(info['filepath']))
|
||||
|
||||
self.assertEqual(pp.parse_cmd('echo', info), cmd)
|
||||
self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
|
||||
@ -125,7 +125,8 @@ def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(30, 40, 'preview'),
|
||||
self._sponsor_chapter(50, 60, 'filler')]
|
||||
@ -136,7 +137,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 20, 'chapter', title='sb c1'),
|
||||
self._sponsor_chapter(15, 16, 'chapter', title='sb c2'),
|
||||
self._sponsor_chapter(30, 40, 'preview'),
|
||||
@ -149,10 +151,14 @@ def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
|
||||
chapters = self._chapters([120], ['c']) + [
|
||||
self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
|
||||
self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'),
|
||||
self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')]
|
||||
chapters = [
|
||||
*self._chapters([120], ['c']),
|
||||
self._sponsor_chapter(10, 45, 'sponsor'),
|
||||
self._sponsor_chapter(20, 40, 'selfpromo'),
|
||||
self._sponsor_chapter(50, 70, 'sponsor'),
|
||||
self._sponsor_chapter(60, 85, 'selfpromo'),
|
||||
self._sponsor_chapter(90, 120, 'selfpromo'),
|
||||
self._sponsor_chapter(100, 110, 'sponsor')]
|
||||
expected = self._chapters(
|
||||
[10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
|
||||
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||
@ -172,7 +178,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
|
||||
chapters, self._chapters([40], ['c']), cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
|
||||
self._sponsor_chapter(50, 60, 'interaction')]
|
||||
@ -185,24 +192,29 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
|
||||
cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
|
||||
self._chapter(40, 50, remove=True)]
|
||||
chapters = self._chapters([70], ['c']) + [self._sponsor_chapter(10, 60, 'sponsor')] + cuts
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 60, 'sponsor'),
|
||||
*cuts]
|
||||
expected = self._chapters(
|
||||
[10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
|
||||
cuts = [self._sponsor_chapter(20, 50, 'selfpromo', remove=True)]
|
||||
chapters = self._chapters([60], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([60], ['c']),
|
||||
self._sponsor_chapter(10, 20, 'intro'),
|
||||
self._sponsor_chapter(30, 40, 'sponsor'),
|
||||
self._sponsor_chapter(50, 60, 'outro'),
|
||||
] + cuts
|
||||
*cuts]
|
||||
expected = self._chapters(
|
||||
[10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(20, 30, 'selfpromo'),
|
||||
self._sponsor_chapter(30, 40, 'interaction')]
|
||||
@ -213,7 +225,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(20, 30, 'interaction', remove=True),
|
||||
self._chapter(30, 40, remove=True),
|
||||
@ -226,7 +239,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
|
||||
chapters, expected, [self._chapter(20, 50, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 30, 'sponsor'),
|
||||
self._sponsor_chapter(20, 50, 'selfpromo'),
|
||||
self._sponsor_chapter(40, 60, 'interaction')]
|
||||
@ -238,7 +252,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 30, 'sponsor', remove=True),
|
||||
self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
|
||||
self._sponsor_chapter(40, 60, 'interaction', remove=True)]
|
||||
@ -246,7 +261,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
|
||||
chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
|
||||
chapters = self._chapters([170], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([170], ['c']),
|
||||
self._sponsor_chapter(0, 30, 'intro'),
|
||||
self._sponsor_chapter(20, 50, 'sponsor'),
|
||||
self._sponsor_chapter(40, 60, 'selfpromo'),
|
||||
@ -267,7 +283,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(sel
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
|
||||
chapters = self._chapters([170], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([170], ['c']),
|
||||
self._chapter(0, 30, remove=True),
|
||||
self._sponsor_chapter(20, 50, 'sponsor', remove=True),
|
||||
self._chapter(40, 60, remove=True),
|
||||
@ -284,7 +301,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
|
||||
chapters, self._chapters([20], ['c']), expected_cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
|
||||
chapters = self._chapters([60], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([60], ['c']),
|
||||
self._sponsor_chapter(10, 60, 'sponsor'),
|
||||
self._sponsor_chapter(10, 40, 'intro'),
|
||||
self._sponsor_chapter(30, 50, 'interaction'),
|
||||
@ -297,7 +315,8 @@ def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterC
|
||||
chapters, expected, [self._chapter(30, 50, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 30, 'sponsor'),
|
||||
self._sponsor_chapter(20, 50, 'interaction'),
|
||||
self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
|
||||
@ -310,7 +329,8 @@ def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
|
||||
chapters, expected, [self._chapter(30, 50, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([70], ['c']),
|
||||
self._sponsor_chapter(10, 60, 'sponsor'),
|
||||
self._sponsor_chapter(20, 60, 'interaction'),
|
||||
self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
|
||||
@ -321,7 +341,8 @@ def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
|
||||
chapters, expected, [self._chapter(30, 50, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
|
||||
chapters = self._chapters([200], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([200], ['c']),
|
||||
self._sponsor_chapter(10, 40, 'sponsor'),
|
||||
self._sponsor_chapter(10, 30, 'intro'),
|
||||
self._chapter(20, 30, remove=True),
|
||||
@ -347,8 +368,9 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndC
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
|
||||
chapters = (self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5'])
|
||||
+ [self._sponsor_chapter(10, 90, 'sponsor')])
|
||||
chapters = [
|
||||
*self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']),
|
||||
self._sponsor_chapter(10, 90, 'sponsor')]
|
||||
expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
@ -359,9 +381,10 @@ def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
|
||||
chapters = (self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4'])
|
||||
+ [self._sponsor_chapter(20, 30, 'sponsor'),
|
||||
self._sponsor_chapter(50, 70, 'selfpromo')])
|
||||
chapters = [
|
||||
*self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']),
|
||||
self._sponsor_chapter(20, 30, 'sponsor'),
|
||||
self._sponsor_chapter(50, 70, 'selfpromo')]
|
||||
expected = self._chapters([10, 20, 30, 40, 50, 70, 80],
|
||||
['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
|
||||
'[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
|
||||
@ -374,8 +397,9 @@ def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthe
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
|
||||
chapters = (self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4'])
|
||||
+ [self._sponsor_chapter(10, 30, 'music_offtopic')])
|
||||
chapters = [
|
||||
*self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']),
|
||||
self._sponsor_chapter(10, 30, 'music_offtopic')]
|
||||
expected = self._chapters(
|
||||
[10, 30, 40, 50, 60],
|
||||
['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
|
||||
@ -388,8 +412,9 @@ def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
|
||||
chapters = (self._chapters([10, 20, 40], ['c1', 'c2', 'c3'])
|
||||
+ [self._sponsor_chapter(20, 30, 'sponsor')])
|
||||
chapters = [
|
||||
*self._chapters([10, 20, 40], ['c1', 'c2', 'c3']),
|
||||
self._sponsor_chapter(20, 30, 'sponsor')]
|
||||
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
@ -400,8 +425,9 @@ def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
|
||||
chapters = (self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
|
||||
+ [self._sponsor_chapter(20, 30, 'sponsor')])
|
||||
chapters = [
|
||||
*self._chapters([10, 30, 40], ['c1', 'c2', 'c3']),
|
||||
self._sponsor_chapter(20, 30, 'sponsor')]
|
||||
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
@ -412,8 +438,9 @@ def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
|
||||
chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||
+ [self._sponsor_chapter(10, 30, 'sponsor')])
|
||||
chapters = [
|
||||
*self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
|
||||
self._sponsor_chapter(10, 30, 'sponsor')]
|
||||
expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
@ -424,8 +451,9 @@ def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
|
||||
chapters = (self._chapters([20, 40, 60], ['c1', 'c2', 'c3'])
|
||||
+ [self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')])
|
||||
chapters = [
|
||||
*self._chapters([20, 40, 60], ['c1', 'c2', 'c3']),
|
||||
self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')]
|
||||
expected = self._chapters(
|
||||
[10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
@ -437,8 +465,10 @@ def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
|
||||
chapters = (self._chapters([10, 40, 50], ['c1', 'c2', 'c3'])
|
||||
+ [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')])
|
||||
chapters = [
|
||||
*self._chapters([10, 40, 50], ['c1', 'c2', 'c3']),
|
||||
self._sponsor_chapter(0, 20, 'intro'),
|
||||
self._sponsor_chapter(30, 50, 'outro')]
|
||||
expected = self._chapters(
|
||||
[20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
@ -450,8 +480,10 @@ def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(sel
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
|
||||
chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||
+ [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')])
|
||||
chapters = [
|
||||
*self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
|
||||
self._sponsor_chapter(0, 20, 'intro'),
|
||||
self._sponsor_chapter(20, 40, 'outro')]
|
||||
expected = self._chapters([20, 40], ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
@ -491,38 +523,39 @@ def test_remove_marked_arrange_sponsors_TinyChapterAtTheStartPrependedToTheNext(
|
||||
chapters, self._chapters([2.5], ['c2']), cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
|
||||
chapters = self._chapters([1, 3, 4], ['c1', 'c2', 'c3']) + [
|
||||
chapters = [
|
||||
*self._chapters([1, 3, 4], ['c1', 'c2', 'c3']),
|
||||
self._sponsor_chapter(1.5, 2.5, 'sponsor')]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([1.5, 2.5, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
|
||||
chapters = self._chapters([2, 3, 5], ['c1', 'c2', 'c3']) + [
|
||||
chapters = [
|
||||
*self._chapters([2, 3, 5], ['c1', 'c2', 'c3']),
|
||||
self._sponsor_chapter(1, 3, 'sponsor'),
|
||||
self._sponsor_chapter(2.5, 4, 'selfpromo')
|
||||
]
|
||||
self._sponsor_chapter(2.5, 4, 'selfpromo')]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([1, 3, 4, 5], [
|
||||
'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_TinySponsorsPrependedToTheNextSponsor(self):
|
||||
chapters = self._chapters([4], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([4], ['c']),
|
||||
self._sponsor_chapter(1.5, 2, 'sponsor'),
|
||||
self._sponsor_chapter(2, 4, 'selfpromo')
|
||||
]
|
||||
self._sponsor_chapter(2, 4, 'selfpromo')]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([1.5, 4], ['c', '[SponsorBlock]: Unpaid/Self Promotion']), [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SmallestSponsorInTheOverlapGetsNamed(self):
|
||||
self._pp._sponsorblock_chapter_title = '[SponsorBlock]: %(name)s'
|
||||
chapters = self._chapters([10], ['c']) + [
|
||||
chapters = [
|
||||
*self._chapters([10], ['c']),
|
||||
self._sponsor_chapter(2, 8, 'sponsor'),
|
||||
self._sponsor_chapter(4, 6, 'selfpromo')
|
||||
]
|
||||
self._sponsor_chapter(4, 6, 'selfpromo')]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([2, 4, 6, 8, 10], [
|
||||
'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||
'[SponsorBlock]: Sponsor', 'c'
|
||||
'[SponsorBlock]: Sponsor', 'c',
|
||||
]), [])
|
||||
|
||||
def test_make_concat_opts_CommonCase(self):
|
||||
|
@ -95,7 +95,7 @@ def handle(self):
|
||||
return
|
||||
|
||||
elif Socks5Auth.AUTH_USER_PASS in methods:
|
||||
self.connection.sendall(struct.pack("!BB", SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS))
|
||||
self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS))
|
||||
|
||||
_, user_len = struct.unpack('!BB', self.connection.recv(2))
|
||||
username = self.connection.recv(user_len).decode()
|
||||
@ -174,7 +174,7 @@ def handle(self):
|
||||
if 0x0 < dest_ip <= 0xFF:
|
||||
use_remote_dns = True
|
||||
else:
|
||||
socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack("!I", dest_ip))
|
||||
socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack('!I', dest_ip))
|
||||
|
||||
user_id = self._read_until_null().decode()
|
||||
if user_id != (self.socks_kwargs.get('user_id') or ''):
|
||||
@ -291,7 +291,7 @@ def ctx(request):
|
||||
('Urllib', 'http'),
|
||||
('Requests', 'http'),
|
||||
('Websockets', 'ws'),
|
||||
('CurlCFFI', 'http')
|
||||
('CurlCFFI', 'http'),
|
||||
], indirect=True)
|
||||
class TestSocks4Proxy:
|
||||
def test_socks4_no_auth(self, handler, ctx):
|
||||
@ -366,7 +366,7 @@ def test_timeout(self, handler, ctx):
|
||||
('Urllib', 'http'),
|
||||
('Requests', 'http'),
|
||||
('Websockets', 'ws'),
|
||||
('CurlCFFI', 'http')
|
||||
('CurlCFFI', 'http'),
|
||||
], indirect=True)
|
||||
class TestSocks5Proxy:
|
||||
|
||||
|
@ -40,12 +40,11 @@ def setUp(self):
|
||||
self.ie = self.IE()
|
||||
self.DL.add_info_extractor(self.ie)
|
||||
if not self.IE.working():
|
||||
print('Skipping: %s marked as not _WORKING' % self.IE.ie_key())
|
||||
print(f'Skipping: {self.IE.ie_key()} marked as not _WORKING')
|
||||
self.skipTest('IE marked as not _WORKING')
|
||||
|
||||
def getInfoDict(self):
|
||||
info_dict = self.DL.extract_info(self.url, download=False)
|
||||
return info_dict
|
||||
return self.DL.extract_info(self.url, download=False)
|
||||
|
||||
def getSubtitles(self):
|
||||
info_dict = self.getInfoDict()
|
||||
@ -87,7 +86,7 @@ def test_youtube_allsubtitles(self):
|
||||
self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
|
||||
self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9')
|
||||
for lang in ['fr', 'de']:
|
||||
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
||||
self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')
|
||||
|
||||
def _test_subtitles_format(self, fmt, md5_hash, lang='en'):
|
||||
self.DL.params['writesubtitles'] = True
|
||||
@ -157,7 +156,7 @@ def test_allsubtitles(self):
|
||||
self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
|
||||
self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
|
||||
for lang in ['es', 'fr', 'de']:
|
||||
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
||||
self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')
|
||||
|
||||
def test_nosubtitles(self):
|
||||
self.DL.expect_warning('video doesn\'t have subtitles')
|
||||
@ -182,7 +181,7 @@ def test_allsubtitles(self):
|
||||
self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
|
||||
self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
|
||||
for lang in ['es', 'fr', 'de']:
|
||||
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
|
||||
self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')
|
||||
|
||||
|
||||
@is_download_test
|
||||
|
@ -31,7 +31,7 @@ def test_traversal_base(self):
|
||||
'allow tuple path'
|
||||
assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
|
||||
'allow list path'
|
||||
assert traverse_obj(_TEST_DATA, (value for value in ("str",))) == 'str', \
|
||||
assert traverse_obj(_TEST_DATA, (value for value in ('str',))) == 'str', \
|
||||
'allow iterable path'
|
||||
assert traverse_obj(_TEST_DATA, 'str') == 'str', \
|
||||
'single items should be treated as a path'
|
||||
@ -70,7 +70,7 @@ def test_traversal_function(self):
|
||||
|
||||
def test_traversal_set(self):
|
||||
# transformation/type, like `expected_type`
|
||||
assert traverse_obj(_TEST_DATA, (..., {str.upper}, )) == ['STR'], \
|
||||
assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
|
||||
'Function in set should be a transformation'
|
||||
assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
|
||||
'Type in set should be a type filter'
|
||||
@ -276,7 +276,7 @@ def test_traversal_traverse_string(self):
|
||||
'`...` should result in string (same value) if `traverse_string`'
|
||||
assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
|
||||
'`slice` should result in string if `traverse_string`'
|
||||
assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"), traverse_string=True) == 'str', \
|
||||
assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), traverse_string=True) == 'str', \
|
||||
'function should result in string if `traverse_string`'
|
||||
assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
|
||||
'branching should result in list if `traverse_string`'
|
||||
|
@ -78,11 +78,11 @@
|
||||
|
||||
TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update'
|
||||
|
||||
TEST_LOCKFILE_V1 = r'''%s
|
||||
TEST_LOCKFILE_V1 = rf'''{TEST_LOCKFILE_COMMENT}
|
||||
lock 2022.08.18.36 .+ Python 3\.6
|
||||
lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
||||
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||
''' % TEST_LOCKFILE_COMMENT
|
||||
'''
|
||||
|
||||
TEST_LOCKFILE_V2_TMPL = r'''%s
|
||||
lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
|
||||
@ -98,12 +98,12 @@
|
||||
|
||||
TEST_LOCKFILE_ACTUAL = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_V1.rstrip('\n')
|
||||
|
||||
TEST_LOCKFILE_FORK = r'''%s# Test if a fork blocks updates to non-numeric tags
|
||||
TEST_LOCKFILE_FORK = rf'''{TEST_LOCKFILE_ACTUAL}# Test if a fork blocks updates to non-numeric tags
|
||||
lockV2 fork/yt-dlp pr0000 .+ Python 3.6
|
||||
lockV2 fork/yt-dlp pr1234 (?!win_x86_exe).+ Python 3\.7
|
||||
lockV2 fork/yt-dlp pr1234 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||
lockV2 fork/yt-dlp pr9999 .+ Python 3.11
|
||||
''' % TEST_LOCKFILE_ACTUAL
|
||||
'''
|
||||
|
||||
|
||||
class FakeUpdater(Updater):
|
||||
|
@ -276,8 +276,8 @@ def env(var):
|
||||
self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
|
||||
self.assertEqual(expand_path('~'), os.getenv('HOME'))
|
||||
self.assertEqual(
|
||||
expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
|
||||
'%s/expanded' % os.getenv('HOME'))
|
||||
expand_path('~/{}'.format(env('yt_dlp_EXPATH_PATH'))),
|
||||
'{}/expanded'.format(os.getenv('HOME')))
|
||||
finally:
|
||||
os.environ['HOME'] = old_home or ''
|
||||
|
||||
@ -356,12 +356,12 @@ def test_datetime_from_str(self):
|
||||
self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))
|
||||
|
||||
def test_daterange(self):
|
||||
_20century = DateRange("19000101", "20000101")
|
||||
self.assertFalse("17890714" in _20century)
|
||||
_ac = DateRange("00010101")
|
||||
self.assertTrue("19690721" in _ac)
|
||||
_firstmilenium = DateRange(end="10000101")
|
||||
self.assertTrue("07110427" in _firstmilenium)
|
||||
_20century = DateRange('19000101', '20000101')
|
||||
self.assertFalse('17890714' in _20century)
|
||||
_ac = DateRange('00010101')
|
||||
self.assertTrue('19690721' in _ac)
|
||||
_firstmilenium = DateRange(end='10000101')
|
||||
self.assertTrue('07110427' in _firstmilenium)
|
||||
|
||||
def test_unified_dates(self):
|
||||
self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
|
||||
@ -506,7 +506,7 @@ def test_xpath_attr(self):
|
||||
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
|
||||
|
||||
def test_smuggle_url(self):
|
||||
data = {"ö": "ö", "abc": [3]}
|
||||
data = {'ö': 'ö', 'abc': [3]}
|
||||
url = 'https://foo.bar/baz?x=y#a'
|
||||
smug_url = smuggle_url(url, data)
|
||||
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
|
||||
@ -784,7 +784,7 @@ def test_parse_iso8601(self):
|
||||
def test_strip_jsonp(self):
|
||||
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
|
||||
d = json.loads(stripped)
|
||||
self.assertEqual(d, [{"id": "532cb", "x": 3}])
|
||||
self.assertEqual(d, [{'id': '532cb', 'x': 3}])
|
||||
|
||||
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
|
||||
d = json.loads(stripped)
|
||||
@ -922,19 +922,19 @@ def test_escape_rfc3986(self):
|
||||
def test_normalize_url(self):
|
||||
self.assertEqual(
|
||||
normalize_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
|
||||
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
|
||||
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4',
|
||||
)
|
||||
self.assertEqual(
|
||||
normalize_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
|
||||
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
|
||||
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290',
|
||||
)
|
||||
self.assertEqual(
|
||||
normalize_url('http://тест.рф/фрагмент'),
|
||||
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
|
||||
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82',
|
||||
)
|
||||
self.assertEqual(
|
||||
normalize_url('http://тест.рф/абв?абв=абв#абв'),
|
||||
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
|
||||
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2',
|
||||
)
|
||||
self.assertEqual(normalize_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||
|
||||
@ -979,7 +979,7 @@ def test_js_to_json_vars_strings(self):
|
||||
'e': 'false',
|
||||
'f': '"false"',
|
||||
'g': 'var',
|
||||
}
|
||||
},
|
||||
)),
|
||||
{
|
||||
'null': None,
|
||||
@ -988,8 +988,8 @@ def test_js_to_json_vars_strings(self):
|
||||
'trueStr': 'true',
|
||||
'false': False,
|
||||
'falseStr': 'false',
|
||||
'unresolvedVar': 'var'
|
||||
}
|
||||
'unresolvedVar': 'var',
|
||||
},
|
||||
)
|
||||
|
||||
self.assertDictEqual(
|
||||
@ -1005,14 +1005,14 @@ def test_js_to_json_vars_strings(self):
|
||||
'b': '"123"',
|
||||
'c': '1.23',
|
||||
'd': '"1.23"',
|
||||
}
|
||||
},
|
||||
)),
|
||||
{
|
||||
'int': 123,
|
||||
'intStr': '123',
|
||||
'float': 1.23,
|
||||
'floatStr': '1.23',
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
self.assertDictEqual(
|
||||
@ -1028,14 +1028,14 @@ def test_js_to_json_vars_strings(self):
|
||||
'b': '"{}"',
|
||||
'c': '[]',
|
||||
'd': '"[]"',
|
||||
}
|
||||
},
|
||||
)),
|
||||
{
|
||||
'object': {},
|
||||
'objectStr': '{}',
|
||||
'array': [],
|
||||
'arrayStr': '[]',
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
def test_js_to_json_realworld(self):
|
||||
@ -1081,7 +1081,7 @@ def test_js_to_json_realworld(self):
|
||||
|
||||
def test_js_to_json_edgecases(self):
|
||||
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
||||
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
||||
self.assertEqual(json.loads(on), {'abc_def': "1'\\2\\'3\"4"})
|
||||
|
||||
on = js_to_json('{"abc": true}')
|
||||
self.assertEqual(json.loads(on), {'abc': True})
|
||||
@ -1113,9 +1113,9 @@ def test_js_to_json_edgecases(self):
|
||||
'c': 0,
|
||||
'd': 42.42,
|
||||
'e': [],
|
||||
'f': "abc",
|
||||
'g': "",
|
||||
'42': 42
|
||||
'f': 'abc',
|
||||
'g': '',
|
||||
'42': 42,
|
||||
})
|
||||
|
||||
on = js_to_json('["abc", "def",]')
|
||||
@ -1209,8 +1209,8 @@ def test_js_to_json_common_constructors(self):
|
||||
self.assertEqual(json.loads(js_to_json('Array(5, 10)')), [5, 10])
|
||||
self.assertEqual(json.loads(js_to_json('new Array(15,5)')), [15, 5])
|
||||
self.assertEqual(json.loads(js_to_json('new Map([Array(5, 10),new Array(15,5)])')), {'5': 10, '15': 5})
|
||||
self.assertEqual(json.loads(js_to_json('new Date("123")')), "123")
|
||||
self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), "2023-10-19")
|
||||
self.assertEqual(json.loads(js_to_json('new Date("123")')), '123')
|
||||
self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), '2023-10-19')
|
||||
|
||||
def test_extract_attributes(self):
|
||||
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||
@ -1265,7 +1265,7 @@ def test_intlist_to_bytes(self):
|
||||
def test_args_to_str(self):
|
||||
self.assertEqual(
|
||||
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
|
||||
'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
|
||||
'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""',
|
||||
)
|
||||
|
||||
def test_parse_filesize(self):
|
||||
@ -1348,10 +1348,10 @@ def test_is_html(self):
|
||||
self.assertTrue(is_html( # UTF-8 with BOM
|
||||
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
|
||||
self.assertTrue(is_html( # UTF-16-LE
|
||||
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
|
||||
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00',
|
||||
))
|
||||
self.assertTrue(is_html( # UTF-16-BE
|
||||
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
|
||||
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4',
|
||||
))
|
||||
self.assertTrue(is_html( # UTF-32-BE
|
||||
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
|
||||
@ -1935,7 +1935,7 @@ def test_locked_file(self):
|
||||
with locked_file(FILE, test_mode, False):
|
||||
pass
|
||||
except (BlockingIOError, PermissionError):
|
||||
if not testing_write: # FIXME
|
||||
if not testing_write: # FIXME: blocked read access
|
||||
print(f'Known issue: Exclusive lock ({lock_mode}) blocks read access ({test_mode})')
|
||||
continue
|
||||
self.assertTrue(testing_write, f'{test_mode} is blocked by {lock_mode}')
|
||||
@ -2003,7 +2003,7 @@ def total(*x, **kwargs):
|
||||
msg='int fn with expected_type int should give int')
|
||||
self.assertEqual(try_call(lambda: 1, expected_type=dict), None,
|
||||
msg='int fn with wrong expected_type should give None')
|
||||
self.assertEqual(try_call(total, args=(0, 1, 0, ), expected_type=int), 1,
|
||||
self.assertEqual(try_call(total, args=(0, 1, 0), expected_type=int), 1,
|
||||
msg='fn should accept arglist')
|
||||
self.assertEqual(try_call(total, kwargs={'a': 0, 'b': 1, 'c': 0}, expected_type=int), 1,
|
||||
msg='fn should accept kwargs')
|
||||
|
@ -297,14 +297,14 @@ def test_request_headers(self, handler):
|
||||
'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
|
||||
'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'clientencrypted.key'),
|
||||
'client_certificate_password': 'foobar',
|
||||
}
|
||||
},
|
||||
))
|
||||
def test_mtls(self, handler, client_cert):
|
||||
with handler(
|
||||
# Disable client-side validation of unacceptable self-signed testcert.pem
|
||||
# The test is of a check on the server side, so unaffected
|
||||
verify=False,
|
||||
client_cert=client_cert
|
||||
client_cert=client_cert,
|
||||
) as rh:
|
||||
ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
|
||||
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
class TestYoutubeMisc(unittest.TestCase):
|
||||
def test_youtube_extract(self):
|
||||
assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
|
||||
assertExtractId = lambda url, video_id: self.assertEqual(YoutubeIE.extract_id(url), video_id)
|
||||
assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
|
@ -46,17 +46,17 @@
|
||||
(
|
||||
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
|
||||
84,
|
||||
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
|
||||
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>',
|
||||
),
|
||||
(
|
||||
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
|
||||
83,
|
||||
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
|
||||
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F',
|
||||
),
|
||||
(
|
||||
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
|
||||
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
|
||||
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
|
||||
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B',
|
||||
),
|
||||
(
|
||||
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
|
||||
@ -207,7 +207,7 @@ def tearDown(self):
|
||||
def t_factory(name, sig_func, url_pattern):
|
||||
def make_tfunc(url, sig_input, expected_sig):
|
||||
m = url_pattern.match(url)
|
||||
assert m, '%r should follow URL format' % url
|
||||
assert m, f'{url!r} should follow URL format'
|
||||
test_id = m.group('id')
|
||||
|
||||
def test_func(self):
|
||||
|
@ -109,7 +109,6 @@
|
||||
determine_protocol,
|
||||
encode_compat_str,
|
||||
encodeFilename,
|
||||
error_to_compat_str,
|
||||
escapeHTML,
|
||||
expand_path,
|
||||
extract_basic_auth,
|
||||
@ -583,7 +582,7 @@ class YoutubeDL:
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
}
_deprecated_multivalue_fields = {
'album_artist': 'album_artists',
@ -594,7 +593,7 @@ class YoutubeDL:
}
_format_selection_exts = {
'audio': set(MEDIA_EXTENSIONS.common_audio),
'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
'video': {*MEDIA_EXTENSIONS.common_video, '3gp'},
'storyboards': set(MEDIA_EXTENSIONS.storyboards),
}
@ -628,7 +627,7 @@ def __init__(self, params=None, auto_init=True):
error=sys.stderr,
screen=sys.stderr if self.params.get('quiet') else stdout,
console=None if compat_os_name == 'nt' else next(
filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
)
try:
@ -679,9 +678,9 @@ def process_color_policy(stream):
width_args = [] if width is None else ['-w', str(width)]
sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
try:
self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
self._output_process = Popen(['bidiv', *width_args], **sp_kwargs)
except OSError:
self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_process = Popen(['fribidi', '-c', 'UTF-8', *width_args], **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
@ -822,8 +821,7 @@ def warn_if_short_id(self, argv):
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s' %
shell_quote(correct_argv))
f'Use -- to separate parameters and URLs, like this:\n{shell_quote(correct_argv)}')
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
@ -922,7 +920,7 @@ def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
return
self._write_string(
'%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
'{}{}'.format(self._bidi_workaround(message), ('' if skip_eol else '\n')),
self._out_files.screen, only_once=only_once)
def to_stderr(self, message, only_once=False):
@ -1045,10 +1043,10 @@ def _format_err(self, *args, **kwargs):
return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
def report_warning(self, message, only_once=False):
'''
"""
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
"""
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
@ -1066,14 +1064,14 @@ def deprecated_feature(self, message):
self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)
def report_error(self, message, *args, **kwargs):
'''
"""
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
"""
self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
def write_debug(self, message, only_once=False):
'''Log debug message or Print message to stderr'''
"""Log debug message or Print message to stderr"""
if not self.params.get('verbose', False):
return
message = f'[debug] {message}'
@ -1085,14 +1083,14 @@ def write_debug(self, message, only_once=False):
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
self.to_screen(f'[download] {file_name} has already been downloaded')
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_file_delete(self, file_name):
"""Report that existing file will be deleted."""
try:
self.to_screen('Deleting existing file %s' % file_name)
self.to_screen(f'Deleting existing file {file_name}')
except UnicodeEncodeError:
self.to_screen('Deleting existing file')
@ -1147,7 +1145,7 @@ def _outtmpl_expandpath(outtmpl):
@staticmethod
def escape_outtmpl(outtmpl):
''' Escape any remaining strings like %s, %abc% etc. '''
""" Escape any remaining strings like %s, %abc% etc. """
return re.sub(
STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
@ -1155,7 +1153,7 @@ def escape_outtmpl(outtmpl):
@classmethod
def validate_outtmpl(cls, outtmpl):
''' @return None or Exception object '''
""" @return None or Exception object """
outtmpl = re.sub(
STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
lambda mobj: f'{mobj.group(0)[:-1]}s',
@ -1208,13 +1206,13 @@ def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
}
# Field is of the form key1.key2...
# where keys (except first) can be string, int, slice or "{field, ...}"
FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'} # noqa: UP031
FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % { # noqa: UP031
'inner': FIELD_INNER_RE,
'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
}
MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
MATH_OPERATORS_RE = r'(?:{})'.format('|'.join(map(re.escape, MATH_FUNCTIONS.keys())))
INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
(?P<negate>-)?
(?P<fields>{FIELD_RE})
@ -1337,7 +1335,7 @@ def create_key(outer_mobj):
value, default = None, na
fmt = outer_mobj.group('format')
if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
if fmt == 's' and last_field in field_size_compat_map and isinstance(value, int):
fmt = f'0{field_size_compat_map[last_field]:d}d'
flags = outer_mobj.group('conversion') or ''
@ -1362,7 +1360,7 @@ def create_key(outer_mobj):
elif fmt[-1] == 'U': # unicode normalized
value, fmt = unicodedata.normalize(
# "+" = compatibility equivalence, "#" = NFD
'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
'NF{}{}'.format('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
value), str_fmt
elif fmt[-1] == 'D': # decimal suffix
num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
@ -1390,7 +1388,7 @@ def create_key(outer_mobj):
if fmt[-1] in 'csra':
value = sanitizer(last_field, value)
key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
key = '{}\0{}'.format(key.replace('%', '%\0'), outer_mobj.group('format'))
TMPL_DICT[key] = value
return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
@ -1479,9 +1477,9 @@ def check_filter():
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
date_range = self.params.get('daterange', DateRange())
if date not in date_range:
return f'{date_from_str(date).isoformat()} upload date is not in range {date_range}'
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
@ -1491,7 +1489,7 @@ def check_filter():
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
return f'Skipping "{video_title}" because it is age restricted'
|
||||
|
||||
match_filter = self.params.get('match_filter')
|
||||
if match_filter is None:
|
||||
@ -1544,7 +1542,7 @@ def check_filter():
|
||||
|
||||
@staticmethod
|
||||
def add_extra_info(info_dict, extra_info):
|
||||
'''Set the keys from extra_info in info dict if they are missing'''
|
||||
"""Set the keys from extra_info in info dict if they are missing"""
|
||||
for key, value in extra_info.items():
|
||||
info_dict.setdefault(key, value)
|
||||
|
||||
@ -1590,7 +1588,7 @@ def extract_info(self, url, download=True, ie_key=None, extra_info=None,
|
||||
self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
|
||||
'has already been recorded in the archive')
|
||||
if self.params.get('break_on_existing', False):
|
||||
raise ExistingVideoReached()
|
||||
raise ExistingVideoReached
|
||||
break
|
||||
return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
|
||||
else:
|
||||
@ -1616,8 +1614,8 @@ def wrapper(self, *args, **kwargs):
|
||||
except GeoRestrictedError as e:
|
||||
msg = e.msg
|
||||
if e.countries:
|
||||
msg += '\nThis video is available in %s.' % ', '.join(
|
||||
map(ISO3166Utils.short2full, e.countries))
|
||||
msg += '\nThis video is available in {}.'.format(', '.join(
|
||||
map(ISO3166Utils.short2full, e.countries)))
|
||||
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
|
||||
self.report_error(msg)
|
||||
except ExtractorError as e: # An error we somewhat expected
|
||||
@ -1826,8 +1824,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
|
||||
if isinstance(additional_urls, str):
|
||||
additional_urls = [additional_urls]
|
||||
self.to_screen(
|
||||
'[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
|
||||
self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
|
||||
'[info] {}: {} additional URL(s) requested'.format(ie_result['id'], len(additional_urls)))
|
||||
self.write_debug('Additional URLs: "{}"'.format('", "'.join(additional_urls)))
|
||||
ie_result['additional_entries'] = [
|
||||
self.extract_info(
|
||||
url, download, extra_info=extra_info,
|
||||
@ -1879,8 +1877,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
|
||||
webpage_url = ie_result.get('webpage_url') # Playlists maynot have webpage_url
|
||||
if webpage_url and webpage_url in self._playlist_urls:
|
||||
self.to_screen(
|
||||
'[download] Skipping already downloaded playlist: %s'
|
||||
% ie_result.get('title') or ie_result.get('id'))
|
||||
'[download] Skipping already downloaded playlist: {}'.format(
|
||||
ie_result.get('title')) or ie_result.get('id'))
|
||||
return
|
||||
|
||||
self._playlist_level += 1
|
||||
@ -1895,8 +1893,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
|
||||
self._playlist_urls.clear()
|
||||
elif result_type == 'compat_list':
|
||||
self.report_warning(
|
||||
'Extractor %s returned a compat_list result. '
|
||||
'It needs to be updated.' % ie_result.get('extractor'))
|
||||
'Extractor {} returned a compat_list result. '
|
||||
'It needs to be updated.'.format(ie_result.get('extractor')))
|
||||
|
||||
def _fixup(r):
|
||||
self.add_extra_info(r, {
|
||||
@ -1913,7 +1911,7 @@ def _fixup(r):
|
||||
]
|
||||
return ie_result
|
||||
else:
|
||||
raise Exception('Invalid result type: %s' % result_type)
|
||||
raise Exception(f'Invalid result type: {result_type}')
|
||||
|
||||
def _ensure_dir_exists(self, path):
|
||||
return make_dir(path, self.report_error)
|
||||
@ -2029,8 +2027,9 @@ def __process_playlist(self, ie_result, download):
|
||||
resolved_entries[i] = (playlist_index, NO_DEFAULT)
|
||||
continue
|
||||
|
||||
self.to_screen('[download] Downloading item %s of %s' % (
|
||||
self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
|
||||
self.to_screen(
|
||||
f'[download] Downloading item {self._format_screen(i + 1, self.Styles.ID)} '
|
||||
f'of {self._format_screen(n_entries, self.Styles.EMPHASIS)}')
|
||||
|
||||
entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
|
||||
'playlist_index': playlist_index,
|
||||
@ -2080,9 +2079,9 @@ def _build_format_filter(self, filter_spec):
|
||||
}
|
||||
operator_rex = re.compile(r'''(?x)\s*
|
||||
(?P<key>[\w.-]+)\s*
|
||||
(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
|
||||
(?P<op>{})(?P<none_inclusive>\s*\?)?\s*
|
||||
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
|
||||
''' % '|'.join(map(re.escape, OPERATORS.keys())))
|
||||
'''.format('|'.join(map(re.escape, OPERATORS.keys()))))
|
||||
m = operator_rex.fullmatch(filter_spec)
|
||||
if m:
|
||||
try:
|
||||
@ -2093,7 +2092,7 @@ def _build_format_filter(self, filter_spec):
|
||||
comparison_value = parse_filesize(m.group('value') + 'B')
|
||||
if comparison_value is None:
|
||||
raise ValueError(
|
||||
'Invalid value %r in format specification %r' % (
|
||||
'Invalid value {!r} in format specification {!r}'.format(
|
||||
m.group('value'), filter_spec))
|
||||
op = OPERATORS[m.group('op')]
|
||||
|
||||
@ -2103,15 +2102,15 @@ def _build_format_filter(self, filter_spec):
|
||||
'^=': lambda attr, value: attr.startswith(value),
|
||||
'$=': lambda attr, value: attr.endswith(value),
|
||||
'*=': lambda attr, value: value in attr,
|
||||
'~=': lambda attr, value: value.search(attr) is not None
|
||||
'~=': lambda attr, value: value.search(attr) is not None,
|
||||
}
|
||||
str_operator_rex = re.compile(r'''(?x)\s*
|
||||
(?P<key>[a-zA-Z0-9._-]+)\s*
|
||||
(?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
|
||||
(?P<negation>!\s*)?(?P<op>{})\s*(?P<none_inclusive>\?\s*)?
|
||||
(?P<quote>["'])?
|
||||
(?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
|
||||
(?(quote)(?P=quote))\s*
|
||||
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
|
||||
'''.format('|'.join(map(re.escape, STR_OPERATORS.keys()))))
|
||||
m = str_operator_rex.fullmatch(filter_spec)
|
||||
if m:
|
||||
if m.group('op') == '~=':
|
||||
@ -2125,7 +2124,7 @@ def _build_format_filter(self, filter_spec):
|
||||
op = str_op
|
||||
|
||||
if not m:
|
||||
raise SyntaxError('Invalid filter specification %r' % filter_spec)
|
||||
raise SyntaxError(f'Invalid filter specification {filter_spec!r}')
|
||||
|
||||
def _filter(f):
|
||||
actual_value = f.get(m.group('key'))
|
||||
@ -2141,7 +2140,7 @@ def _check_formats(self, formats):
|
||||
if working:
|
||||
yield f
|
||||
continue
|
||||
self.to_screen('[info] Testing format %s' % f['format_id'])
|
||||
self.to_screen('[info] Testing format {}'.format(f['format_id']))
|
||||
path = self.get_output_path('temp')
|
||||
if not self._ensure_dir_exists(f'{path}/'):
|
||||
continue
|
||||
@ -2149,19 +2148,19 @@ def _check_formats(self, formats):
|
||||
temp_file.close()
|
||||
try:
|
||||
success, _ = self.dl(temp_file.name, f, test=True)
|
||||
except (DownloadError, OSError, ValueError) + network_exceptions:
|
||||
except (DownloadError, OSError, ValueError, *network_exceptions):
|
||||
success = False
|
||||
finally:
|
||||
if os.path.exists(temp_file.name):
|
||||
try:
|
||||
os.remove(temp_file.name)
|
||||
except OSError:
|
||||
self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
|
||||
self.report_warning(f'Unable to delete temporary file "{temp_file.name}"')
|
||||
f['__working'] = success
|
||||
if success:
|
||||
yield f
|
||||
else:
|
||||
self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
|
||||
self.to_screen('[info] Unable to download format {}. Skipping...'.format(f['format_id']))
|
||||
|
||||
def _select_formats(self, formats, selector):
|
||||
return list(selector({
|
||||
@ -2214,8 +2213,8 @@ def syntax_error(note, start):
|
||||
|
||||
def _parse_filter(tokens):
|
||||
filter_parts = []
|
||||
for type, string_, start, _, _ in tokens:
|
||||
if type == tokenize.OP and string_ == ']':
|
||||
for type_, string_, _start, _, _ in tokens:
|
||||
if type_ == tokenize.OP and string_ == ']':
|
||||
return ''.join(filter_parts)
|
||||
else:
|
||||
filter_parts.append(string_)
|
||||
@ -2225,23 +2224,23 @@ def _remove_unused_ops(tokens):
|
||||
# E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
|
||||
ALLOWED_OPS = ('/', '+', ',', '(', ')')
|
||||
last_string, last_start, last_end, last_line = None, None, None, None
|
||||
for type, string_, start, end, line in tokens:
|
||||
if type == tokenize.OP and string_ == '[':
|
||||
for type_, string_, start, end, line in tokens:
|
||||
if type_ == tokenize.OP and string_ == '[':
|
||||
if last_string:
|
||||
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
||||
last_string = None
|
||||
yield type, string_, start, end, line
|
||||
yield type_, string_, start, end, line
|
||||
# everything inside brackets will be handled by _parse_filter
|
||||
for type, string_, start, end, line in tokens:
|
||||
yield type, string_, start, end, line
|
||||
if type == tokenize.OP and string_ == ']':
|
||||
for type_, string_, start, end, line in tokens:
|
||||
yield type_, string_, start, end, line
|
||||
if type_ == tokenize.OP and string_ == ']':
|
||||
break
|
||||
elif type == tokenize.OP and string_ in ALLOWED_OPS:
|
||||
elif type_ == tokenize.OP and string_ in ALLOWED_OPS:
|
||||
if last_string:
|
||||
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
||||
last_string = None
|
||||
yield type, string_, start, end, line
|
||||
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
|
||||
yield type_, string_, start, end, line
|
||||
elif type_ in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
|
||||
if not last_string:
|
||||
last_string = string_
|
||||
last_start = start
|
||||
@ -2254,13 +2253,13 @@ def _remove_unused_ops(tokens):
|
||||
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
|
||||
selectors = []
|
||||
current_selector = None
|
||||
for type, string_, start, _, _ in tokens:
|
||||
for type_, string_, start, _, _ in tokens:
|
||||
# ENCODING is only defined in Python 3.x
|
||||
if type == getattr(tokenize, 'ENCODING', None):
|
||||
if type_ == getattr(tokenize, 'ENCODING', None):
|
||||
continue
|
||||
elif type in [tokenize.NAME, tokenize.NUMBER]:
|
||||
elif type_ in [tokenize.NAME, tokenize.NUMBER]:
|
||||
current_selector = FormatSelector(SINGLE, string_, [])
|
||||
elif type == tokenize.OP:
|
||||
elif type_ == tokenize.OP:
|
||||
if string_ == ')':
|
||||
if not inside_group:
|
||||
# ')' will be handled by the parentheses group
|
||||
@ -2303,7 +2302,7 @@ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, ins
|
||||
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
|
||||
else:
|
||||
raise syntax_error(f'Operator not recognized: "{string_}"', start)
|
||||
elif type == tokenize.ENDMARKER:
|
||||
elif type_ == tokenize.ENDMARKER:
|
||||
break
|
||||
if current_selector:
|
||||
selectors.append(current_selector)
|
||||
@ -2378,7 +2377,7 @@ def _merge(formats_pair):
|
||||
'acodec': the_only_audio.get('acodec'),
|
||||
'abr': the_only_audio.get('abr'),
|
||||
'asr': the_only_audio.get('asr'),
|
||||
'audio_channels': the_only_audio.get('audio_channels')
|
||||
'audio_channels': the_only_audio.get('audio_channels'),
|
||||
})
|
||||
|
||||
return new_dict
|
||||
@ -2459,9 +2458,9 @@ def selector_function(ctx):
|
||||
|
||||
format_fallback = not format_type and not format_modified # for b, w
|
||||
_filter_f = (
|
||||
(lambda f: f.get('%scodec' % format_type) != 'none')
|
||||
(lambda f: f.get(f'{format_type}codec') != 'none')
|
||||
if format_type and format_modified # bv*, ba*, wv*, wa*
|
||||
else (lambda f: f.get('%scodec' % not_format_type) == 'none')
|
||||
else (lambda f: f.get(f'{not_format_type}codec') == 'none')
|
||||
if format_type # bv, ba, wv, wa
|
||||
else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
|
||||
if not format_modified # b, w
|
||||
@ -2529,7 +2528,7 @@ def __iter__(self):
|
||||
|
||||
def __next__(self):
|
||||
if self.counter >= len(self.tokens):
|
||||
raise StopIteration()
|
||||
raise StopIteration
|
||||
value = self.tokens[self.counter]
|
||||
self.counter += 1
|
||||
return value
|
||||
@ -2612,7 +2611,7 @@ def check_thumbnails(thumbnails):
|
||||
self._sort_thumbnails(thumbnails)
|
||||
for i, t in enumerate(thumbnails):
|
||||
if t.get('id') is None:
|
||||
t['id'] = '%d' % i
|
||||
t['id'] = str(i)
|
||||
if t.get('width') and t.get('height'):
|
||||
t['resolution'] = '%dx%d' % (t['width'], t['height'])
|
||||
t['url'] = sanitize_url(t['url'])
|
||||
@ -2673,8 +2672,8 @@ def _fill_common_fields(self, info_dict, final=True):
|
||||
# Auto generate title fields corresponding to the *_number fields when missing
|
||||
# in order to always have clean titles. This is very common for TV series.
|
||||
for field in ('chapter', 'season', 'episode'):
|
||||
if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
|
||||
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
|
||||
if final and info_dict.get(f'{field}_number') is not None and not info_dict.get(field):
|
||||
info_dict[field] = '%s %d' % (field.capitalize(), info_dict[f'{field}_number'])
|
||||
|
||||
for old_key, new_key in self._deprecated_multivalue_fields.items():
|
||||
if new_key in info_dict and old_key in info_dict:
|
||||
@ -2706,8 +2705,8 @@ def process_video_result(self, info_dict, download=True):
|
||||
|
||||
def report_force_conversion(field, field_not, conversion):
|
||||
self.report_warning(
|
||||
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
|
||||
% (field, field_not, conversion))
|
||||
f'"{field}" field is not {field_not} - forcing {conversion} conversion, '
|
||||
'there is an error in extractor')
|
||||
|
||||
def sanitize_string_field(info, string_field):
|
||||
field = info.get(string_field)
|
||||
@ -2824,28 +2823,28 @@ def is_wellformed(f):
|
||||
if not formats:
|
||||
self.raise_no_formats(info_dict)
|
||||
|
||||
for format in formats:
|
||||
sanitize_string_field(format, 'format_id')
|
||||
sanitize_numeric_fields(format)
|
||||
format['url'] = sanitize_url(format['url'])
|
||||
if format.get('ext') is None:
|
||||
format['ext'] = determine_ext(format['url']).lower()
|
||||
if format['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
|
||||
if format.get('acodec') is None:
|
||||
format['acodec'] = format['ext']
|
||||
if format.get('protocol') is None:
|
||||
format['protocol'] = determine_protocol(format)
|
||||
if format.get('resolution') is None:
|
||||
format['resolution'] = self.format_resolution(format, default=None)
|
||||
if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
|
||||
format['dynamic_range'] = 'SDR'
|
||||
if format.get('aspect_ratio') is None:
|
||||
format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
|
||||
for fmt in formats:
|
||||
sanitize_string_field(fmt, 'format_id')
|
||||
sanitize_numeric_fields(fmt)
|
||||
fmt['url'] = sanitize_url(fmt['url'])
|
||||
if fmt.get('ext') is None:
|
||||
fmt['ext'] = determine_ext(fmt['url']).lower()
|
||||
if fmt['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
|
||||
if fmt.get('acodec') is None:
|
||||
fmt['acodec'] = fmt['ext']
|
||||
if fmt.get('protocol') is None:
|
||||
fmt['protocol'] = determine_protocol(fmt)
|
||||
if fmt.get('resolution') is None:
|
||||
fmt['resolution'] = self.format_resolution(fmt, default=None)
|
||||
if fmt.get('dynamic_range') is None and fmt.get('vcodec') != 'none':
|
||||
fmt['dynamic_range'] = 'SDR'
|
||||
if fmt.get('aspect_ratio') is None:
|
||||
fmt['aspect_ratio'] = try_call(lambda: round(fmt['width'] / fmt['height'], 2))
|
||||
# For fragmented formats, "tbr" is often max bitrate and not average
|
||||
if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
|
||||
and not format.get('filesize') and not format.get('filesize_approx')):
|
||||
format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration'))
|
||||
format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)
|
||||
if (('manifest-filesize-approx' in self.params['compat_opts'] or not fmt.get('manifest_url'))
|
||||
and not fmt.get('filesize') and not fmt.get('filesize_approx')):
|
||||
fmt['filesize_approx'] = filesize_from_tbr(fmt.get('tbr'), info_dict.get('duration'))
|
||||
fmt['http_headers'] = self._calc_headers(collections.ChainMap(fmt, info_dict), load_cookies=True)
|
||||
|
||||
# Safeguard against old/insecure infojson when using --load-info-json
|
||||
if info_dict.get('http_headers'):
|
||||
@ -2858,36 +2857,36 @@ def is_wellformed(f):
|
||||
|
||||
self.sort_formats({
|
||||
'formats': formats,
|
||||
'_format_sort_fields': info_dict.get('_format_sort_fields')
|
||||
'_format_sort_fields': info_dict.get('_format_sort_fields'),
|
||||
})
|
||||
|
||||
# Sanitize and group by format_id
|
||||
formats_dict = {}
|
||||
for i, format in enumerate(formats):
|
||||
if not format.get('format_id'):
|
||||
format['format_id'] = str(i)
|
||||
for i, fmt in enumerate(formats):
|
||||
if not fmt.get('format_id'):
|
||||
fmt['format_id'] = str(i)
|
||||
else:
|
||||
# Sanitize format_id from characters used in format selector expression
|
||||
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
|
||||
formats_dict.setdefault(format['format_id'], []).append(format)
|
||||
fmt['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', fmt['format_id'])
|
||||
formats_dict.setdefault(fmt['format_id'], []).append(fmt)
|
||||
|
||||
# Make sure all formats have unique format_id
|
||||
common_exts = set(itertools.chain(*self._format_selection_exts.values()))
|
||||
for format_id, ambiguous_formats in formats_dict.items():
|
||||
ambigious_id = len(ambiguous_formats) > 1
|
||||
for i, format in enumerate(ambiguous_formats):
|
||||
for i, fmt in enumerate(ambiguous_formats):
|
||||
if ambigious_id:
|
||||
format['format_id'] = '%s-%d' % (format_id, i)
|
||||
fmt['format_id'] = f'{format_id}-{i}'
|
||||
# Ensure there is no conflict between id and ext in format selection
|
||||
# See https://github.com/yt-dlp/yt-dlp/issues/1282
|
||||
if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
|
||||
format['format_id'] = 'f%s' % format['format_id']
|
||||
if fmt['format_id'] != fmt['ext'] and fmt['format_id'] in common_exts:
|
||||
fmt['format_id'] = 'f{}'.format(fmt['format_id'])
|
||||
|
||||
if format.get('format') is None:
|
||||
format['format'] = '{id} - {res}{note}'.format(
|
||||
id=format['format_id'],
|
||||
res=self.format_resolution(format),
|
||||
note=format_field(format, 'format_note', ' (%s)'),
|
||||
if fmt.get('format') is None:
|
||||
fmt['format'] = '{id} - {res}{note}'.format(
|
||||
id=fmt['format_id'],
|
||||
res=self.format_resolution(fmt),
|
||||
note=format_field(fmt, 'format_note', ' (%s)'),
|
||||
)
|
||||
|
||||
if self.params.get('check_formats') is True:
|
||||
@ -3009,7 +3008,7 @@ def to_screen(*msg):
|
||||
info_dict['requested_downloads'] = downloaded_formats
|
||||
info_dict = self.run_all_pps('after_video', info_dict)
|
||||
if max_downloads_reached:
|
||||
raise MaxDownloadsReached()
|
||||
raise MaxDownloadsReached
|
||||
|
||||
# We update the info dict with the selected best quality format (backwards compatibility)
|
||||
info_dict.update(best_format)
|
||||
@ -3070,8 +3069,8 @@ def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
|
||||
else:
|
||||
f = formats[-1]
|
||||
self.report_warning(
|
||||
'No subtitle format found matching "%s" for language %s, '
|
||||
'using %s. Use --list-subs for a list of available subtitles' % (formats_query, lang, f['ext']))
|
||||
'No subtitle format found matching "{}" for language {}, '
|
||||
'using {}. Use --list-subs for a list of available subtitles'.format(formats_query, lang, f['ext']))
|
||||
subs[lang] = f
|
||||
return subs
|
||||
|
||||
@ -3226,7 +3225,7 @@ def replace_info_dict(new_info):
|
||||
|
||||
def check_max_downloads():
|
||||
if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
|
||||
raise MaxDownloadsReached()
|
||||
raise MaxDownloadsReached
|
||||
|
||||
if self.params.get('simulate'):
|
||||
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
|
||||
@ -3400,7 +3399,7 @@ def correct_ext(filename, ext=new_ext):
|
||||
for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
|
||||
f['filepath'] = fname = prepend_extension(
|
||||
correct_ext(temp_filename, info_dict['ext']),
|
||||
'f%s' % f['format_id'], info_dict['ext'])
|
||||
'f{}'.format(f['format_id']), info_dict['ext'])
|
||||
downloaded.append(fname)
|
||||
info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
|
||||
success, real_download = self.dl(temp_filename, info_dict)
|
||||
@ -3433,7 +3432,7 @@ def correct_ext(filename, ext=new_ext):
|
||||
if temp_filename != '-':
|
||||
fname = prepend_extension(
|
||||
correct_ext(temp_filename, new_info['ext']),
|
||||
'f%s' % f['format_id'], new_info['ext'])
|
||||
'f{}'.format(f['format_id']), new_info['ext'])
|
||||
if not self._ensure_dir_exists(fname):
|
||||
return
|
||||
f['filepath'] = fname
|
||||
@ -3465,11 +3464,11 @@ def correct_ext(filename, ext=new_ext):
|
||||
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
|
||||
|
||||
except network_exceptions as err:
|
||||
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
|
||||
self.report_error(f'unable to download video data: {err}')
|
||||
return
|
||||
except OSError as err:
|
||||
raise UnavailableVideoError(err)
|
||||
except (ContentTooShortError, ) as err:
|
||||
except ContentTooShortError as err:
|
||||
self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
|
||||
return
|
||||
|
||||
@ -3536,13 +3535,13 @@ def ffmpeg_fixup(cndn, msg, cls):
|
||||
try:
|
||||
replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
|
||||
except PostProcessingError as err:
|
||||
self.report_error('Postprocessing: %s' % str(err))
|
||||
self.report_error(f'Postprocessing: {err}')
|
||||
return
|
||||
try:
|
||||
for ph in self._post_hooks:
|
||||
ph(info_dict['filepath'])
|
||||
except Exception as err:
|
||||
self.report_error('post hooks: %s' % str(err))
|
||||
self.report_error(f'post hooks: {err}')
|
||||
return
|
||||
info_dict['__write_download_archive'] = True
|
||||
|
||||
@ -3609,7 +3608,7 @@ def download_with_info_file(self, info_filename):
|
||||
|
||||
@staticmethod
|
||||
def sanitize_info(info_dict, remove_private_keys=False):
|
||||
''' Sanitize the infodict for converting to json '''
|
||||
""" Sanitize the infodict for converting to json """
|
||||
if info_dict is None:
|
||||
return info_dict
|
||||
info_dict.setdefault('epoch', int(time.time()))
|
||||
@ -3644,7 +3643,7 @@ def filter_fn(obj):
|
||||
|
||||
@staticmethod
|
||||
def filter_requested_info(info_dict, actually_filter=True):
|
||||
''' Alias of sanitize_info for backward compatibility '''
|
||||
""" Alias of sanitize_info for backward compatibility """
|
||||
return YoutubeDL.sanitize_info(info_dict, actually_filter)
|
||||
|
||||
def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
|
||||
@ -3666,7 +3665,7 @@ def actual_post_extract(info_dict):
|
||||
actual_post_extract(video_dict or {})
|
||||
return
|
||||
|
||||
post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
|
||||
post_extractor = info_dict.pop('__post_extractor', None) or dict
|
||||
info_dict.update(post_extractor())
|
||||
|
||||
actual_post_extract(info_dict or {})
|
||||
@ -3771,7 +3770,7 @@ def format_resolution(format, default='unknown'):
|
||||
if format.get('width') and format.get('height'):
|
||||
return '%dx%d' % (format['width'], format['height'])
|
||||
elif format.get('height'):
|
||||
return '%sp' % format['height']
|
||||
return '{}p'.format(format['height'])
|
||||
elif format.get('width'):
|
||||
return '%dx?' % format['width']
|
||||
return default
|
||||
@ -3788,7 +3787,7 @@ def _format_note(self, fdict):
|
||||
if fdict.get('language'):
|
||||
if res:
|
||||
res += ' '
|
||||
res += '[%s]' % fdict['language']
|
||||
res += '[{}]'.format(fdict['language'])
|
||||
if fdict.get('format_note') is not None:
|
||||
if res:
|
||||
res += ' '
|
||||
@ -3800,7 +3799,7 @@ def _format_note(self, fdict):
|
||||
if fdict.get('container') is not None:
|
||||
if res:
|
||||
res += ', '
|
||||
res += '%s container' % fdict['container']
|
||||
res += '{} container'.format(fdict['container'])
|
||||
if (fdict.get('vcodec') is not None
|
||||
and fdict.get('vcodec') != 'none'):
|
||||
if res:
|
||||
@ -3815,7 +3814,7 @@ def _format_note(self, fdict):
|
||||
if fdict.get('fps') is not None:
|
||||
if res:
|
||||
res += ', '
|
||||
res += '%sfps' % fdict['fps']
|
||||
res += '{}fps'.format(fdict['fps'])
|
||||
if fdict.get('acodec') is not None:
|
||||
if res:
|
||||
res += ', '
|
||||
@ -3858,7 +3857,7 @@ def render_formats_table(self, info_dict):
|
||||
format_field(f, 'format_id'),
|
||||
format_field(f, 'ext'),
|
||||
self.format_resolution(f),
|
||||
self._format_note(f)
|
||||
self._format_note(f),
|
||||
] for f in formats if (f.get('preference') or 0) >= -1000]
|
||||
return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
|
||||
|
||||
@ -3964,11 +3963,11 @@ def print_debug_header(self):
|
||||
from .extractor.extractors import _LAZY_LOADER
|
||||
from .extractor.extractors import (
|
||||
_PLUGIN_CLASSES as plugin_ies,
|
||||
_PLUGIN_OVERRIDES as plugin_ie_overrides
|
||||
_PLUGIN_OVERRIDES as plugin_ie_overrides,
|
||||
)
|
||||
|
||||
def get_encoding(stream):
|
||||
ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
|
||||
ret = str(getattr(stream, 'encoding', f'missing ({type(stream).__name__})'))
|
||||
additional_info = []
|
||||
if os.environ.get('TERM', '').lower() == 'dumb':
|
||||
additional_info.append('dumb')
|
||||
@ -3979,13 +3978,13 @@ def get_encoding(stream):
|
||||
ret = f'{ret} ({",".join(additional_info)})'
|
||||
return ret
|
||||
|
||||
encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
|
||||
encoding_str = 'Encodings: locale {}, fs {}, pref {}, {}'.format(
|
||||
locale.getpreferredencoding(),
|
||||
sys.getfilesystemencoding(),
|
||||
self.get_encoding(),
|
||||
', '.join(
|
||||
f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
|
||||
if stream is not None and key != 'console')
|
||||
if stream is not None and key != 'console'),
|
||||
)
|
||||
|
||||
logger = self.params.get('logger')
|
||||
@ -4017,7 +4016,7 @@ def get_encoding(stream):
|
||||
else:
|
||||
write_debug('Lazy loading extractors is disabled')
|
||||
if self.params['compat_opts']:
|
||||
write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
|
||||
write_debug('Compatibility options: {}'.format(', '.join(self.params['compat_opts'])))
|
||||
|
||||
if current_git_head():
|
||||
write_debug(f'Git HEAD: {current_git_head()}')
|
||||
@ -4026,14 +4025,14 @@ def get_encoding(stream):
|
||||
exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
|
||||
ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
|
||||
if ffmpeg_features:
|
||||
exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
|
||||
exe_versions['ffmpeg'] += ' ({})'.format(','.join(sorted(ffmpeg_features)))
|
||||
|
||||
exe_versions['rtmpdump'] = rtmpdump_version()
|
||||
exe_versions['phantomjs'] = PhantomJSwrapper._version()
|
||||
exe_str = ', '.join(
|
||||
f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
|
||||
) or 'none'
|
||||
write_debug('exe versions: %s' % exe_str)
|
||||
write_debug(f'exe versions: {exe_str}')
|
||||
|
||||
from .compat.compat_utils import get_package_info
|
||||
from .dependencies import available_dependencies
|
||||
@ -4045,7 +4044,7 @@ def get_encoding(stream):
|
||||
write_debug(f'Proxy map: {self.proxies}')
|
||||
write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
|
||||
for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
|
||||
display_list = ['%s%s' % (
|
||||
display_list = ['{}{}'.format(
|
||||
klass.__name__, '' if klass.__name__ == name else f' as {name}')
|
||||
for name, klass in plugins.items()]
|
||||
if plugin_type == 'Extractor':
|
||||
@ -4062,14 +4061,13 @@ def get_encoding(stream):
|
||||
# Not implemented
|
||||
if False and self.params.get('call_home'):
|
||||
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
|
||||
write_debug('Public IP address: %s' % ipaddr)
|
||||
write_debug(f'Public IP address: {ipaddr}')
|
||||
latest_version = self.urlopen(
|
||||
'https://yt-dl.org/latest/version').read().decode()
|
||||
if version_tuple(latest_version) > version_tuple(__version__):
|
||||
self.report_warning(
|
||||
'You are using an outdated version (newest version: %s)! '
|
||||
'See https://yt-dl.org/update if you need help updating.' %
|
||||
latest_version)
|
||||
f'You are using an outdated version (newest version: {latest_version})! '
|
||||
'See https://yt-dl.org/update if you need help updating.')
|
||||
|
||||
@functools.cached_property
|
||||
def proxies(self):
|
||||
@ -4103,7 +4101,7 @@ def _opener(self):
|
||||
return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
|
||||
|
||||
def _get_available_impersonate_targets(self):
|
||||
# todo(future): make available as public API
|
||||
# TODO(future): make available as public API
|
||||
return [
|
||||
(target, rh.RH_NAME)
|
||||
for rh in self._request_director.handlers.values()
|
||||
@ -4112,7 +4110,7 @@ def _get_available_impersonate_targets(self):
|
||||
]
|
||||
|
||||
def _impersonate_target_available(self, target):
|
||||
# todo(future): make available as public API
|
||||
# TODO(future): make available as public API
|
||||
return any(
|
||||
rh.is_supported_target(target)
|
||||
for rh in self._request_director.handlers.values()
|
||||
@ -4238,7 +4236,7 @@ def get_encoding(self):
|
||||
return encoding
|
||||
|
||||
def _write_info_json(self, label, ie_result, infofn, overwrite=None):
|
||||
''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
|
||||
""" Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error """
|
||||
if overwrite is None:
|
||||
overwrite = self.params.get('overwrites', True)
|
||||
if not self.params.get('writeinfojson'):
|
||||
@ -4261,7 +4259,7 @@ def _write_info_json(self, label, ie_result, infofn, overwrite=None):
|
||||
return None
|
||||
|
||||
def _write_description(self, label, ie_result, descfn):
|
||||
''' Write description and returns True = written, False = skip, None = error '''
|
||||
""" Write description and returns True = written, False = skip, None = error """
|
||||
if not self.params.get('writedescription'):
|
||||
return False
|
||||
elif not descfn:
|
||||
@ -4285,7 +4283,7 @@ def _write_description(self, label, ie_result, descfn):
|
||||
return True
|
||||
|
||||
def _write_subtitles(self, info_dict, filename):
|
||||
''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
|
||||
""" Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error"""
|
||||
ret = []
|
||||
subtitles = info_dict.get('requested_subtitles')
|
||||
if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
|
||||
@ -4331,7 +4329,7 @@ def _write_subtitles(self, info_dict, filename):
|
||||
self.dl(sub_filename, sub_copy, subtitle=True)
|
||||
sub_info['filepath'] = sub_filename
|
||||
ret.append((sub_filename, sub_filename_final))
|
||||
except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
|
||||
except (DownloadError, ExtractorError, OSError, ValueError, *network_exceptions) as err:
|
||||
msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
|
||||
if self.params.get('ignoreerrors') is not True: # False or 'only_download'
|
||||
if not self.params.get('ignoreerrors'):
|
||||
@ -4341,7 +4339,7 @@ def _write_subtitles(self, info_dict, filename):
|
||||
return ret
|
||||
|
||||
def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
|
||||
''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
|
||||
""" Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error """
|
||||
write_all = self.params.get('write_all_thumbnails', False)
|
||||
thumbnails, ret = [], []
|
||||
if write_all or self.params.get('writethumbnail', False):
|
||||
@ -4368,8 +4366,8 @@ def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None
|
||||
|
||||
existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
|
||||
if existing_thumb:
|
||||
self.to_screen('[info] %s is already present' % (
|
||||
thumb_display_id if multiple else f'{label} thumbnail').capitalize())
|
||||
self.to_screen('[info] {} is already present'.format((
|
||||
thumb_display_id if multiple else f'{label} thumbnail').capitalize()))
|
||||
t['filepath'] = existing_thumb
|
||||
ret.append((existing_thumb, thumb_filename_final))
|
||||
else:
|
||||
|
@ -14,7 +14,7 @@
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from .compat import compat_os_name, compat_shlex_quote
|
||||
from .compat import compat_os_name
|
||||
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
|
||||
from .downloader.external import get_external_downloader
|
||||
from .extractor import list_extractor_classes
|
||||
@ -58,6 +58,7 @@
|
||||
read_stdin,
|
||||
render_table,
|
||||
setproctitle,
|
||||
shell_quote,
|
||||
traverse_obj,
|
||||
variadic,
|
||||
write_string,
|
||||
@ -115,9 +116,9 @@ def print_extractor_information(opts, urls):
|
||||
ie.description(markdown=False, search_examples=_SEARCHES)
|
||||
for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False)
|
||||
elif opts.ap_list_mso:
|
||||
out = 'Supported TV Providers:\n%s\n' % render_table(
|
||||
out = 'Supported TV Providers:\n{}\n'.format(render_table(
|
||||
['mso', 'mso name'],
|
||||
[[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()])
|
||||
[[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]))
|
||||
else:
|
||||
return False
|
||||
write_string(out, out=sys.stdout)
|
||||
@ -129,7 +130,7 @@ def _unused_compat_opt(name):
|
||||
if name not in opts.compat_opts:
|
||||
return False
|
||||
opts.compat_opts.discard(name)
|
||||
opts.compat_opts.update(['*%s' % name])
|
||||
opts.compat_opts.update([f'*{name}'])
|
||||
return True
|
||||
|
||||
def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
|
||||
@ -222,7 +223,7 @@ def validate_minmax(min_val, max_val, min_name, max_name=None):
|
||||
validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval')
|
||||
|
||||
if opts.wait_for_video is not None:
|
||||
min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None])
|
||||
min_wait, max_wait, *_ = map(parse_duration, [*opts.wait_for_video.split('-', 1), None])
|
||||
validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video),
|
||||
'time range to wait for video', opts.wait_for_video)
|
||||
validate_minmax(min_wait, max_wait, 'time range to wait for video')
|
||||
@ -264,9 +265,9 @@ def parse_retries(name, value):
|
||||
# Retry sleep function
|
||||
def parse_sleep_func(expr):
|
||||
NUMBER_RE = r'\d+(?:\.\d+)?'
|
||||
op, start, limit, step, *_ = tuple(re.fullmatch(
|
||||
op, start, limit, step, *_ = (*tuple(re.fullmatch(
|
||||
rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?',
|
||||
expr.strip()).groups()) + (None, None)
|
||||
expr.strip()).groups()), None, None)
|
||||
|
||||
if op == 'exp':
|
||||
return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf'))
|
||||
@ -396,13 +397,13 @@ def parse_chapters(name, value, advanced=False):
|
||||
# MetadataParser
|
||||
def metadataparser_actions(f):
|
||||
if isinstance(f, str):
|
||||
cmd = '--parse-metadata %s' % compat_shlex_quote(f)
|
||||
cmd = f'--parse-metadata {shell_quote(f)}'
|
||||
try:
|
||||
actions = [MetadataFromFieldPP.to_action(f)]
|
||||
except Exception as err:
|
||||
raise ValueError(f'{cmd} is invalid; {err}')
|
||||
else:
|
||||
cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
|
||||
cmd = f'--replace-in-metadata {shell_quote(f)}'
|
||||
actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))
|
||||
|
||||
for action in actions:
|
||||
@ -413,7 +414,7 @@ def metadataparser_actions(f):
|
||||
yield action
|
||||
|
||||
if opts.metafromtitle is not None:
|
||||
opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle)
|
||||
opts.parse_metadata.setdefault('pre_process', []).append(f'title:{opts.metafromtitle}')
|
||||
opts.parse_metadata = {
|
||||
k: list(itertools.chain(*map(metadataparser_actions, v)))
|
||||
for k, v in opts.parse_metadata.items()
|
||||
@ -602,7 +603,7 @@ def get_postprocessors(opts):
|
||||
yield {
|
||||
'key': 'MetadataParser',
|
||||
'actions': actions,
|
||||
'when': when
|
||||
'when': when,
|
||||
}
|
||||
sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
|
||||
if sponsorblock_query:
|
||||
@ -610,19 +611,19 @@ def get_postprocessors(opts):
|
||||
'key': 'SponsorBlock',
|
||||
'categories': sponsorblock_query,
|
||||
'api': opts.sponsorblock_api,
|
||||
'when': 'after_filter'
|
||||
'when': 'after_filter',
|
||||
}
|
||||
if opts.convertsubtitles:
|
||||
yield {
|
||||
'key': 'FFmpegSubtitlesConvertor',
|
||||
'format': opts.convertsubtitles,
|
||||
'when': 'before_dl'
|
||||
'when': 'before_dl',
|
||||
}
|
||||
if opts.convertthumbnails:
|
||||
yield {
|
||||
'key': 'FFmpegThumbnailsConvertor',
|
||||
'format': opts.convertthumbnails,
|
||||
'when': 'before_dl'
|
||||
'when': 'before_dl',
|
||||
}
|
||||
if opts.extractaudio:
|
||||
yield {
|
||||
@ -647,7 +648,7 @@ def get_postprocessors(opts):
|
||||
yield {
|
||||
'key': 'FFmpegEmbedSubtitle',
|
||||
# already_have_subtitle = True prevents the file from being deleted after embedding
|
||||
'already_have_subtitle': opts.writesubtitles and keep_subs
|
||||
'already_have_subtitle': opts.writesubtitles and keep_subs,
|
||||
}
|
||||
if not opts.writeautomaticsub and keep_subs:
|
||||
opts.writesubtitles = True
|
||||
@ -660,7 +661,7 @@ def get_postprocessors(opts):
|
||||
'remove_sponsor_segments': opts.sponsorblock_remove,
|
||||
'remove_ranges': opts.remove_ranges,
|
||||
'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
|
||||
'force_keyframes': opts.force_keyframes_at_cuts
|
||||
'force_keyframes': opts.force_keyframes_at_cuts,
|
||||
}
|
||||
# FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
|
||||
# FFmpegExtractAudioPP as containers before conversion may not support
|
||||
@ -694,7 +695,7 @@ def get_postprocessors(opts):
|
||||
yield {
|
||||
'key': 'EmbedThumbnail',
|
||||
# already_have_thumbnail = True prevents the file from being deleted after embedding
|
||||
'already_have_thumbnail': opts.writethumbnail
|
||||
'already_have_thumbnail': opts.writethumbnail,
|
||||
}
|
||||
if not opts.writethumbnail:
|
||||
opts.writethumbnail = True
|
||||
@ -741,7 +742,7 @@ def parse_options(argv=None):
|
||||
print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
|
||||
any_getting = any(getattr(opts, k) for k in (
|
||||
'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
|
||||
'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
|
||||
'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl',
|
||||
))
|
||||
if opts.quiet is None:
|
||||
opts.quiet = any_getting or opts.print_json or bool(opts.forceprint)
|
||||
@ -1002,7 +1003,7 @@ def _real_main(argv=None):
|
||||
def make_row(target, handler):
|
||||
return [
|
||||
join_nonempty(target.client.title(), target.version, delim='-') or '-',
|
||||
join_nonempty((target.os or "").title(), target.os_version, delim='-') or '-',
|
||||
join_nonempty((target.os or '').title(), target.os_version, delim='-') or '-',
|
||||
handler,
|
||||
]
|
||||
|
||||
|
@ -68,7 +68,7 @@ def pad_block(block, padding_mode):
|
||||
raise NotImplementedError(f'Padding mode {padding_mode} is not implemented')
|
||||
|
||||
if padding_mode == 'iso7816' and padding_size:
|
||||
block = block + [0x80] # NB: += mutates list
|
||||
block = [*block, 0x80] # NB: += mutates list
|
||||
padding_size -= 1
|
||||
|
||||
return block + [PADDING_BYTE[padding_mode]] * padding_size
|
||||
@ -110,9 +110,7 @@ def aes_ecb_decrypt(data, key, iv=None):
|
||||
for i in range(block_count):
|
||||
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
|
||||
encrypted_data += aes_decrypt(block, expanded_key)
|
||||
encrypted_data = encrypted_data[:len(data)]
|
||||
|
||||
return encrypted_data
|
||||
return encrypted_data[:len(data)]
|
||||
|
||||
|
||||
def aes_ctr_decrypt(data, key, iv):
|
||||
@ -148,9 +146,7 @@ def aes_ctr_encrypt(data, key, iv):
|
||||
|
||||
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
|
||||
encrypted_data += xor(block, cipher_counter_block)
|
||||
encrypted_data = encrypted_data[:len(data)]
|
||||
|
||||
return encrypted_data
|
||||
return encrypted_data[:len(data)]
|
||||
|
||||
|
||||
def aes_cbc_decrypt(data, key, iv):
|
||||
@ -174,9 +170,7 @@ def aes_cbc_decrypt(data, key, iv):
|
||||
decrypted_block = aes_decrypt(block, expanded_key)
|
||||
decrypted_data += xor(decrypted_block, previous_cipher_block)
|
||||
previous_cipher_block = block
|
||||
decrypted_data = decrypted_data[:len(data)]
|
||||
|
||||
return decrypted_data
|
||||
return decrypted_data[:len(data)]
|
||||
|
||||
|
||||
def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
|
||||
@ -224,7 +218,7 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
|
||||
hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))
|
||||
|
||||
if len(nonce) == 12:
|
||||
j0 = nonce + [0, 0, 0, 1]
|
||||
j0 = [*nonce, 0, 0, 0, 1]
|
||||
else:
|
||||
fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
|
||||
ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
|
||||
@ -242,11 +236,11 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
|
||||
data
|
||||
+ [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len) # pad
|
||||
+ bytes_to_intlist((0 * 8).to_bytes(8, 'big') # length of associated data
|
||||
+ ((len(data) * 8).to_bytes(8, 'big'))) # length of data
|
||||
+ ((len(data) * 8).to_bytes(8, 'big'))), # length of data
|
||||
)
|
||||
|
||||
if tag != aes_ctr_encrypt(s_tag, key, j0):
|
||||
raise ValueError("Mismatching authentication tag")
|
||||
raise ValueError('Mismatching authentication tag')
|
||||
|
||||
return decrypted_data
|
||||
|
||||
@ -288,9 +282,7 @@ def aes_decrypt(data, expanded_key):
|
||||
data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
|
||||
data = shift_rows_inv(data)
|
||||
data = sub_bytes_inv(data)
|
||||
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
|
||||
|
||||
return data
|
||||
return xor(data, expanded_key[:BLOCK_SIZE_BYTES])
|
||||
|
||||
|
||||
def aes_decrypt_text(data, password, key_size_bytes):
|
||||
@ -318,9 +310,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
|
||||
cipher = data[NONCE_LENGTH_BYTES:]
|
||||
|
||||
decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
|
||||
plaintext = intlist_to_bytes(decrypted_data)
|
||||
|
||||
return plaintext
|
||||
return intlist_to_bytes(decrypted_data)
|
||||
|
||||
|
||||
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
|
||||
@ -428,9 +418,7 @@ def key_expansion(data):
|
||||
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
|
||||
temp = data[-4:]
|
||||
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
||||
data = data[:expanded_key_size_bytes]
|
||||
|
||||
return data
|
||||
return data[:expanded_key_size_bytes]
|
||||
|
||||
|
||||
def iter_vector(iv):
|
||||
@ -511,7 +499,7 @@ def block_product(block_x, block_y):
|
||||
# NIST SP 800-38D, Algorithm 1
|
||||
|
||||
if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
|
||||
raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES)
|
||||
raise ValueError(f'Length of blocks need to be {BLOCK_SIZE_BYTES} bytes')
|
||||
|
||||
block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
|
||||
block_v = block_y[:]
|
||||
@ -534,7 +522,7 @@ def ghash(subkey, data):
|
||||
# NIST SP 800-38D, Algorithm 2
|
||||
|
||||
if len(data) % BLOCK_SIZE_BYTES:
|
||||
raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES)
|
||||
raise ValueError(f'Length of data should be {BLOCK_SIZE_BYTES} bytes')
|
||||
|
||||
last_y = [0] * BLOCK_SIZE_BYTES
|
||||
for i in range(0, len(data), BLOCK_SIZE_BYTES):
|
||||
|
@ -81,10 +81,10 @@ def remove(self):
|
||||
|
||||
cachedir = self._get_root_dir()
|
||||
if not any((term in cachedir) for term in ('cache', 'tmp')):
|
||||
raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
|
||||
raise Exception(f'Not removing directory {cachedir} - this does not look like a cache dir')
|
||||
|
||||
self._ydl.to_screen(
|
||||
'Removing cache dir %s .' % cachedir, skip_eol=True)
|
||||
f'Removing cache dir {cachedir} .', skip_eol=True)
|
||||
if os.path.exists(cachedir):
|
||||
self._ydl.to_screen('.', skip_eol=True)
|
||||
shutil.rmtree(cachedir)
|
||||
|
@ -35,7 +35,7 @@
|
||||
from ..dependencies import brotli as compat_brotli # noqa: F401
|
||||
from ..dependencies import websockets as compat_websockets # noqa: F401
|
||||
from ..dependencies.Cryptodome import AES as compat_pycrypto_AES # noqa: F401
|
||||
from ..networking.exceptions import HTTPError as compat_HTTPError # noqa: F401
|
||||
from ..networking.exceptions import HTTPError as compat_HTTPError
|
||||
|
||||
passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))
|
||||
|
||||
|
@ -7,6 +7,6 @@
|
||||
del passthrough_module
|
||||
|
||||
try:
|
||||
cache # >= 3.9
|
||||
_ = cache # >= 3.9
|
||||
except NameError:
|
||||
cache = lru_cache(maxsize=None)
|
||||
|
@ -146,7 +146,7 @@ def _extract_firefox_cookies(profile, container, logger):
|
||||
identities = json.load(containers).get('identities', [])
|
||||
container_id = next((context.get('userContextId') for context in identities if container in (
|
||||
context.get('name'),
|
||||
try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group())
|
||||
try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()),
|
||||
)), None)
|
||||
if not isinstance(container_id, int):
|
||||
raise ValueError(f'could not find firefox container "{container}" in containers.json')
|
||||
@ -263,7 +263,7 @@ def _get_chromium_based_browser_settings(browser_name):
|
||||
return {
|
||||
'browser_dir': browser_dir,
|
||||
'keyring_name': keyring_name,
|
||||
'supports_profiles': browser_name not in browsers_without_profiles
|
||||
'supports_profiles': browser_name not in browsers_without_profiles,
|
||||
}
|
||||
|
||||
|
||||
@ -826,7 +826,7 @@ def _choose_linux_keyring(logger):
|
||||
elif desktop_environment == _LinuxDesktopEnvironment.KDE6:
|
||||
linux_keyring = _LinuxKeyring.KWALLET6
|
||||
elif desktop_environment in (
|
||||
_LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER
|
||||
_LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER,
|
||||
):
|
||||
linux_keyring = _LinuxKeyring.BASICTEXT
|
||||
else:
|
||||
@ -861,7 +861,7 @@ def _get_kwallet_network_wallet(keyring, logger):
|
||||
'dbus-send', '--session', '--print-reply=literal',
|
||||
f'--dest={service_name}',
|
||||
wallet_path,
|
||||
'org.kde.KWallet.networkWallet'
|
||||
'org.kde.KWallet.networkWallet',
|
||||
], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||
|
||||
if returncode:
|
||||
@ -891,7 +891,7 @@ def _get_kwallet_password(browser_keyring_name, keyring, logger):
|
||||
'kwallet-query',
|
||||
'--read-password', f'{browser_keyring_name} Safe Storage',
|
||||
'--folder', f'{browser_keyring_name} Keys',
|
||||
network_wallet
|
||||
network_wallet,
|
||||
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||
|
||||
if returncode:
|
||||
@ -931,9 +931,8 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
|
||||
for item in col.get_all_items():
|
||||
if item.get_label() == f'{browser_keyring_name} Safe Storage':
|
||||
return item.get_secret()
else:
logger.error('failed to read from keyring')
return b''
logger.error('failed to read from keyring')
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
@ -1053,7 +1052,7 @@ class DATA_BLOB(ctypes.Structure):
None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display
0, # dwFlags
ctypes.byref(blob_out) # pDataOut
ctypes.byref(blob_out), # pDataOut
)
if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True)
@ -1129,24 +1128,24 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
_LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}')
_RESERVED = {
"expires",
"path",
"comment",
"domain",
"max-age",
"secure",
"httponly",
"version",
"samesite",
'expires',
'path',
'comment',
'domain',
'max-age',
'secure',
'httponly',
'version',
'samesite',
}
_FLAGS = {"secure", "httponly"}
_FLAGS = {'secure', 'httponly'}
# Added 'bad' group to catch the remaining value
_COOKIE_PATTERN = re.compile(r"""
_COOKIE_PATTERN = re.compile(r'''
\s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key'
[""" + _LEGAL_KEY_CHARS + r"""]+?# Any word of at least one letter
[''' + _LEGAL_KEY_CHARS + r''']+?# Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
@ -1156,7 +1155,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
[""" + _LEGAL_VALUE_CHARS + r"""]* # Any word or empty string
[''' + _LEGAL_VALUE_CHARS + r''']* # Any word or empty string
) # End of group 'val'
| # or
(?P<bad>(?:\\;|[^;])*?) # 'bad' group fallback for invalid values
@ -1164,7 +1163,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII | re.VERBOSE)
''', re.ASCII | re.VERBOSE)
def load(self, data):
# Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776
@ -1260,14 +1259,14 @@ def _really_save(self, f, ignore_discard, ignore_expires):
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name, value = '', name
f.write('%s\n' % '\t'.join((
f.write('{}\n'.format('\t'.join((
cookie.domain,
self._true_or_false(cookie.domain.startswith('.')),
cookie.path,
self._true_or_false(cookie.secure),
str_or_none(cookie.expires, default=''),
name, value
)))
name, value,
))))
def save(self, filename=None, ignore_discard=True, ignore_expires=True):
"""
@ -1306,10 +1305,10 @@ def prepare_line(line):
return line
cookie_list = line.split('\t')
if len(cookie_list) != self._ENTRY_LEN:
raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
raise http.cookiejar.LoadError(f'invalid length {len(cookie_list)}')
cookie = self._CookieFileEntry(*cookie_list)
if cookie.expires_at and not cookie.expires_at.isdigit():
raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
raise http.cookiejar.LoadError(f'invalid expires at {cookie.expires_at}')
return line
cf = io.StringIO()
@ -404,7 +404,7 @@ def with_fields(*tups, default=''):
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len)
self.to_screen(f'[download] Resuming download at byte {resume_len}')
def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
"""Report retry"""
@ -55,7 +55,7 @@ def real_download(self, filename, info_dict):
# correct and expected termination thus all postprocessing
# should take place
retval = 0
self.to_screen('[%s] Interrupted by user' % self.get_basename())
self.to_screen(f'[{self.get_basename()}] Interrupted by user')
finally:
if self._cookies_tempfile:
self.try_remove(self._cookies_tempfile)
@ -172,7 +172,7 @@ def _call_downloader(self, tmpfilename, info_dict):
decrypt_fragment = self.decrypter(info_dict)
dest, _ = self.sanitize_open(tmpfilename, 'wb')
for frag_index, fragment in enumerate(info_dict['fragments']):
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
fragment_filename = f'{tmpfilename}-Frag{frag_index}'
try:
src, _ = self.sanitize_open(fragment_filename, 'rb')
except OSError as err:
@ -186,7 +186,7 @@ def _call_downloader(self, tmpfilename, info_dict):
if not self.params.get('keep_fragments', False):
self.try_remove(encodeFilename(fragment_filename))
dest.close()
self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
self.try_remove(encodeFilename(f'{tmpfilename}.frag.urls'))
return 0
def _call_process(self, cmd, info_dict):
@ -336,11 +336,11 @@ def _make_cmd(self, tmpfilename, info_dict):
if 'fragments' in info_dict:
cmd += ['--uri-selector=inorder']
url_list_file = '%s.frag.urls' % tmpfilename
url_list_file = f'{tmpfilename}.frag.urls'
url_list = []
for frag_index, fragment in enumerate(info_dict['fragments']):
fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
fragment_filename = f'{os.path.basename(tmpfilename)}-Frag{frag_index}'
url_list.append('{}\n\tout={}'.format(fragment['url'], self._aria2c_filename(fragment_filename)))
stream, _ = self.sanitize_open(url_list_file, 'wb')
stream.write('\n'.join(url_list).encode())
stream.close()
@ -357,7 +357,7 @@ def aria2c_rpc(self, rpc_port, rpc_secret, method, params=()):
'id': sanitycheck,
'method': method,
'params': [f'token:{rpc_secret}', *params],
}).encode('utf-8')
}).encode()
request = Request(
f'http://localhost:{rpc_port}/jsonrpc',
data=d, headers={
@ -416,7 +416,7 @@ def get_stat(key, *obj, average=False):
'total_bytes_estimate': total,
'eta': (total - downloaded) / (speed or 1),
'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
'elapsed': time.time() - started
'elapsed': time.time() - started,
})
self._hook_progress(status, info_dict)
@ -509,12 +509,12 @@ def _call_downloader(self, tmpfilename, info_dict):
proxy = self.params.get('proxy')
if proxy:
if not re.match(r'^[\da-zA-Z]+://', proxy):
proxy = 'http://%s' % proxy
proxy = f'http://{proxy}'
if proxy.startswith('socks'):
self.report_warning(
'%s does not support SOCKS proxies. Downloading is likely to fail. '
'Consider adding --hls-prefer-native to your command.' % self.get_basename())
f'{self.get_basename()} does not support SOCKS proxies. Downloading is likely to fail. '
'Consider adding --hls-prefer-native to your command.')
# Since December 2015 ffmpeg supports -http_proxy option (see
# http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
@ -575,7 +575,7 @@ def _call_downloader(self, tmpfilename, info_dict):
if end_time:
args += ['-t', str(end_time - start_time)]
args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]
args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', fmt['url']]
if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
args += ['-c', 'copy']
@ -67,12 +67,12 @@ def read_asrt(self):
self.read_bytes(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
for _ in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
for _ in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
@ -91,12 +91,12 @@ def read_afrt(self):
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
for _ in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
for _ in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
@ -135,11 +135,11 @@ def read_abst(self):
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
for _ in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
for _ in range(quality_count):
self.read_string()
# DrmData
self.read_string()
@ -148,14 +148,14 @@ def read_abst(self):
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
for _ in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
for _ in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
@ -309,7 +309,7 @@ def _parse_bootstrap_node(self, node, base_url):
|
||||
def real_download(self, filename, info_dict):
|
||||
man_url = info_dict['url']
|
||||
requested_bitrate = info_dict.get('tbr')
|
||||
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
|
||||
self.to_screen(f'[{self.FD_NAME}] Downloading f4m manifest')
|
||||
|
||||
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
|
||||
man_url = urlh.url
|
||||
@ -326,8 +326,8 @@ def real_download(self, filename, info_dict):
|
||||
formats = sorted(formats, key=lambda f: f[0])
|
||||
rate, media = formats[-1]
|
||||
else:
|
||||
rate, media = list(filter(
|
||||
lambda f: int(f[0]) == requested_bitrate, formats))[0]
|
||||
rate, media = next(filter(
|
||||
lambda f: int(f[0]) == requested_bitrate, formats))
|
||||
|
||||
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
|
||||
man_base_url = get_base_url(doc) or man_url
|
||||
|
@ -199,7 +199,7 @@ def _prepare_frag_download(self, ctx):
|
||||
'.ytdl file is corrupt' if is_corrupt else
|
||||
'Inconsistent state of incomplete fragment download')
|
||||
self.report_warning(
|
||||
'%s. Restarting from the beginning ...' % message)
|
||||
f'{message}. Restarting from the beginning ...')
|
||||
ctx['fragment_index'] = resume_len = 0
|
||||
if 'ytdl_corrupt' in ctx:
|
||||
del ctx['ytdl_corrupt']
|
||||
@ -366,10 +366,10 @@ def decrypt_fragment(fragment, frag_content):
|
||||
return decrypt_fragment
|
||||
|
||||
def download_and_append_fragments_multiple(self, *args, **kwargs):
|
||||
'''
|
||||
"""
|
||||
@params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
|
||||
all args must be either tuple or list
|
||||
'''
|
||||
"""
|
||||
interrupt_trigger = [True]
|
||||
max_progress = len(args)
|
||||
if max_progress == 1:
|
||||
@ -424,7 +424,7 @@ def interrupt_trigger_iter(fg):
|
||||
finally:
|
||||
tpe.shutdown(wait=True)
|
||||
if not interrupt_trigger[0] and not is_live:
|
||||
raise KeyboardInterrupt()
|
||||
raise KeyboardInterrupt
|
||||
# we expect the user wants to stop and DO WANT the preceding postprocessors to run;
|
||||
# so returning a intermediate result here instead of KeyboardInterrupt on live
|
||||
return result
|
||||
|
@ -72,7 +72,7 @@ def check_results():
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
man_url = info_dict['url']
|
||||
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
|
||||
self.to_screen(f'[{self.FD_NAME}] Downloading m3u8 manifest')
|
||||
|
||||
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
|
||||
man_url = urlh.url
|
||||
@ -228,7 +228,7 @@ def is_ad_fragment_end(s):
|
||||
'url': frag_url,
|
||||
'decrypt_info': decrypt_info,
|
||||
'byte_range': byte_range,
|
||||
'media_sequence': media_sequence
|
||||
'media_sequence': media_sequence,
|
||||
})
|
||||
media_sequence += 1
|
||||
|
||||
@ -350,9 +350,8 @@ def pack_fragment(frag_content, frag_index):
|
||||
# XXX: this should probably be silent as well
|
||||
# or verify that all segments contain the same data
|
||||
self.report_warning(bug_reports_message(
|
||||
'Discarding a %s block found in the middle of the stream; '
|
||||
'if the subtitles display incorrectly,'
|
||||
% (type(block).__name__)))
|
||||
f'Discarding a {type(block).__name__} block found in the middle of the stream; '
|
||||
'if the subtitles display incorrectly,'))
|
||||
continue
|
||||
block.write_into(output)
|
||||
|
||||
|
@ -176,7 +176,7 @@ def establish_connection():
|
||||
'downloaded_bytes': ctx.resume_len,
|
||||
'total_bytes': ctx.resume_len,
|
||||
}, info_dict)
|
||||
raise SucceedDownload()
|
||||
raise SucceedDownload
|
||||
else:
|
||||
# The length does not match, we start the download over
|
||||
self.report_unable_to_resume()
|
||||
@ -194,7 +194,7 @@ def establish_connection():
|
||||
|
||||
def close_stream():
|
||||
if ctx.stream is not None:
|
||||
if not ctx.tmpfilename == '-':
|
||||
if ctx.tmpfilename != '-':
|
||||
ctx.stream.close()
|
||||
ctx.stream = None
|
||||
|
||||
@ -268,20 +268,20 @@ def retry(e):
|
||||
ctx.filename = self.undo_temp_name(ctx.tmpfilename)
|
||||
self.report_destination(ctx.filename)
|
||||
except OSError as err:
|
||||
self.report_error('unable to open for writing: %s' % str(err))
|
||||
self.report_error(f'unable to open for writing: {err}')
|
||||
return False
|
||||
|
||||
if self.params.get('xattr_set_filesize', False) and data_len is not None:
|
||||
try:
|
||||
write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
|
||||
except (XAttrUnavailableError, XAttrMetadataError) as err:
|
||||
self.report_error('unable to set filesize xattr: %s' % str(err))
|
||||
self.report_error(f'unable to set filesize xattr: {err}')
|
||||
|
||||
try:
|
||||
ctx.stream.write(data_block)
|
||||
except OSError as err:
|
||||
self.to_stderr('\n')
|
||||
self.report_error('unable to write data: %s' % str(err))
|
||||
self.report_error(f'unable to write data: {err}')
|
||||
return False
|
||||
|
||||
# Apply rate limit
|
||||
@ -327,7 +327,7 @@ def retry(e):
|
||||
elif now - ctx.throttle_start > 3:
|
||||
if ctx.stream is not None and ctx.tmpfilename != '-':
|
||||
ctx.stream.close()
|
||||
raise ThrottledDownload()
|
||||
raise ThrottledDownload
|
||||
elif speed:
|
||||
ctx.throttle_start = None
|
||||
|
||||
@ -338,7 +338,7 @@ def retry(e):
|
||||
|
||||
if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
|
||||
ctx.resume_len = byte_counter
|
||||
raise NextFragment()
|
||||
raise NextFragment
|
||||
|
||||
if ctx.tmpfilename != '-':
|
||||
ctx.stream.close()
|
||||
|
@ -251,7 +251,7 @@ def real_download(self, filename, info_dict):
|
||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||
|
||||
frag_index = 0
|
||||
for i, segment in enumerate(segments):
|
||||
for segment in segments:
|
||||
frag_index += 1
|
||||
if frag_index <= ctx['fragment_index']:
|
||||
continue
|
||||
|
@ -10,7 +10,7 @@
|
||||
|
||||
|
||||
class MhtmlFD(FragmentFD):
|
||||
_STYLESHEET = """\
|
||||
_STYLESHEET = '''\
|
||||
html, body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
@ -45,7 +45,7 @@ class MhtmlFD(FragmentFD):
|
||||
max-width: 100%;
|
||||
max-height: calc(100vh - 5em);
|
||||
}
|
||||
"""
|
||||
'''
|
||||
_STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET)
|
||||
_STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET)
|
||||
|
||||
@ -57,24 +57,19 @@ def _escape_mime(s):
|
||||
)).decode('us-ascii') + '?='
|
||||
|
||||
def _gen_cid(self, i, fragment, frag_boundary):
|
||||
return '%u.%s@yt-dlp.github.io.invalid' % (i, frag_boundary)
|
||||
return f'{i}.{frag_boundary}@yt-dlp.github.io.invalid'
|
||||
|
||||
def _gen_stub(self, *, fragments, frag_boundary, title):
|
||||
output = io.StringIO()
|
||||
|
||||
output.write((
|
||||
output.write(
|
||||
'<!DOCTYPE html>'
|
||||
'<html>'
|
||||
'<head>'
|
||||
'' '<meta name="generator" content="yt-dlp {version}">'
|
||||
'' '<title>{title}</title>'
|
||||
'' '<style>{styles}</style>'
|
||||
'<body>'
|
||||
).format(
|
||||
version=escapeHTML(YT_DLP_VERSION),
|
||||
styles=self._STYLESHEET,
|
||||
title=escapeHTML(title)
|
||||
))
|
||||
f'<meta name="generator" content="yt-dlp {escapeHTML(YT_DLP_VERSION)}">'
|
||||
f'<title>{escapeHTML(title)}</title>'
|
||||
f'<style>{self._STYLESHEET}</style>'
|
||||
'<body>')
|
||||
|
||||
t0 = 0
|
||||
for i, frag in enumerate(fragments):
|
||||
@ -87,15 +82,12 @@ def _gen_stub(self, *, fragments, frag_boundary, title):
|
||||
num=i + 1,
|
||||
t0=srt_subtitles_timecode(t0),
|
||||
t1=srt_subtitles_timecode(t1),
|
||||
duration=formatSeconds(frag['duration'], msec=True)
|
||||
duration=formatSeconds(frag['duration'], msec=True),
|
||||
))
|
||||
except (KeyError, ValueError, TypeError):
|
||||
t1 = None
|
||||
output.write((
|
||||
'<figcaption>Slide #{num}</figcaption>'
|
||||
).format(num=i + 1))
|
||||
output.write('<img src="cid:{cid}">'.format(
|
||||
cid=self._gen_cid(i, frag, frag_boundary)))
|
||||
output.write(f'<figcaption>Slide #{i + 1}</figcaption>')
|
||||
output.write(f'<img src="cid:{self._gen_cid(i, frag, frag_boundary)}">')
|
||||
output.write('</figure>')
|
||||
t0 = t1
|
||||
|
||||
@ -126,31 +118,24 @@ def real_download(self, filename, info_dict):
|
||||
stub = self._gen_stub(
|
||||
fragments=fragments,
|
||||
frag_boundary=frag_boundary,
|
||||
title=title
|
||||
title=title,
|
||||
)
|
||||
|
||||
ctx['dest_stream'].write((
|
||||
'MIME-Version: 1.0\r\n'
|
||||
'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
|
||||
'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
|
||||
'Subject: {title}\r\n'
|
||||
f'Subject: {self._escape_mime(title)}\r\n'
|
||||
'Content-type: multipart/related; '
|
||||
'' 'boundary="{boundary}"; '
|
||||
'' 'type="text/html"\r\n'
|
||||
'X.yt-dlp.Origin: {origin}\r\n'
|
||||
f'boundary="{frag_boundary}"; '
|
||||
'type="text/html"\r\n'
|
||||
f'X.yt-dlp.Origin: {origin}\r\n'
|
||||
'\r\n'
|
||||
'--{boundary}\r\n'
|
||||
f'--{frag_boundary}\r\n'
|
||||
'Content-Type: text/html; charset=utf-8\r\n'
|
||||
'Content-Length: {length}\r\n'
|
||||
f'Content-Length: {len(stub)}\r\n'
|
||||
'\r\n'
|
||||
'{stub}\r\n'
|
||||
).format(
|
||||
origin=origin,
|
||||
boundary=frag_boundary,
|
||||
length=len(stub),
|
||||
title=self._escape_mime(title),
|
||||
stub=stub
|
||||
).encode())
|
||||
f'{stub}\r\n').encode())
|
||||
extra_state['header_written'] = True
|
||||
|
||||
for i, fragment in enumerate(fragments):
|
||||
|
@ -15,7 +15,7 @@ class NiconicoDmcFD(FileDownloader):
|
||||
def real_download(self, filename, info_dict):
|
||||
from ..extractor.niconico import NiconicoIE
|
||||
|
||||
self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
|
||||
self.to_screen(f'[{self.FD_NAME}] Downloading from DMC')
|
||||
ie = NiconicoIE(self.ydl)
|
||||
info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
|
||||
|
||||
@ -34,7 +34,7 @@ def heartbeat():
|
||||
try:
|
||||
self.ydl.urlopen(request).read()
|
||||
except Exception:
|
||||
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
|
||||
self.to_screen(f'[{self.FD_NAME}] Heartbeat failed')
|
||||
|
||||
with heartbeat_lock:
|
||||
if not download_complete:
|
||||
@ -85,14 +85,14 @@ def communicate_ws(reconnect):
|
||||
'quality': live_quality,
|
||||
'protocol': 'hls+fmp4',
|
||||
'latency': live_latency,
|
||||
'chasePlay': False
|
||||
'chasePlay': False,
|
||||
},
|
||||
'room': {
|
||||
'protocol': 'webSocket',
|
||||
'commentable': True
|
||||
'commentable': True,
|
||||
},
|
||||
'reconnect': True,
|
||||
}
|
||||
},
|
||||
}))
|
||||
else:
|
||||
ws = ws_extractor
|
||||
@ -118,7 +118,7 @@ def communicate_ws(reconnect):
|
||||
elif self.ydl.params.get('verbose', False):
|
||||
if len(recv) > 100:
|
||||
recv = recv[:100] + '...'
|
||||
self.to_screen('[debug] Server said: %s' % recv)
|
||||
self.to_screen(f'[debug] Server said: {recv}')
|
||||
|
||||
def ws_main():
|
||||
reconnect = False
|
||||
@ -128,7 +128,7 @@ def ws_main():
|
||||
if ret is True:
|
||||
return
|
||||
except BaseException as e:
|
||||
self.to_screen('[%s] %s: Connection error occured, reconnecting after 10 seconds: %s' % ('niconico:live', video_id, str_or_none(e)))
|
||||
self.to_screen('[{}] {}: Connection error occured, reconnecting after 10 seconds: {}'.format('niconico:live', video_id, str_or_none(e)))
|
||||
time.sleep(10)
|
||||
continue
|
||||
finally:
|
||||
|
@ -180,9 +180,9 @@ def run_rtmpdump(args):
|
||||
|
||||
while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
|
||||
prevsize = os.path.getsize(encodeFilename(tmpfilename))
|
||||
self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
|
||||
self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes')
|
||||
time.sleep(5.0) # This seems to be needed
|
||||
args = basic_args + ['--resume']
|
||||
args = [*basic_args, '--resume']
|
||||
if retval == RD_FAILED:
|
||||
args += ['--skip', '1']
|
||||
args = [encodeArgument(a) for a in args]
|
||||
@ -197,7 +197,7 @@ def run_rtmpdump(args):
|
||||
break
|
||||
if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
|
||||
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
||||
self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
|
||||
self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes')
|
||||
self.try_rename(tmpfilename, filename)
|
||||
self._hook_progress({
|
||||
'downloaded_bytes': fsize,
|
||||
|
@ -18,7 +18,7 @@ class YoutubeLiveChatFD(FragmentFD):
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
video_id = info_dict['video_id']
|
||||
self.to_screen('[%s] Downloading live chat' % self.FD_NAME)
|
||||
self.to_screen(f'[{self.FD_NAME}] Downloading live chat')
|
||||
if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat':
|
||||
self.report_warning('Live chat download runs until the livestream ends. '
|
||||
'If you wish to download the video simultaneously, run a separate yt-dlp instance')
|
||||
|
@ -4,7 +4,6 @@
|
||||
import time
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
dict_get,
|
||||
@ -67,7 +66,7 @@ class ABCIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'title': 'WWI Centenary',
|
||||
'description': 'md5:c2379ec0ca84072e86b446e536954546',
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
|
||||
'info_dict': {
|
||||
@ -75,7 +74,7 @@ class ABCIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
|
||||
'description': 'md5:2961a17dc53abc558589ccd0fb8edd6f',
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
|
||||
'info_dict': {
|
||||
@ -86,7 +85,7 @@ class ABCIE(InfoExtractor):
|
||||
'upload_date': '20200813',
|
||||
'uploader': 'Behind the News',
|
||||
'uploader_id': 'behindthenews',
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540',
|
||||
'info_dict': {
|
||||
@ -95,7 +94,7 @@ class ABCIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
|
||||
'thumbnail': 'https://live-production.wcms.abc-cdn.net.au/0c170f5b57f0105c432f366c0e8e267b?impolicy=wcms_crop_resize&cropH=2813&cropW=5000&xPos=0&yPos=249&width=862&height=485',
|
||||
}
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -126,7 +125,7 @@ def _real_extract(self, url):
|
||||
if mobj is None:
|
||||
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
|
||||
if expired:
|
||||
raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
|
||||
raise ExtractorError(f'{self.IE_NAME} said: {expired}', expected=True)
|
||||
raise ExtractorError('Unable to extract video urls')
|
||||
|
||||
urls_info = self._parse_json(
|
||||
@ -164,7 +163,7 @@ def _real_extract(self, url):
|
||||
'height': height,
|
||||
'tbr': bitrate,
|
||||
'filesize': int_or_none(url_info.get('filesize')),
|
||||
'format_id': format_id
|
||||
'format_id': format_id,
|
||||
})
|
||||
|
||||
return {
|
||||
@ -288,13 +287,12 @@ def _real_extract(self, url):
|
||||
stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))
|
||||
|
||||
house_number = video_params.get('episodeHouseNumber') or video_id
|
||||
path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(
|
||||
int(time.time()), house_number)
|
||||
path = f'/auth/hls/sign?ts={int(time.time())}&hn={house_number}&d=android-tablet'
|
||||
sig = hmac.new(
|
||||
b'android.content.res.Resources',
|
||||
path.encode('utf-8'), hashlib.sha256).hexdigest()
|
||||
path.encode(), hashlib.sha256).hexdigest()
|
||||
token = self._download_webpage(
|
||||
'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)
|
||||
f'http://iview.abc.net.au{path}&sig={sig}', video_id)
|
||||
|
||||
def tokenize_url(url, token):
|
||||
return update_url_query(url, {
|
||||
@ -303,7 +301,7 @@ def tokenize_url(url, token):
|
||||
|
||||
for sd in ('1080', '720', 'sd', 'sd-low'):
|
||||
sd_url = try_get(
|
||||
stream, lambda x: x['streams']['hls'][sd], compat_str)
|
||||
stream, lambda x: x['streams']['hls'][sd], str)
|
||||
if not sd_url:
|
||||
continue
|
||||
formats = self._extract_m3u8_formats(
|
||||
@ -358,7 +356,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
|
||||
'description': 'md5:93119346c24a7c322d446d8eece430ff',
|
||||
'series': 'Upper Middle Bogan',
|
||||
'season': 'Series 1',
|
||||
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
|
||||
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
|
||||
},
|
||||
'playlist_count': 8,
|
||||
}, {
|
||||
@ -386,7 +384,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
|
||||
'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
|
||||
'series': '7.30 Mark Humphries Satire',
|
||||
'season': 'Episodes',
|
||||
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
|
||||
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
|
||||
},
|
||||
'playlist_count': 15,
|
||||
}]
|
||||
@ -398,7 +396,7 @@ def _real_extract(self, url):
|
||||
r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;',
|
||||
webpage, 'initial state')
|
||||
video_data = self._parse_json(
|
||||
unescapeHTML(webpage_data).encode('utf-8').decode('unicode_escape'), show_id)
|
||||
unescapeHTML(webpage_data).encode().decode('unicode_escape'), show_id)
|
||||
video_data = video_data['route']['pageData']['_embedded']
|
||||
|
||||
highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])
|
||||
|
@ -58,7 +58,7 @@ def _real_extract(self, url):
|
||||
display_id = mobj.group('display_id')
|
||||
video_id = mobj.group('id')
|
||||
info_dict = self._extract_feed_info(
|
||||
'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
|
||||
f'http://abcnews.go.com/video/itemfeed?id={video_id}')
|
||||
info_dict.update({
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
|
@ -1,5 +1,4 @@
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
dict_get,
|
||||
int_or_none,
|
||||
@ -57,11 +56,11 @@ def _real_extract(self, url):
|
||||
data = self._download_json(
|
||||
'https://api.abcotvs.com/v2/content', display_id, query={
|
||||
'id': video_id,
|
||||
'key': 'otv.web.%s.story' % station,
|
||||
'key': f'otv.web.{station}.story',
|
||||
'station': station,
|
||||
})['data']
|
||||
video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data
|
||||
video_id = compat_str(dict_get(video, ('id', 'publishedKey'), video_id))
|
||||
video_id = str(dict_get(video, ('id', 'publishedKey'), video_id))
|
||||
title = video.get('title') or video['linkText']
|
||||
|
||||
formats = []
|
||||
|
@ -66,8 +66,8 @@ def _get_videokey_from_ticket(self, ticket):
|
||||
query={'t': media_token},
|
||||
data=json.dumps({
|
||||
'kv': 'a',
|
||||
'lt': ticket
|
||||
}).encode('utf-8'),
|
||||
'lt': ticket,
|
||||
}).encode(),
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
})
|
||||
@ -77,7 +77,7 @@ def _get_videokey_from_ticket(self, ticket):
|
||||
|
||||
h = hmac.new(
|
||||
binascii.unhexlify(self.HKEY),
|
||||
(license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'),
|
||||
(license_response['cid'] + self.ie._DEVICE_ID).encode(),
|
||||
digestmod=hashlib.sha256)
|
||||
enckey = bytes_to_intlist(h.digest())
|
||||
|
||||
@ -103,11 +103,11 @@ class AbemaTVBaseIE(InfoExtractor):
|
||||
|
||||
@classmethod
|
||||
def _generate_aks(cls, deviceid):
|
||||
deviceid = deviceid.encode('utf-8')
|
||||
deviceid = deviceid.encode()
|
||||
# add 1 hour and then drop minute and secs
|
||||
ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
|
||||
time_struct = time.gmtime(ts_1hour)
|
||||
ts_1hour_str = str(ts_1hour).encode('utf-8')
|
||||
ts_1hour_str = str(ts_1hour).encode()
|
||||
|
||||
tmp = None
|
||||
|
||||
@ -119,7 +119,7 @@ def mix_once(nonce):
|
||||
|
||||
def mix_tmp(count):
|
||||
nonlocal tmp
|
||||
for i in range(count):
|
||||
for _ in range(count):
|
||||
mix_once(tmp)
|
||||
|
||||
def mix_twist(nonce):
|
||||
@ -160,7 +160,7 @@ def _get_device_token(self):
|
||||
data=json.dumps({
|
||||
'deviceId': self._DEVICE_ID,
|
||||
'applicationKeySecret': aks,
|
||||
}).encode('utf-8'),
|
||||
}).encode(),
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
})
|
||||
@ -180,7 +180,7 @@ def _get_media_token(self, invalidate=False, to_show=True):
|
||||
'osLang': 'ja_JP',
|
||||
'osTimezone': 'Asia/Tokyo',
|
||||
'appId': 'tv.abema',
|
||||
'appVersion': '3.27.1'
|
||||
'appVersion': '3.27.1',
|
||||
}, headers={
|
||||
'Authorization': f'bearer {self._get_device_token()}',
|
||||
})['token']
|
||||
@ -202,8 +202,8 @@ def _perform_login(self, username, password):
|
||||
f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
|
||||
data=json.dumps({
|
||||
method: username,
|
||||
'password': password
|
||||
}).encode('utf-8'), headers={
|
||||
'password': password,
|
||||
}).encode(), headers={
|
||||
'Authorization': f'bearer {self._get_device_token()}',
|
||||
'Origin': 'https://abema.tv',
|
||||
'Referer': 'https://abema.tv/',
|
||||
@ -344,7 +344,7 @@ def _real_extract(self, url):
|
||||
|
||||
description = self._html_search_regex(
|
||||
(r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
|
||||
r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div',),
|
||||
r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div'),
|
||||
webpage, 'description', default=None, group=1)
|
||||
if not description:
|
||||
og_desc = self._html_search_meta(
|
||||
|
@ -67,7 +67,7 @@ class ACastIE(ACastBaseIE):
|
||||
'display_id': '2.raggarmordet-rosterurdetforflutna',
|
||||
'season_number': 4,
|
||||
'season': 'Season 4',
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
|
||||
'only_matching': True,
|
||||
@ -93,13 +93,13 @@ class ACastIE(ACastBaseIE):
|
||||
'series': 'Democracy Sausage with Mark Kenny',
|
||||
'timestamp': 1684826362,
|
||||
'description': 'md5:feabe1fc5004c78ee59c84a46bf4ba16',
|
||||
}
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
channel, display_id = self._match_valid_url(url).groups()
|
||||
episode = self._call_api(
|
||||
'%s/episodes/%s' % (channel, display_id),
|
||||
f'{channel}/episodes/{display_id}',
|
||||
display_id, {'showInfo': 'true'})
|
||||
return self._extract_episode(
|
||||
episode, self._extract_show_info(episode.get('show') or {}))
|
||||
@ -130,7 +130,7 @@ class ACastChannelIE(ACastBaseIE):
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
|
||||
return False if ACastIE.suitable(url) else super().suitable(url)
|
||||
|
||||
def _real_extract(self, url):
|
||||
show_slug = self._match_id(url)
|
||||
|
@ -25,7 +25,7 @@ def _extract_metadata(self, video_id, video_info):
|
||||
'width': int_or_none(video.get('width')),
|
||||
'height': int_or_none(video.get('height')),
|
||||
'tbr': float_or_none(video.get('avgBitrate')),
|
||||
**parse_codecs(video.get('codecs', ''))
|
||||
**parse_codecs(video.get('codecs', '')),
|
||||
})
|
||||
|
||||
return {
|
||||
@ -77,7 +77,7 @@ class AcFunVideoIE(AcFunVideoBaseIE):
|
||||
'comment_count': int,
|
||||
'thumbnail': r're:^https?://.*\.(jpg|jpeg)',
|
||||
'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17',
|
||||
}
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
|
||||
from ..compat import compat_b64decode
|
||||
from ..networking.exceptions import HTTPError
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
@ -111,9 +110,9 @@ def _get_subtitles(self, sub_url, video_id):
|
||||
|
||||
# http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
|
||||
dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
|
||||
compat_b64decode(enc_subtitles[24:]),
|
||||
base64.b64decode(enc_subtitles[24:]),
|
||||
binascii.unhexlify(self._K + '7fac1178830cfe0c'),
|
||||
compat_b64decode(enc_subtitles[:24])))
|
||||
base64.b64decode(enc_subtitles[:24])))
|
||||
subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
|
||||
if not subtitles_json:
|
||||
return None
|
||||
@ -136,7 +135,7 @@ def _get_subtitles(self, sub_url, video_id):
|
||||
if start is None or end is None or text is None:
|
||||
continue
|
||||
alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
|
||||
ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
|
||||
ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format(
|
||||
ass_subtitles_timecode(start),
|
||||
ass_subtitles_timecode(end),
|
||||
'{\\a%d}' % alignment if alignment != 2 else '',
|
||||
@ -178,7 +177,7 @@ def _perform_login(self, username, password):
|
||||
|
||||
def _real_extract(self, url):
|
||||
lang, video_id = self._match_valid_url(url).group('lang', 'id')
|
||||
video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
|
||||
video_base_url = self._PLAYER_BASE_URL + f'video/{video_id}/'
|
||||
player = self._download_json(
|
||||
video_base_url + 'configuration', video_id,
|
||||
'Downloading player config JSON metadata',
|
||||
@ -219,12 +218,12 @@ def _real_extract(self, url):
|
||||
links_url, video_id, 'Downloading links JSON metadata', headers={
|
||||
'X-Player-Token': authorization,
|
||||
'X-Target-Distribution': lang,
|
||||
**self._HEADERS
|
||||
**self._HEADERS,
|
||||
}, query={
|
||||
'freeWithAds': 'true',
|
||||
'adaptive': 'false',
|
||||
'withMetadata': 'true',
|
||||
'source': 'Web'
|
||||
'source': 'Web',
|
||||
})
|
||||
break
|
||||
except ExtractorError as e:
|
||||
@ -256,7 +255,7 @@ def _real_extract(self, url):
|
||||
for quality, load_balancer_url in qualities.items():
|
||||
load_balancer_data = self._download_json(
|
||||
load_balancer_url, video_id,
|
||||
'Downloading %s %s JSON metadata' % (format_id, quality),
|
||||
f'Downloading {format_id} {quality} JSON metadata',
|
||||
fatal=False) or {}
|
||||
m3u8_url = load_balancer_data.get('location')
|
||||
if not m3u8_url:
|
||||
@ -276,7 +275,7 @@ def _real_extract(self, url):
|
||||
self.raise_login_required('This video requires a subscription', method='password')
|
||||
|
||||
video = (self._download_json(
|
||||
self._API_BASE_URL + 'video/%s' % video_id, video_id,
|
||||
self._API_BASE_URL + f'video/{video_id}', video_id,
|
||||
'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
|
||||
show = video.get('show') or {}
|
||||
|
||||
@ -320,7 +319,7 @@ def _real_extract(self, url):
|
||||
f'{self._API_BASE_URL}video/show/{show_id}', video_show_slug,
|
||||
'Downloading episode list', headers={
|
||||
'X-Target-Distribution': lang,
|
||||
**self._HEADERS
|
||||
**self._HEADERS,
|
||||
}, query={
|
||||
'order': 'asc',
|
||||
'limit': '-1',
|
||||
|
@ -1,8 +1,6 @@
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_parse_qs,
|
||||
compat_urlparse,
|
||||
)
|
||||
|
||||
|
||||
class AdobeConnectIE(InfoExtractor):
|
||||
@ -12,13 +10,13 @@ def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
title = self._html_extract_title(webpage)
|
||||
qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
|
||||
qs = urllib.parse.parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
|
||||
is_live = qs.get('isLive', ['false'])[0] == 'true'
|
||||
formats = []
|
||||
for con_string in qs['conStrings'][0].split(','):
|
||||
formats.append({
|
||||
'format_id': con_string.split('://')[0],
|
||||
'app': compat_urlparse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
|
||||
'app': urllib.parse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
|
||||
'ext': 'flv',
|
||||
'play_path': 'mp4:' + qs['streamName'][0],
|
||||
'rtmp_conn': 'S:' + qs['ticket'][0],
|
||||
|
File diff suppressed because it is too large
@ -2,7 +2,6 @@
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
ISO639Utils,
|
||||
OnDemandPagedList,
|
||||
@ -36,7 +35,7 @@ def _parse_subtitles(self, video_data, url_key):
|
||||
return subtitles
|
||||
|
||||
def _parse_video_data(self, video_data):
|
||||
video_id = compat_str(video_data['id'])
|
||||
video_id = str(video_data['id'])
|
||||
title = video_data['title']
|
||||
|
||||
s3_extracted = False
|
||||
@ -151,7 +150,7 @@ def _fetch_page(self, display_id, query, page):
|
||||
page += 1
|
||||
query['page'] = page
|
||||
for element_data in self._call_api(
|
||||
self._RESOURCE, display_id, query, 'Download Page %d' % page):
|
||||
self._RESOURCE, display_id, query, f'Download Page {page}'):
|
||||
yield self._process_data(element_data)
|
||||
|
||||
def _extract_playlist_entries(self, display_id, query):
|
||||
|
@ -91,7 +91,7 @@ def _real_extract(self, url):
|
||||
getShowBySlug(slug:"%s") {
|
||||
%%s
|
||||
}
|
||||
}''' % show_path
|
||||
}''' % show_path # noqa: UP031
|
||||
if episode_path:
|
||||
query = query % '''title
|
||||
getVideoBySlug(slug:"%s") {
|
||||
@ -128,7 +128,7 @@ def _real_extract(self, url):
|
||||
episode_title = title = video_data['title']
|
||||
series = show_data.get('title')
|
||||
if series:
|
||||
title = '%s - %s' % (series, title)
|
||||
title = f'{series} - {title}'
|
||||
info = {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
@ -191,7 +191,7 @@ def _real_extract(self, url):
|
||||
if not slug:
|
||||
continue
|
||||
entries.append(self.url_result(
|
||||
'http://adultswim.com/videos/%s/%s' % (show_path, slug),
|
||||
f'http://adultswim.com/videos/{show_path}/{slug}',
|
||||
'AdultSwim', video.get('_id')))
|
||||
return self.playlist_result(
|
||||
entries, show_path, show_data.get('title'),
|
||||
|
@ -73,8 +73,8 @@ def _extract_aen_smil(self, smil_url, video_id, auth=None):
|
||||
def _extract_aetn_info(self, domain, filter_key, filter_value, url):
|
||||
requestor_id, brand = self._DOMAIN_MAP[domain]
|
||||
result = self._download_json(
|
||||
'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
|
||||
filter_value, query={'filter[%s]' % filter_key: filter_value})
|
||||
f'https://feeds.video.aetnd.com/api/v2/{brand}/videos',
|
||||
filter_value, query={f'filter[{filter_key}]': filter_value})
|
||||
result = traverse_obj(
|
||||
result, ('results',
|
||||
lambda k, v: k == 0 and v[filter_key] == filter_value),
|
||||
@ -142,7 +142,7 @@ class AENetworksIE(AENetworksBaseIE):
|
||||
'skip_download': True,
|
||||
},
|
||||
'add_ie': ['ThePlatform'],
|
||||
'skip': 'Geo-restricted - This content is not available in your location.'
|
||||
'skip': 'Geo-restricted - This content is not available in your location.',
|
||||
}, {
|
||||
'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
|
||||
'info_dict': {
|
||||
@ -171,28 +171,28 @@ class AENetworksIE(AENetworksBaseIE):
|
||||
'skip': 'This video is only available for users of participating TV providers.',
|
||||
}, {
|
||||
'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.history.com/videos/history-of-valentines-day',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -209,14 +209,14 @@ def _call_api(self, resource, slug, brand, fields):
|
||||
%s(slug: "%s") {
|
||||
%s
|
||||
}
|
||||
}''' % (resource, slug, fields),
|
||||
}''' % (resource, slug, fields), # noqa: UP031
|
||||
}))['data'][resource]
|
||||
|
||||
def _real_extract(self, url):
|
||||
domain, slug = self._match_valid_url(url).groups()
|
||||
_, brand = self._DOMAIN_MAP[domain]
|
||||
playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
|
||||
base_url = 'http://watch.%s' % domain
|
||||
base_url = f'http://watch.{domain}'
|
||||
|
||||
entries = []
|
||||
for item in (playlist.get(self._ITEMS_KEY) or []):
|
||||
@ -248,10 +248,10 @@ class AENetworksCollectionIE(AENetworksListBaseIE):
|
||||
'playlist_mincount': 12,
|
||||
}, {
|
||||
'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.historyvault.com/collections/mysteryquest',
|
||||
'only_matching': True
|
||||
'only_matching': True,
|
||||
}]
|
||||
_RESOURCE = 'list'
|
||||
_ITEMS_KEY = 'items'
|
||||
@ -309,7 +309,7 @@ class HistoryTopicIE(AENetworksBaseIE):
|
||||
'info_dict': {
|
||||
'id': '40700995724',
|
||||
'ext': 'mp4',
|
||||
'title': "History of Valentine’s Day",
|
||||
'title': 'History of Valentine’s Day',
|
||||
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
|
||||
'timestamp': 1375819729,
|
||||
'upload_date': '20130806',
|
||||
@ -364,6 +364,6 @@ def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
player_url = self._search_regex(
|
||||
r'<phoenix-iframe[^>]+src="(%s)' % HistoryPlayerIE._VALID_URL,
|
||||
rf'<phoenix-iframe[^>]+src="({HistoryPlayerIE._VALID_URL})',
|
||||
webpage, 'player URL')
|
||||
return self.url_result(player_url, HistoryPlayerIE.ie_key())
|
||||
|
@ -16,8 +16,8 @@ class AeonCoIE(InfoExtractor):
|
||||
'uploader': 'Semiconductor',
|
||||
'uploader_id': 'semiconductor',
|
||||
'uploader_url': 'https://vimeo.com/semiconductor',
|
||||
'duration': 348
|
||||
}
|
||||
'duration': 348,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://aeon.co/videos/dazzling-timelapse-shows-how-microbes-spoil-our-food-and-sometimes-enrich-it',
|
||||
'md5': '03582d795382e49f2fd0b427b55de409',
|
||||
@ -29,8 +29,8 @@ class AeonCoIE(InfoExtractor):
|
||||
'uploader': 'Aeon Video',
|
||||
'uploader_id': 'aeonvideo',
|
||||
'uploader_url': 'https://vimeo.com/aeonvideo',
|
||||
'duration': 1344
|
||||
}
|
||||
'duration': 1344,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://aeon.co/videos/chew-over-the-prisoners-dilemma-and-see-if-you-can-find-the-rational-path-out',
|
||||
'md5': '1cfda0bf3ae24df17d00f2c0cb6cc21b',
|
||||
|
@ -55,7 +55,7 @@ def _perform_login(self, username, password):
|
||||
if result != 1:
|
||||
error = _ERRORS.get(result, 'You have failed to log in.')
|
||||
raise ExtractorError(
|
||||
'Unable to login: %s said: %s' % (self.IE_NAME, error),
|
||||
f'Unable to login: {self.IE_NAME} said: {error}',
|
||||
expected=True)
|
||||
|
||||
|
||||
@ -227,7 +227,7 @@ def _real_extract(self, url):
|
||||
**traverse_obj(file_element, {
|
||||
'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
|
||||
'timestamp': ('file_start', {unified_timestamp}),
|
||||
})
|
||||
}),
|
||||
})
|
||||
|
||||
if traverse_obj(data, ('adult_status', {str})) == 'notLogin':
|
||||
|
@ -168,7 +168,7 @@ def _real_extract(self, url):
|
||||
for ext in ('aac', 'mp3'):
|
||||
url_data = self._download_json(
|
||||
f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}',
|
||||
media_id, 'Downloading podcast %s URL' % ext)
|
||||
media_id, f'Downloading podcast {ext} URL')
|
||||
# prevents inserting the mp3 (default) multiple times
|
||||
if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']:
|
||||
formats.append({
|
||||
@ -206,8 +206,8 @@ class TokFMAuditionIE(InfoExtractor):
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _create_url(id):
|
||||
return f'https://audycje.tokfm.pl/audycja/{id}'
|
||||
def _create_url(video_id):
|
||||
return f'https://audycje.tokfm.pl/audycja/{video_id}'
|
||||
|
||||
def _real_extract(self, url):
|
||||
audition_id = self._match_id(url)
|
||||
|
@ -26,7 +26,7 @@ class AirTVIE(InfoExtractor):
|
||||
'view_count': int,
|
||||
'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg',
|
||||
'timestamp': 1664792603,
|
||||
}
|
||||
},
|
||||
}, {
|
||||
# with youtube_id
|
||||
'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q',
|
||||
@ -54,7 +54,7 @@ class AirTVIE(InfoExtractor):
|
||||
'channel': 'Newsflare',
|
||||
'duration': 37,
|
||||
'upload_date': '20180511',
|
||||
}
|
||||
},
|
||||
}]
|
||||
|
||||
def _get_formats_and_subtitle(self, json_data, video_id):
|
||||
|
@ -22,7 +22,7 @@ class AitubeKZVideoIE(InfoExtractor):
|
||||
'timestamp': 1667370519,
|
||||
'title': 'Ангел хранитель 1 серия',
|
||||
'channel_follower_count': int,
|
||||
}
|
||||
},
|
||||
}, {
|
||||
# embed url
|
||||
'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c',
|
||||
|
@ -1,5 +1,4 @@
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
float_or_none,
|
||||
try_get,
|
||||
@ -44,7 +43,7 @@ def _real_extract(self, url):
|
||||
'title': title,
|
||||
'thumbnail': data.get('coverUrl'),
|
||||
'uploader': try_get(
|
||||
data, lambda x: x['followBar']['name'], compat_str),
|
||||
data, lambda x: x['followBar']['name'], str),
|
||||
'timestamp': float_or_none(data.get('startTimeLong'), scale=1000),
|
||||
'formats': formats,
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ class AlJazeeraIE(InfoExtractor):
|
||||
'timestamp': 1636219149,
|
||||
'description': 'U sarajevskim naseljima Rajlovac i Reljevo stambeni objekti, ali i industrijska postrojenja i dalje su pod vodom.',
|
||||
'upload_date': '20211106',
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://balkans.aljazeera.net/videos/2021/11/6/djokovic-usao-u-finale-mastersa-u-parizu',
|
||||
'info_dict': {
|
||||
@ -33,7 +33,7 @@ class AlJazeeraIE(InfoExtractor):
|
||||
BRIGHTCOVE_URL_RE = r'https?://players.brightcove.net/(?P<account>\d+)/(?P<player_id>[a-zA-Z0-9]+)_(?P<embed>[^/]+)/index.html\?videoId=(?P<id>\d+)'
|
||||
|
||||
def _real_extract(self, url):
|
||||
base, post_type, id = self._match_valid_url(url).groups()
|
||||
base, post_type, display_id = self._match_valid_url(url).groups()
|
||||
wp = {
|
||||
'balkans.aljazeera.net': 'ajb',
|
||||
'chinese.aljazeera.net': 'chinese',
|
||||
@ -47,11 +47,11 @@ def _real_extract(self, url):
|
||||
'news': 'news',
|
||||
}[post_type.split('/')[0]]
|
||||
video = self._download_json(
|
||||
f'https://{base}/graphql', id, query={
|
||||
f'https://{base}/graphql', display_id, query={
|
||||
'wp-site': wp,
|
||||
'operationName': 'ArchipelagoSingleArticleQuery',
|
||||
'variables': json.dumps({
|
||||
'name': id,
|
||||
'name': display_id,
|
||||
'postType': post_type,
|
||||
}),
|
||||
}, headers={
|
||||
@ -64,7 +64,7 @@ def _real_extract(self, url):
|
||||
embed = 'default'
|
||||
|
||||
if video_id is None:
|
||||
webpage = self._download_webpage(url, id)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
account, player_id, embed, video_id = self._search_regex(self.BRIGHTCOVE_URL_RE, webpage, 'video id',
|
||||
group=(1, 2, 3, 4), default=(None, None, None, None))
|
||||
@ -73,11 +73,11 @@ def _real_extract(self, url):
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'url': url,
|
||||
'ie_key': 'Generic'
|
||||
'ie_key': 'Generic',
|
||||
}
|
||||
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'url': f'https://players.brightcove.net/{account}/{player_id}_{embed}/index.html?videoId={video_id}',
|
||||
'ie_key': 'BrightcoveNew'
|
||||
'ie_key': 'BrightcoveNew',
|
||||
}
|
||||
|
@ -1,5 +1,4 @@
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
qualities,
|
||||
@ -95,11 +94,11 @@ def _real_extract(self, url):
|
||||
duration = int_or_none(video.get('duration'))
|
||||
view_count = int_or_none(video.get('view_count'))
|
||||
timestamp = unified_timestamp(try_get(
|
||||
video, lambda x: x['added_at']['date'], compat_str))
|
||||
video, lambda x: x['added_at']['date'], str))
|
||||
else:
|
||||
video_id = display_id
|
||||
media_data = self._download_json(
|
||||
'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media=%s' % video_id, display_id)
|
||||
f'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media={video_id}', display_id)
|
||||
title = remove_end(strip_or_none(self._html_extract_title(webpage), ' - AlloCiné'))
|
||||
for key, value in media_data['video'].items():
|
||||
if not key.endswith('Path'):
|
||||
|
@ -33,27 +33,27 @@
|
||||
video: getClip(clipIdentifier: $id) {
|
||||
%s %s
|
||||
}
|
||||
}''' % (_FIELDS, _EXTRA_FIELDS),
|
||||
}''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031
|
||||
'montage': '''query ($id: String!) {
|
||||
video: getMontage(clipIdentifier: $id) {
|
||||
%s
|
||||
}
|
||||
}''' % _FIELDS,
|
||||
}''' % _FIELDS, # noqa: UP031
|
||||
'Clips': '''query ($page: Int!, $user: String!, $game: Int) {
|
||||
videos: clips(search: createdDate, page: $page, user: $user, mobile: false, game: $game) {
|
||||
data { %s %s }
|
||||
}
|
||||
}''' % (_FIELDS, _EXTRA_FIELDS),
|
||||
}''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031
|
||||
'Montages': '''query ($page: Int!, $user: String!) {
|
||||
videos: montages(search: createdDate, page: $page, user: $user) {
|
||||
data { %s }
|
||||
}
|
||||
}''' % _FIELDS,
|
||||
}''' % _FIELDS, # noqa: UP031
|
||||
'Mobile Clips': '''query ($page: Int!, $user: String!) {
|
||||
videos: clips(search: createdDate, page: $page, user: $user, mobile: true) {
|
||||
data { %s %s }
|
||||
}
|
||||
}''' % (_FIELDS, _EXTRA_FIELDS),
|
||||
}''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031
|
||||
}
|
||||
|
||||
|
||||
@ -121,7 +121,7 @@ class AllstarIE(AllstarBaseIE):
|
||||
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
|
||||
'upload_date': '20230425',
|
||||
'view_count': int,
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://allstar.gg/clip?clip=8LJLY4JKB',
|
||||
'info_dict': {
|
||||
@ -139,7 +139,7 @@ class AllstarIE(AllstarBaseIE):
|
||||
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
|
||||
'upload_date': '20230702',
|
||||
'view_count': int,
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://allstar.gg/montage?montage=643e64089da7e9363e1fa66c',
|
||||
'info_dict': {
|
||||
@ -155,7 +155,7 @@ class AllstarIE(AllstarBaseIE):
|
||||
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
|
||||
'upload_date': '20230418',
|
||||
'view_count': int,
|
||||
}
|
||||
},
|
||||
}, {
|
||||
'url': 'https://allstar.gg/montage?montage=RILJMH6QOS',
|
||||
'info_dict': {
|
||||
@ -171,7 +171,7 @@ class AllstarIE(AllstarBaseIE):
|
||||
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
|
||||
'upload_date': '20230703',
|
||||
'view_count': int,
|
||||
}
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -191,28 +191,28 @@ class AllstarProfileIE(AllstarBaseIE):
|
||||
'id': '62b8bdfc9021052f7905882d-clips',
|
||||
'title': 'cherokee - Clips',
|
||||
},
|
||||
'playlist_mincount': 15
|
||||
'playlist_mincount': 15,
|
||||
}, {
|
||||
'url': 'https://allstar.gg/u/cherokee?game=730&view=Clips',
|
||||
'info_dict': {
|
||||
'id': '62b8bdfc9021052f7905882d-clips-730',
|
||||
'title': 'cherokee - Clips - 730',
|
||||
},
|
||||
'playlist_mincount': 15
|
||||
'playlist_mincount': 15,
|
||||
}, {
|
||||
'url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d?view=Montages',
|
||||
'info_dict': {
|
||||
'id': '62b8bdfc9021052f7905882d-montages',
|
||||
'title': 'cherokee - Montages',
|
||||
},
|
||||
'playlist_mincount': 4
|
||||
'playlist_mincount': 4,
|
||||
}, {
|
||||
'url': 'https://allstar.gg/profile?user=cherokee&view=Mobile Clips',
|
||||
'info_dict': {
|
||||
'id': '62b8bdfc9021052f7905882d-mobile',
|
||||
'title': 'cherokee - Mobile Clips',
|
||||
},
|
||||
'playlist_mincount': 1
|
||||
'playlist_mincount': 1,
|
||||
}]
|
||||
|
||||
_PAGE_SIZE = 10
|
||||
|
@ -25,7 +25,7 @@ class AlphaPornoIE(InfoExtractor):
|
||||
'tbr': 1145,
|
||||
'categories': list,
|
||||
'age_limit': 18,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
|

@ -12,7 +12,7 @@
class Alsace20TVBaseIE(InfoExtractor):
def _extract_video(self, video_id, url=None):
info = self._download_json(
'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ),
f'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key={video_id}&habillage=0&mode=html',
video_id) or {}
title = info.get('titre')

@ -24,9 +24,9 @@ def _extract_video(self, video_id, url=None):
else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False))

webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''
thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage))
thumbnail = url_or_none(dict_get(info, ('image', 'preview')) or self._og_search_thumbnail(webpage))
upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None
upload_date = unified_strdate(f'20{upload_date[:2]}-{upload_date[2:4]}-{upload_date[4:]}') if upload_date else None
return {
'id': video_id,
'title': title,

@ -34,7 +34,7 @@ class AltCensoredIE(InfoExtractor):
'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg',
'view_count': int,
'categories': ['News & Politics'],
}
},
}]

def _real_extract(self, url):

@ -1,7 +1,7 @@
import re
import urllib.parse

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
clean_html,

@ -21,7 +21,7 @@ class AluraIE(InfoExtractor):
'info_dict': {
'id': '60095',
'ext': 'mp4',
'title': 'Referências, ref-set e alter'
'title': 'Referências, ref-set e alter',
},
'skip': 'Requires alura account credentials'},
{

@ -30,7 +30,7 @@ class AluraIE(InfoExtractor):
'only_matching': True},
{
'url': 'https://cursos.alura.com.br/course/fundamentos-market-digital/task/55219',
'only_matching': True}
'only_matching': True},
]

def _real_extract(self, url):

@ -62,7 +62,7 @@ def _real_extract(self, url):
return {
'id': video_id,
'title': video_title,
"formats": formats
'formats': formats,
}

def _perform_login(self, username, password):

@ -91,7 +91,7 @@ def is_logged(webpage):
'post url', default=self._LOGIN_URL, group='url')

if not post_url.startswith('http'):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
post_url = urllib.parse.urljoin(self._LOGIN_URL, post_url)

response = self._download_webpage(
post_url, None, 'Logging in',

@ -103,7 +103,7 @@ def is_logged(webpage):
r'(?s)<p[^>]+class="alert-message[^"]*">(.+?)</p>',
response, 'error message', default=None)
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
raise ExtractorError(f'Unable to login: {error}', expected=True)
raise ExtractorError('Unable to log in')

@ -119,7 +119,7 @@ class AluraCourseIE(AluraIE): # XXX: Do not subclass from concrete IE

@classmethod
def suitable(cls, url):
return False if AluraIE.suitable(url) else super(AluraCourseIE, cls).suitable(url)
return False if AluraIE.suitable(url) else super().suitable(url)

def _real_extract(self, url):

@ -157,7 +157,7 @@ def _real_extract(self, url):
'url': video_url,
'id_key': self.ie_key(),
'chapter': chapter,
'chapter_number': chapter_number
'chapter_number': chapter_number,
}
entries.append(entry)
return self.playlist_result(entries, course_path, course_title)

@ -24,7 +24,7 @@ class AmadeusTVIE(InfoExtractor):
'display_id': '65091a87ff85af59d9fc54c3',
'view_count': int,
'description': 'md5:a0357b9c215489e2067cbae0b777bb95',
}
},
}]

def _real_extract(self, url):

@ -25,7 +25,7 @@ class AmaraIE(InfoExtractor):
'uploader': 'PBS NewsHour',
'uploader_id': 'PBSNewsHour',
'timestamp': 1549639570,
}
},
}, {
# Vimeo
'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011',

@ -40,8 +40,8 @@ class AmaraIE(InfoExtractor):
'timestamp': 1294763658,
'upload_date': '20110111',
'uploader': 'Sam Morrill',
'uploader_id': 'sammorrill'
}
'uploader_id': 'sammorrill',
},
}, {
# Direct Link
'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/',

@ -55,13 +55,13 @@ class AmaraIE(InfoExtractor):
'subtitles': dict,
'upload_date': '20091007',
'timestamp': 1254942511,
}
},
}]

def _real_extract(self, url):
video_id = self._match_id(url)
meta = self._download_json(
'https://amara.org/api/videos/%s/' % video_id,
f'https://amara.org/api/videos/{video_id}/',
video_id, query={'format': 'json'})
title = meta['title']
video_url = meta['all_urls'][0]

@ -61,13 +61,13 @@ class AmazonStoreIE(InfoExtractor):
}]

def _real_extract(self, url):
id = self._match_id(url)
playlist_id = self._match_id(url)

for retry in self.RetryManager():
webpage = self._download_webpage(url, id)
webpage = self._download_webpage(url, playlist_id)
try:
data_json = self._search_json(
r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', id,
r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', playlist_id,
transform_source=js_to_json)
except ExtractorError as e:
retry.error = e

@ -81,7 +81,7 @@ def _real_extract(self, url):
'height': int_or_none(video.get('videoHeight')),
'width': int_or_none(video.get('videoWidth')),
} for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')]
return self.playlist_result(entries, playlist_id=id, playlist_title=data_json.get('title'))
return self.playlist_result(entries, playlist_id=playlist_id, playlist_title=data_json.get('title'))


class AmazonReviewsIE(InfoExtractor):

@ -25,7 +25,7 @@ def _call_api(self, asin, data=None, note=None):
asin, note=note, headers={
'Content-Type': 'application/json',
'currentpageurl': '/',
'currentplatform': 'dWeb'
'currentplatform': 'dWeb',
}, data=json.dumps(data).encode() if data else None,
query=None if data else {
'deviceType': 'A1WMMUXPCUJL4N',

@ -64,8 +64,8 @@ def _real_extract(self, url):
site, display_id = self._match_valid_url(url).groups()
requestor_id = self._REQUESTOR_ID_MAP[site]
page_data = self._download_json(
'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'
% (requestor_id.lower(), display_id), display_id)['data']
f'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/{requestor_id.lower()}/url/{display_id}',
display_id)['data']
properties = page_data.get('properties') or {}
query = {
'mbr': 'true',

@ -76,15 +76,15 @@ def _real_extract(self, url):
try:
for v in page_data['children']:
if v.get('type') == 'video-player':
releasePid = v['properties']['currentVideo']['meta']['releasePid']
tp_path = 'M_UwQC/' + releasePid
release_pid = v['properties']['currentVideo']['meta']['releasePid']
tp_path = 'M_UwQC/' + release_pid
media_url = 'https://link.theplatform.com/s/' + tp_path
video_player_count += 1
except KeyError:
pass
if video_player_count > 1:
self.report_warning(
'The JSON data has %d video players. Only one will be extracted' % video_player_count)
f'The JSON data has {video_player_count} video players. Only one will be extracted')

# Fall back to videoPid if releasePid not found.
# TODO: Fall back to videoPid if releasePid manifest uses DRM.

@ -131,7 +131,7 @@ def _real_extract(self, url):
})
ns_keys = theplatform_metadata.get('$xmlns', {}).keys()
if ns_keys:
ns = list(ns_keys)[0]
ns = next(iter(ns_keys))
episode = theplatform_metadata.get(ns + '$episodeTitle') or None
episode_number = int_or_none(
theplatform_metadata.get(ns + '$episode'))

@ -87,13 +87,13 @@ def _real_extract(self, url):
resource_type = 'episodes'

resource = self._download_json(
'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id), video_id)
f'https://www.americastestkitchen.com/api/v6/{resource_type}/{video_id}', video_id)
video = resource['video'] if is_episode else resource
episode = resource if is_episode else resource.get('episode') or {}

return {
'_type': 'url_transparent',
'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'],
'url': 'https://player.zype.com/embed/{}.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ'.format(video['zypeId']),
'ie_key': 'Zype',
'description': clean_html(video.get('description')),
'timestamp': unified_timestamp(video.get('publishDate')),

@ -174,22 +174,22 @@ def _real_extract(self, url):
]

if season_number:
playlist_id = 'season_%d' % season_number
playlist_title = 'Season %d' % season_number
playlist_id = f'season_{season_number}'
playlist_title = f'Season {season_number}'
facet_filters.append('search_season_list:' + playlist_title)
else:
playlist_id = show
playlist_title = title

season_search = self._download_json(
'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
f'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_{slug}_season_desc_production',
playlist_id, headers={
'Origin': 'https://www.americastestkitchen.com',
'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
'X-Algolia-Application-Id': 'Y1FNZXUI30',
}, query={
'facetFilters': json.dumps(facet_filters),
'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title,search_atk_episode_season' % slug,
'attributesToRetrieve': f'description,search_{slug}_episode_number,search_document_date,search_url,title,search_atk_episode_season',
'attributesToHighlight': '',
'hitsPerPage': 1000,
})

@ -207,7 +207,7 @@ def entries():
'description': episode.get('description'),
'timestamp': unified_timestamp(episode.get('search_document_date')),
'season_number': season_number,
'episode_number': int_or_none(episode.get('search_%s_episode_number' % slug)),
'episode_number': int_or_none(episode.get(f'search_{slug}_episode_number')),
'ie_key': AmericasTestKitchenIE.ie_key(),
}

@ -19,12 +19,12 @@ def _extract_feed_info(self, url):
'Unable to download Akamai AMP feed', transform_source=strip_jsonp)
item = feed.get('channel', {}).get('item')
if not item:
raise ExtractorError('%s said: %s' % (self.IE_NAME, feed['error']))
raise ExtractorError('{} said: {}'.format(self.IE_NAME, feed['error']))

video_id = item['guid']

def get_media_node(name, default=None):
media_name = 'media-%s' % name
media_name = f'media-{name}'
media_group = item.get('media-group') or item
return media_group.get(media_name) or item.get(media_name) or item.get(name, default)

@ -29,7 +29,7 @@ class AnchorFMEpisodeIE(InfoExtractor):
'release_date': '20230121',
'release_timestamp': 1674285179,
'episode_id': 'e1tpt3d',
}
},
}, {
# embed url
'url': 'https://anchor.fm/apakatatempo/embed/episodes/S2E75-Perang-Bintang-di-Balik-Kasus-Ferdy-Sambo-dan-Ismail-Bolong-e1shjqd',

@ -50,7 +50,7 @@ class AnchorFMEpisodeIE(InfoExtractor):
'season': 'Season 2',
'season_number': 2,
'episode_id': 'e1shjqd',
}
},
}]

_WEBPAGE_TESTS = [{

@ -72,7 +72,7 @@ class AnchorFMEpisodeIE(InfoExtractor):
'thumbnail': 'https://s3-us-west-2.amazonaws.com/anchor-generated-image-bank/production/podcast_uploaded_episode400/2627805/2627805-1671590688729-4db3882ac9e4b.jpg',
'uploader': 'Podcast Tempo',
'channel': 'apakatatempo',
}
},
}]

def _real_extract(self, url):

@ -15,8 +15,8 @@ class AngelIE(InfoExtractor):
'title': 'Tuttle Twins Season 1, Episode 1: When Laws Give You Lemons',
'description': 'md5:73b704897c20ab59c433a9c0a8202d5e',
'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$',
'duration': 1359.0
}
'duration': 1359.0,
},
}, {
'url': 'https://www.angel.com/watch/the-chosen/episode/8dfb714d-bca5-4812-8125-24fb9514cd10/season-1/episode-1/i-have-called-you-by-name',
'md5': 'e4774bad0a5f0ad2e90d175cafdb797d',

@ -26,8 +26,8 @@ class AngelIE(InfoExtractor):
'title': 'The Chosen Season 1, Episode 1: I Have Called You By Name',
'description': 'md5:aadfb4827a94415de5ff6426e6dee3be',
'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$',
'duration': 3276.0
}
'duration': 3276.0,
},
}]

def _real_extract(self, url):

@ -44,7 +44,7 @@ def _real_extract(self, url):
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'formats': formats,
'subtitles': subtitles
'subtitles': subtitles,
}

# Angel uses cloudinary in the background and supports image transformations.

@ -105,7 +105,7 @@ def _real_extract(self, url):
info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle')
embed_urls = list(Ant1NewsGrEmbedIE._extract_embed_urls(url, webpage))
if not embed_urls:
raise ExtractorError('no videos found for %s' % video_id, expected=True)
raise ExtractorError(f'no videos found for {video_id}', expected=True)
return self.playlist_from_matches(
embed_urls, video_id, info.get('title'), ie=Ant1NewsGrEmbedIE.ie_key(),
video_kwargs={'url_transparent': True, 'timestamp': info.get('timestamp')})

@ -238,7 +238,7 @@ class AnvatoIE(InfoExtractor):
'gray': 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900',
'hearst': 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99',
'cbs': 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe',
'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582'
'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582',
}

def _generate_nfl_token(self, anvack, mcp_id):

@ -255,7 +255,7 @@ def _generate_nfl_token(self, anvack, mcp_id):
token
}
}
}''' % (anvack, mcp_id),
}''' % (anvack, mcp_id), # noqa: UP031
}).encode(), headers={
'Authorization': auth_token,
'Content-Type': 'application/json',

@ -299,7 +299,7 @@ def _get_video_json(self, access_key, video_id, extracted_token):

return self._download_json(
video_data_url, video_id, transform_source=strip_jsonp, query=query,
data=json.dumps({'api': api}, separators=(',', ':')).encode('utf-8'))
data=json.dumps({'api': api}, separators=(',', ':')).encode())

def _get_anvato_videos(self, access_key, video_id, token):
video_data = self._get_video_json(access_key, video_id, token)

@ -358,7 +358,7 @@ def _get_anvato_videos(self, access_key, video_id, token):
for caption in video_data.get('captions', []):
a_caption = {
'url': caption['url'],
'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None
'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None,
}
subtitles.setdefault(caption['language'], []).append(a_caption)
subtitles = self._merge_subtitles(subtitles, hls_subs, vtt_subs)

@ -30,7 +30,7 @@ class AolIE(YahooIE): # XXX: Do not subclass from concrete IE
'params': {
# m3u8 download
'skip_download': True,
}
},
}, {
# video with vidible ID
'url': 'https://www.aol.com/video/view/netflix-is-raising-rates/5707d6b8e4b090497b04f706/',

@ -46,7 +46,7 @@ class AolIE(YahooIE): # XXX: Do not subclass from concrete IE
'params': {
# m3u8 download
'skip_download': True,
}
},
}, {
'url': 'https://www.aol.com/video/view/park-bench-season-2-trailer/559a1b9be4b0c3bfad3357a7/',
'only_matching': True,

@ -83,10 +83,10 @@ def _real_extract(self, url):
return self._extract_yahoo_video(video_id, 'us')

response = self._download_json(
'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/%s/details' % video_id,
f'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/{video_id}/details',
video_id)['response']
if response['statusText'] != 'Ok':
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusText']), expected=True)
raise ExtractorError('{} said: {}'.format(self.IE_NAME, response['statusText']), expected=True)

video_data = response['data']
formats = []

@ -34,7 +34,7 @@ def _real_extract(self, url):
video_id, base_url = mobj.group('id', 'base_url')

webpage = self._download_webpage(
'%s/player/%s' % (base_url, video_id), video_id)
f'{base_url}/player/{video_id}', video_id)

jwplatform_id = self._search_regex(
r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,

@ -47,7 +47,7 @@ def _real_extract(self, url):

def extract(field, name=None):
return self._search_regex(
r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % field,
rf'\b{field}["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, name or field, default=None, group='value')

title = extract('title') or video_id

@ -24,7 +24,7 @@ class ApplePodcastsIE(InfoExtractor):
'duration': 6454,
'series': 'The Tim Dillon Show',
'thumbnail': 're:.+[.](png|jpe?g|webp)',
}
},
}, {
'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
'only_matching': True,

@ -1,8 +1,8 @@
import json
import re
import urllib.parse

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
parse_duration,

@ -64,7 +64,7 @@ class AppleTrailersIE(InfoExtractor):
'uploader_id': 'wb',
},
},
]
],
}, {
'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/',
'info_dict': {

@ -99,7 +99,7 @@ def _real_extract(self, url):
webpage = self._download_webpage(url, movie)
film_id = self._search_regex(r"FilmId\s*=\s*'(\d+)'", webpage, 'film id')
film_data = self._download_json(
'http://trailers.apple.com/trailers/feeds/data/%s.json' % film_id,
f'http://trailers.apple.com/trailers/feeds/data/{film_id}.json',
film_id, fatal=False)

if film_data:

@ -114,7 +114,7 @@ def _real_extract(self, url):
if not src:
continue
formats.append({
'format_id': '%s-%s' % (version, size),
'format_id': f'{version}-{size}',
'url': re.sub(r'_(\d+p\.mov)', r'_h\1', src),
'width': int_or_none(size_data.get('width')),
'height': int_or_none(size_data.get('height')),

@ -134,7 +134,7 @@ def _real_extract(self, url):
page_data = film_data.get('page', {})
return self.playlist_result(entries, film_id, page_data.get('movie_title'))

playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
playlist_url = urllib.parse.urljoin(url, 'includes/playlists/itunes.inc')

def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)

@ -143,10 +143,9 @@ def fix_html(s):
# like: http://trailers.apple.com/trailers/wb/gravity/

def _clean_json(m):
return 'iTunes.playURL(%s);' % m.group(1).replace('\'', ''')
return 'iTunes.playURL({});'.format(m.group(1).replace('\'', '''))
s = re.sub(self._JSON_RE, _clean_json, s)
s = '<html>%s</html>' % s
return s
return f'<html>{s}</html>'
doc = self._download_xml(playlist_url, movie, transform_source=fix_html)

playlist = []

@ -170,18 +169,18 @@ def _clean_json(m):
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))

trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
settings_json_url = urllib.parse.urljoin(url, f'includes/settings/{trailer_id}.json')
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')

formats = []
for format in settings['metadata']['sizes']:
for fmt in settings['metadata']['sizes']:
# The src is a file pointing to the real video file
format_url = re.sub(r'_(\d*p\.mov)', r'_h\1', format['src'])
format_url = re.sub(r'_(\d*p\.mov)', r'_h\1', fmt['src'])
formats.append({
'url': format_url,
'format': format['type'],
'width': int_or_none(format['width']),
'height': int_or_none(format['height']),
'format': fmt['type'],
'width': int_or_none(fmt['width']),
'height': int_or_none(fmt['height']),
})

playlist.append({

@ -229,7 +228,7 @@ class AppleTrailersSectionIE(InfoExtractor):
'title': 'Movie Studios',
},
}
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>%s)' % '|'.join(_SECTIONS)
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>{})'.format('|'.join(_SECTIONS))
_TESTS = [{
'url': 'http://trailers.apple.com/#section=justadded',
'info_dict': {

@ -270,7 +269,7 @@ class AppleTrailersSectionIE(InfoExtractor):
def _real_extract(self, url):
section = self._match_id(url)
section_data = self._download_json(
'http://trailers.apple.com/trailers/home/feeds/%s.json' % self._SECTIONS[section]['feed_path'],
'http://trailers.apple.com/trailers/home/feeds/{}.json'.format(self._SECTIONS[section]['feed_path']),
section)
entries = [
self.url_result('http://trailers.apple.com' + e['location'])

@ -1,10 +1,11 @@
from __future__ import annotations

import json
import re
import urllib.parse

from .common import InfoExtractor
from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
from ..compat import compat_urllib_parse_unquote
from ..networking import HEADRequest
from ..networking.exceptions import HTTPError
from ..utils import (

@ -145,7 +146,7 @@ class ArchiveOrgIE(InfoExtractor):
'title': 'Bells Of Rostov',
'ext': 'mp3',
},
'skip': 'restricted'
'skip': 'restricted',
}, {
'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',

@ -158,7 +159,7 @@ class ArchiveOrgIE(InfoExtractor):
'description': 'md5:012b2d668ae753be36896f343d12a236',
'upload_date': '20190928',
},
'skip': 'restricted'
'skip': 'restricted',
}, {
# Original formats are private
'url': 'https://archive.org/details/irelandthemakingofarepublic',

@ -202,8 +203,8 @@ class ArchiveOrgIE(InfoExtractor):
'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel2_001554.jpg',
'display_id': 'irelandthemakingofarepublicreel2.mov',
},
}
]
},
],
}]

@staticmethod

@ -220,7 +221,7 @@ def _playlist_data(webpage):

def _real_extract(self, url):
video_id = urllib.parse.unquote_plus(self._match_id(url))
identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
identifier, _, entry_id = video_id.partition('/')

# Archive.org metadata API doesn't clearly demarcate playlist entries
# or subtitle tracks, so we get them from the embeddable player.

@ -246,7 +247,7 @@ def _real_extract(self, url):
if track['kind'] != 'subtitles':
continue
entries[p['orig']][track['label']] = {
'url': 'https://archive.org/' + track['file'].lstrip('/')
'url': 'https://archive.org/' + track['file'].lstrip('/'),
}

metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier)

@ -293,7 +294,9 @@ def _real_extract(self, url):
'height': int_or_none(f.get('width')),
'filesize': int_or_none(f.get('size'))})

extension = (f['name'].rsplit('.', 1) + [None])[1]
_, has_ext, extension = f['name'].rpartition('.')
if not has_ext:
extension = None

# We don't want to skip private formats if the user has access to them,
# however without access to an account with such privileges we can't implement/test this.

@ -308,7 +311,7 @@ def _real_extract(self, url):
'filesize': int_or_none(f.get('size')),
'protocol': 'https',
'source_preference': 0 if f.get('source') == 'original' else -1,
'format_note': f.get('source')
'format_note': f.get('source'),
})

for entry in entries.values():

@ -371,7 +374,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/user/Zeurel',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'channel_url': 'https://www.youtube.com/channel/UCukCyHaD-bK3in_pKpfH9Eg',
}
},
}, {
# Internal link
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',

@ -388,7 +391,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/user/1veritasium',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'channel_url': 'https://www.youtube.com/channel/UCHnyfMqiRRG1u-2MsSQLbXA',
}
},
}, {
# Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
# Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description

@ -403,8 +406,8 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_id': 'machinima',
'uploader_url': 'https://www.youtube.com/user/machinima',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'machinima'
}
'uploader': 'machinima',
},
}, {
# FLV video. Video file URL does not provide itag information
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',

@ -421,7 +424,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'channel_url': 'https://www.youtube.com/channel/UC4QobU6STFB0P71PMvOGN5A',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'jawed',
}
},
}, {
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
'info_dict': {

@ -437,7 +440,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/user/itsmadeon',
'channel_url': 'https://www.youtube.com/channel/UCqMDNf3Pn5L7pcNkuSEeO3w',
'thumbnail': r're:https?://.*\.(jpg|webp)',
}
},
}, {
# First capture is of dead video, second is the oldest from CDX response.
'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',

@ -454,7 +457,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'ETC News',
}
},
}, {
# First capture of dead video, capture date in link links to dead capture.
'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',

@ -473,15 +476,15 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader': 'ETC News',
},
'expected_warnings': [
r'unable to download capture webpage \(it may not be archived\)'
]
r'unable to download capture webpage \(it may not be archived\)',
],
}, { # Very old YouTube page, has - YouTube in title.
'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
'info_dict': {
'id': '-06-KB9XTzg',
'ext': 'flv',
'title': 'New Coin Hack!! 100% Safe!!'
}
'title': 'New Coin Hack!! 100% Safe!!',
},
}, {
'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
'info_dict': {

@ -495,7 +498,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'DankPods',
}
},
}, {
# player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',

@ -512,7 +515,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_id': 'PewDiePie',
'uploader_url': 'https://www.youtube.com/user/PewDiePie',
'thumbnail': r're:https?://.*\.(jpg|webp)',
}
},
}, {
# ~June 2010 Capture. swfconfig
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=8XeW5ilk-9Y',

@ -527,7 +530,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks',
'upload_date': '20090520',
}
},
}, {
# Jan 2011: watch-video-date/eow-date surrounded by whitespace
'url': 'https://web.archive.org/web/20110126141719/http://www.youtube.com/watch?v=Q_yjX80U7Yc',

@ -542,7 +545,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)',
'duration': 132,
'uploader_url': 'https://www.youtube.com/user/claybutlermusic',
}
},
}, {
# ~May 2009 swfArgs. ytcfg is spread out over various vars
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=c5uJgG05xUY',

@ -557,7 +560,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'description': 'md5:4ca77d79538064e41e4cc464e93f44f0',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'duration': 754,
}
},
}, {
# ~June 2012. Upload date is in another lang so cannot extract.
'url': 'https://web.archive.org/web/20120607174520/http://www.youtube.com/watch?v=xWTLLl-dQaA',

@ -571,7 +574,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader': 'BlackNerdComedy',
'duration': 182,
'thumbnail': r're:https?://.*\.(jpg|webp)',
}
},
}, {
# ~July 2013
'url': 'https://web.archive.org/web/*/https://www.youtube.com/watch?v=9eO1aasHyTM',

@ -587,7 +590,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'channel_url': 'https://www.youtube.com/channel/UC62R2cBezNBOqxSerfb1nMQ',
'upload_date': '20060428',
'uploader': 'punkybird',
}
},
}, {
# April 2020: Player response in player config
'url': 'https://web.archive.org/web/20200416034815/https://www.youtube.com/watch?v=Cf7vS8jc7dY&gl=US&hl=en',

@ -604,7 +607,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)',
'description': 'md5:c625bb3c02c4f5fb4205971e468fa341',
'uploader_url': 'https://www.youtube.com/user/GameGrumps',
}
},
}, {
# watch7-user-header with yt-user-info
'url': 'ytarchive:kbh4T_b4Ixw:20160307085057',

@ -619,7 +622,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)',
'upload_date': '20150503',
'channel_id': 'UCnTaGvsHmMy792DWeT6HbGA',
}
},
}, {
# April 2012
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=SOm7mPoPskU',

@ -634,35 +637,35 @@ class YoutubeWebArchiveIE(InfoExtractor):
'duration': 200,
'upload_date': '20120407',
'uploader_id': 'thecomputernerd01',
}
},
}, {
'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
'only_matching': True
'only_matching': True,
}, {
'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
'only_matching': True
'only_matching': True,
}, {
# Video not archived, only capture is unavailable video page
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
'only_matching': True
'only_matching': True,
}, { # Encoded url
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
'only_matching': True
'only_matching': True,
}, {
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
'only_matching': True
'only_matching': True,
}, {
'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&search=soccer',
'only_matching': True
'only_matching': True,
}, {
'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
'only_matching': True
'only_matching': True,
}, {
'url': 'ytarchive:BaW_jenozKc:20050214000000',
'only_matching': True
'only_matching': True,
}, {
'url': 'ytarchive:BaW_jenozKc',
'only_matching': True
'only_matching': True,
},
]
_YT_INITIAL_DATA_RE = YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE

@ -673,13 +676,13 @@ class YoutubeWebArchiveIE(InfoExtractor):

_YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers
_YT_ALL_THUMB_SERVERS = orderedSet(
_YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])
[*_YT_DEFAULT_THUMB_SERVERS, 'img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(5), 9)]])

_WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
_OLDEST_CAPTURE_DATE = 20050214000000
_NEWEST_CAPTURE_DATE = 20500101000000

def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note=None, fatal=False):
def _call_cdx_api(self, item_id, url, filters: list | None = None, collapse: list | None = None, query: dict | None = None, note=None, fatal=False):
# CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
query = {
'url': url,

@ -688,14 +691,14 @@ def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = Non
'limit': 500,
'filter': ['statuscode:200'] + (filters or []),
'collapse': collapse or [],
**(query or {})
**(query or {}),
}
res = self._download_json(
'https://web.archive.org/cdx/search/cdx', item_id,
note or 'Downloading CDX API JSON', query=query, fatal=fatal)
if isinstance(res, list) and len(res) >= 2:
# format response to make it easier to use
return list(dict(zip(res[0], v)) for v in res[1:])
return [dict(zip(res[0], v)) for v in res[1:]]
elif not isinstance(res, list) or len(res) != 0:
self.report_warning('Error while parsing CDX API response' + bug_reports_message())

@ -852,7 +855,7 @@ def _extract_thumbnails(self, video_id):
{
'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
'filesize': int_or_none(thumbnail_dict.get('length')),
'preference': int_or_none(thumbnail_dict.get('length'))
'preference': int_or_none(thumbnail_dict.get('length')),
} for thumbnail_dict in response)
if not try_all:
break

@ -893,7 +896,7 @@ def _real_extract(self, url):
for retry in retry_manager:
try:
urlh = self._request_webpage(
HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
HEADRequest(f'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/{video_id}'),
video_id, note='Fetching archived video file url', expected_status=True)
except ExtractorError as e:
# HTTP Error 404 is expected if the video is not saved.

@ -924,21 +927,21 @@ def _real_extract(self, url):
info['thumbnails'] = self._extract_thumbnails(video_id)

if urlh:
url = compat_urllib_parse_unquote(urlh.url)
url = urllib.parse.unquote(urlh.url)
video_file_url_qs = parse_qs(url)
# Attempt to recover any ext & format info from playback url & response headers
format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
fmt = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
if itag and itag in YoutubeIE._formats:
format.update(YoutubeIE._formats[itag])
format.update({'format_id': itag})
fmt.update(YoutubeIE._formats[itag])
fmt.update({'format_id': itag})
else:
mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
ext = (mimetype2ext(mime)
or urlhandle_detect_ext(urlh)
or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
format.update({'ext': ext})
info['formats'] = [format]
fmt.update({'ext': ext})
info['formats'] = [fmt]
if not info.get('duration'):
info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))

@ -11,7 +11,7 @@

class ArcPublishingIE(InfoExtractor):
_UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
_VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX
_VALID_URL = rf'arcpublishing:(?P<org>[a-z]+):(?P<id>{_UUID_REGEX})'
_TESTS = [{
# https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',

@ -74,12 +74,12 @@ class ArcPublishingIE(InfoExtractor):
def _extract_embed_urls(cls, url, webpage):
entries = []
# https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
for powa_el in re.findall(rf'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="{ArcPublishingIE._UUID_REGEX}"[^>]*>)', webpage):
powa = extract_attributes(powa_el) or {}
org = powa.get('data-org')
uuid = powa.get('data-uuid')
if org and uuid:
entries.append('arcpublishing:%s:%s' % (org, uuid))
entries.append(f'arcpublishing:{org}:{uuid}')
return entries

def _real_extract(self, url):

@ -122,7 +122,7 @@ def _real_extract(self, url):
elif stream_type in ('ts', 'hls'):
m3u8_formats = self._extract_m3u8_formats(
s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False)
if all([f.get('acodec') == 'none' for f in m3u8_formats]):
if all(f.get('acodec') == 'none' for f in m3u8_formats):
continue
for f in m3u8_formats:
height = f.get('height')

@ -136,7 +136,7 @@ def _real_extract(self, url):
else:
vbr = int_or_none(s.get('bitrate'))
formats.append({
'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type,
'format_id': f'{stream_type}-{vbr}' if vbr else stream_type,
'vbr': vbr,
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),

@ -85,7 +85,7 @@ def _extract_formats(self, media_info, video_id):
formats.extend(self._extract_f4m_formats(
update_url_query(stream_url, {
'hdcore': '3.1.1',
'plugin': 'aasp-3.1.1.69.124'
'plugin': 'aasp-3.1.1.69.124',
}), video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(

@ -96,12 +96,12 @@ def _extract_formats(self, media_info, video_id):
f = {
'url': server,
'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality),
'format_id': f'a{num}-rtmp-{quality}',
}
else:
f = {
'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality)
'format_id': f'a{num}-{ext}-{quality}',
}
m = re.search(
r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$',

@ -64,7 +64,7 @@ def _real_extract(self, url):
raise ExtractorError('Invalid URL', expected=True)

media = self._download_json(
'https://video.qbrick.com/api/v1/public/accounts/%s/medias/%s' % (account_id, video_id),
f'https://video.qbrick.com/api/v1/public/accounts/{account_id}/medias/{video_id}',
video_id, query={
# https://video.qbrick.com/docs/api/examples/library-api.html
'fields': 'asset/resources/*/renditions/*(height,id,language,links/*(href,mimeType),type,size,videos/*(audios/*(codec,sampleRate),bitrate,codec,duration,height,width),width),created,metadata/*(title,description),tags',

@ -1,8 +1,6 @@
import urllib.parse

from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
float_or_none,
format_field,

@ -35,7 +33,7 @@ class ArnesIE(InfoExtractor):
'view_count': int,
'tags': ['linearna_algebra'],
'start_time': 10,
}
},
}, {
'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4',
'only_matching': True,

@ -93,6 +91,6 @@ def _real_extract(self, url):
'duration': float_or_none(video.get('duration'), 1000),
'view_count': int_or_none(video.get('views')),
'tags': video.get('hashtags'),
'start_time': int_or_none(compat_parse_qs(
compat_urllib_parse_urlparse(url).query).get('t', [None])[0]),
'start_time': int_or_none(urllib.parse.parse_qs(
urllib.parse.urlparse(url).query).get('t', [None])[0]),
}

Some files were not shown because too many files have changed in this diff