[cleanup] Misc

pukkandan 2021-09-05 11:16:23 +05:30
parent e04a1ff92e
commit 526d74ec5a
7 changed files with 10 additions and 12 deletions

@@ -30,7 +30,7 @@ ### 2021.09.02
* The fetched sponsor sections are written to the infojson
* Deprecates: `--sponskrub`, `--no-sponskrub`, `--sponskrub-cut`, `--no-sponskrub-cut`, `--sponskrub-force`, `--no-sponskrub-force`, `--sponskrub-location`, `--sponskrub-args`
* Split `--embed-chapters` from `--embed-metadata` (it still implies the former by default)
-* Add option `--remove-chapters` to remove arbitrary chapters by [nihil-admirari](https://github.com/nihil-admirari), pukkandan
+* Add option `--remove-chapters` to remove arbitrary chapters by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
* Add option `--force-keyframes-at-cuts` for more accurate cuts when removing and splitting chapters by [nihil-admirari](https://github.com/nihil-admirari)
* Let `--match-filter` reject entries early
* Makes redundant: `--match-title`, `--reject-title`, `--min-views`, `--max-views`

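Aside on the `--match-filter` entry above: the same early rejection can also be set up from the Python API. This is only a sketch, assuming `yt_dlp.utils.match_filter_func` and the `match_filter` option mirror the CLI flag; the URL is a placeholder.

```python
# Sketch (not part of this commit): reject entries early via the embedded API,
# assuming match_filter_func mirrors what --match-filter does on the CLI.
import yt_dlp
from yt_dlp.utils import match_filter_func

ydl_opts = {
    # Skip entries with fewer than 1000 views or that are live streams,
    # covering what the --min-views/--max-views style options did.
    'match_filter': match_filter_func('view_count >= 1000 & !is_live'),
}

with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://example.com/some-playlist'])  # placeholder URL
```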

@@ -966,7 +966,7 @@ # OUTPUT TEMPLATE
%(name[.keys][addition][>strf][|default])[flags][width][.precision][length]type
```
-Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation`, `infojson`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`. For example, `-o '%(title)s.%(ext)s' -o 'thumbnail:%(title)s\%(title)s.%(ext)s'` will put the thumbnails in a folder with the same name as the video.
+Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`. For example, `-o '%(title)s.%(ext)s' -o 'thumbnail:%(title)s\%(title)s.%(ext)s'` will put the thumbnails in a folder with the same name as the video.
The available fields are:

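The README hunk above documents per-type output templates (`-o 'TYPE:TEMPLATE'`). A minimal sketch of the same thing from the embedded API, assuming `outtmpl` accepts a dict keyed by file type the way the CLI syntax suggests:

```python
# Sketch only: per-type output templates through the Python API, assuming
# 'outtmpl' may be a dict with 'default', 'thumbnail', ... keys.
import yt_dlp

ydl_opts = {
    'writethumbnail': True,
    'outtmpl': {
        'default': '%(title)s.%(ext)s',
        # Thumbnails go into a folder named after the video, as in the README
        # example (using '/' rather than the Windows '\' separator shown there).
        'thumbnail': '%(title)s/%(title)s.%(ext)s',
    },
}

with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://example.com/some-video'])  # placeholder URL
```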

@@ -123,7 +123,7 @@ def _extract_firefox_cookies(profile, logger):
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite')
if cookie_database_path is None:
raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
-logger.debug('extracting from: "{}"'.format(cookie_database_path))
+logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
with tempfile.TemporaryDirectory(prefix='youtube_dl') as tmpdir:
cursor = None
@@ -240,7 +240,7 @@ def _extract_chrome_cookies(browser_name, profile, logger):
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies')
if cookie_database_path is None:
raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
-logger.debug('extracting from: "{}"'.format(cookie_database_path))
+logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger)

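Both `cookies.py` hunks sit right after the lookup of the newest cookies database under the browser profile. A rough, self-contained approximation of that lookup and of the reworded debug message (this is not the actual `_find_most_recently_used_file` helper):

```python
# Approximation of the surrounding logic, for context only.
import os

def find_most_recently_used_file(root, filename):
    """Return the newest file named `filename` under `root`, or None."""
    candidates = [
        os.path.join(dirpath, filename)
        for dirpath, _, files in os.walk(root)
        if filename in files
    ]
    return max(candidates, key=lambda path: os.stat(path).st_mtime, default=None)

# Usage mirroring the Firefox hunk above:
# cookie_database_path = find_most_recently_used_file(search_root, 'cookies.sqlite')
# if cookie_database_path is None:
#     raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
# logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
```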

@@ -23,7 +23,6 @@
int_or_none,
KNOWN_EXTENSIONS,
mimetype2ext,
-network_exceptions,
remove_end,
parse_qs,
str_or_none,
@@ -711,7 +710,6 @@ def resolve_entry(*candidates):
query.pop('offset', None)
class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
_VALID_URL = r'''(?x)
https?://

@@ -132,9 +132,9 @@ def _real_extract(self, url):
class TikTokUserIE(InfoExtractor):
IE_NAME = 'tiktok:user'
-_VALID_URL = r'(?!.*/video/)https?://www\.tiktok\.com/@(?P<id>[\w\._]+)'
+_VALID_URL = r'https?://(?:www\.)?tiktok\.com/@(?P<id>[\w\._]+)/?(?:$|[#?])'
_TESTS = [{
-'url': 'https://www.tiktok.com/@corgibobaa?lang=en',
+'url': 'https://tiktok.com/@corgibobaa?lang=en',
'playlist_mincount': 45,
'info_dict': {
'id': '6935371178089399301',
@@ -196,7 +196,7 @@ def _entries(self, url, user_id):
'Referer': video_url,
}
}
-if not data_json['hasMore']:
+if not data_json.get('hasMore'):
break
cursor = data_json['cursor']

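The `_VALID_URL` change above replaces the negative lookahead for `/video/` with a pattern that anchors right after the user handle and makes `www.` optional, which is exactly what the updated test URL exercises. An illustrative check (my own, with a made-up video ID):

```python
# Compare the old and new TikTokUserIE patterns against a few URLs.
import re

OLD = r'(?!.*/video/)https?://www\.tiktok\.com/@(?P<id>[\w\._]+)'
NEW = r'https?://(?:www\.)?tiktok\.com/@(?P<id>[\w\._]+)/?(?:$|[#?])'

urls = [
    'https://www.tiktok.com/@corgibobaa?lang=en',        # user page, with www
    'https://tiktok.com/@corgibobaa?lang=en',            # user page, without www
    'https://www.tiktok.com/@corgibobaa/video/1234567',  # video page (dummy ID)
]
for url in urls:
    print(url, bool(re.match(OLD, url)), bool(re.match(NEW, url)))
# -> True  True    (both match the www user page)
# -> False True    (only the new pattern accepts the no-www form)
# -> False False   (neither matches a /video/ page)
```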

@@ -803,7 +803,7 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers
# We also want to catch all other network exceptions since errors in later pages can be troublesome
# See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
-last_error = error_to_compat_str(e.cause or e)
+last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:

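This youtube.py hunk changes how the retry loop in `_extract_response` stringifies the last failure (`e.cause or e.msg`, which relies on `msg` now being a plain string, see the `ExtractorError` hunk below). A schematic, self-contained version of that retry pattern, not the extractor code itself:

```python
# Schematic retry loop: remember the last failure as a string, retry
# transient errors, but give up immediately on HTTP 403/429.
import urllib.error

def fetch_with_retries(fetch, retries=3, fatal=True):
    last_error = None
    for count in range(retries + 1):
        try:
            return fetch()
        except urllib.error.URLError as e:
            code = getattr(e, 'code', None)      # set on HTTPError
            if code not in (403, 429):
                last_error = str(e.reason or e)  # analogous to `e.cause or e.msg`
                if count < retries:
                    continue                     # transient failure, try again
            if fatal:
                raise
            print(f'Giving up: {last_error or e}')
            return None
```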

@@ -2408,7 +2408,7 @@ def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=N
if sys.exc_info()[0] in network_exceptions:
expected = True
-self.msg = msg
+self.msg = str(msg)
self.traceback = tb
self.expected = expected
self.cause = cause
@@ -2419,7 +2419,7 @@ def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=N
super(ExtractorError, self).__init__(''.join((
format_field(ie, template='[%s] '),
format_field(video_id, template='%s: '),
-msg,
+self.msg,
format_field(cause, template=' (caused by %r)'),
'' if expected else bug_reports_message())))
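Finally, the `ExtractorError` hunks coerce `msg` to `str` and build the formatted exception message from the coerced value, so constructing the error from something that is not a string (another exception, for instance) still renders cleanly. A minimal illustration of the idea, not the real class:

```python
# Minimal stand-in showing why the str() coercion matters.
class MyExtractorError(Exception):
    def __init__(self, msg, video_id=None, ie=None, cause=None, expected=False):
        self.msg = str(msg)              # accept exceptions, ints, etc., not just str
        self.cause = cause
        self.expected = expected
        super().__init__(''.join((
            f'[{ie}] ' if ie else '',
            f'{video_id}: ' if video_id else '',
            self.msg,                    # the coerced string, not the raw `msg`
            f' (caused by {cause!r})' if cause else '',
        )))

# str() keeps the join safe even when msg is itself an exception:
# print(MyExtractorError(KeyError('hasMore'), video_id='abc123'))
#   -> abc123: 'hasMore'
```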