Compare commits

...

5 Commits

Author SHA1 Message Date
Renan D
457cad2509
Merge f80ba18ee9 into 8e15177b41
2024-05-17 21:31:51 +05:30
Justin Keogh
8e15177b41
[ie/youtube] Fix comments extraction (#9775)
Closes #9358
Authored by: jakeogh, minamotorin, shoxie007, bbilly1

Co-authored-by: minamotorin <76122224+minamotorin@users.noreply.github.com>
Co-authored-by: shoxie007 <74592022+shoxie007@users.noreply.github.com>
Co-authored-by: Simon <35427372+bbilly1@users.noreply.github.com>
2024-05-17 14:37:30 +00:00
Roeniss Moon
dd9ad97b1f
[cookies] Add --cookies-from-browser support for Whale (#9649)
Closes #9307
Authored by: roeniss
2024-05-17 14:33:12 +00:00
minamotorin
61b17437dc
[ie] Add POST data hash to --write-pages filenames (#9879)
Closes #9773
Authored by: minamotorin
2024-05-17 14:28:36 +00:00
Renan D.
f80ba18ee9
[threads] Add extractor
2024-05-03 19:27:49 -03:00
7 changed files with 236 additions and 16 deletions

@@ -666,7 +666,7 @@ ## Filesystem Options:
The name of the browser to load cookies
from. Currently supported browsers are:
brave, chrome, chromium, edge, firefox,
opera, safari, vivaldi. Optionally, the
opera, safari, vivaldi, whale. Optionally, the
KEYRING used for decrypting Chromium cookies
on Linux, the name/path of the PROFILE to
load cookies from, and the CONTAINER name
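
For reference, the new value can be exercised either as `--cookies-from-browser whale` on the command line or via the Python API. A minimal sketch (illustrative only; the URL is a placeholder, and the `cookiesfrombrowser` param is a `(browser, profile, keyring, container)` tuple):

import yt_dlp

ydl_opts = {
    # Only the browser name is required; profile, keyring and container may stay None
    'cookiesfrombrowser': ('whale', None, None, None),
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=EXAMPLE'])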

@@ -1449,6 +1449,8 @@ # Supported sites
- **ThisVid**
- **ThisVidMember**
- **ThisVidPlaylist**
- **Threads**
- **ThreadsIOS**: Threads' iOS `barcelona://` URL
- **ThreeSpeak**
- **ThreeSpeakUser**
- **TikTok**

@@ -46,7 +46,7 @@
from .utils._utils import _YDLLogger
from .utils.networking import normalize_url
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi', 'whale'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
@@ -219,6 +219,7 @@ def _get_chromium_based_browser_settings(browser_name):
'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
'whale': os.path.join(appdata_local, R'Naver\Naver Whale\User Data'),
}[browser_name]
elif sys.platform == 'darwin':
@@ -230,6 +231,7 @@ def _get_chromium_based_browser_settings(browser_name):
'edge': os.path.join(appdata, 'Microsoft Edge'),
'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
'vivaldi': os.path.join(appdata, 'Vivaldi'),
'whale': os.path.join(appdata, 'Naver/Whale'),
}[browser_name]
else:
@@ -241,6 +243,7 @@ def _get_chromium_based_browser_settings(browser_name):
'edge': os.path.join(config, 'microsoft-edge'),
'opera': os.path.join(config, 'opera'),
'vivaldi': os.path.join(config, 'vivaldi'),
'whale': os.path.join(config, 'naver-whale'),
}[browser_name]
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
@@ -252,6 +255,7 @@ def _get_chromium_based_browser_settings(browser_name):
'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
'whale': 'Whale',
}[browser_name]
browsers_without_profiles = {'opera'}
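
Taken together, the new entries point Whale cookie extraction at the following locations. A rough standalone sketch of the same mapping (assumes the usual %LOCALAPPDATA%, ~/Library/Application Support and XDG config locations that cookies.py derives):

import os
import sys

def whale_user_data_dir():
    # Mirrors the 'whale' entries added to _get_chromium_based_browser_settings above
    if sys.platform in ('win32', 'cygwin'):
        return os.path.join(os.environ['LOCALAPPDATA'], R'Naver\Naver Whale\User Data')
    if sys.platform == 'darwin':
        return os.path.expanduser('~/Library/Application Support/Naver/Whale')
    config = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
    return os.path.join(config, 'naver-whale')

The keyring name used for decrypting the cookies resolves to 'Whale' on both macOS and Linux, per the last hunk above.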

@@ -1985,6 +1985,10 @@
ThisVidMemberIE,
ThisVidPlaylistIE,
)
from .threads import (
ThreadsIE,
ThreadsIOSIE
)
from .threespeak import (
ThreeSpeakIE,
ThreeSpeakUserIE,

@@ -957,7 +957,8 @@ def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data)
return (content, urlh)
@staticmethod
@@ -1005,8 +1006,10 @@ def __check_blocked(self, content):
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _request_dump_filename(self, url, video_id):
basen = f'{video_id}_{url}'
def _request_dump_filename(self, url, video_id, data=None):
if data is not None:
data = hashlib.md5(data).hexdigest()
basen = join_nonempty(video_id, data, url, delim='_')
trim_length = self.get_param('trim_file_name') or 240
if len(basen) > trim_length:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
@@ -1028,16 +1031,18 @@ def __decode_webpage(self, webpage_bytes, encoding, headers):
except LookupError:
return webpage_bytes.decode('utf-8', 'replace')
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True,
prefix=None, encoding=None, data=None):
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
url_or_request = self._create_request(url_or_request, data)
if self.get_param('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self.get_param('write_pages'):
filename = self._request_dump_filename(urlh.url, video_id)
filename = self._request_dump_filename(urlh.url, video_id, url_or_request.data)
self.to_screen(f'Saving request to {filename}')
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
@@ -1098,7 +1103,7 @@ def download_content(self, url_or_request, video_id, note=note, errnote=errnote,
impersonate=None, require_impersonation=False):
if self.get_param('load_pages'):
url_or_request = self._create_request(url_or_request, data, headers, query)
filename = self._request_dump_filename(url_or_request.url, video_id)
filename = self._request_dump_filename(url_or_request.url, video_id, url_or_request.data)
self.to_screen(f'Loading request from {filename}')
try:
with open(filename, 'rb') as dumpf:
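
The practical effect of threading `data` through these helpers: with --write-pages (and --load-pages), a POST body now contributes an md5 digest to the dump filename, so two requests to the same URL with different payloads no longer collide. A rough sketch of the naming scheme (illustrative; the real implementation also trims long names and sanitizes the result):

import hashlib

def dump_filename(url, video_id, data=None):
    # join_nonempty(video_id, md5(data), url, delim='_'), as in _request_dump_filename
    parts = [video_id]
    if data is not None:
        parts.append(hashlib.md5(data).hexdigest())
    parts.append(url)
    return '_'.join(parts) + '.dump'

print(dump_filename('abc123', 'https://example.com/api'))             # GET: abc123_https://example.com/api.dump
print(dump_filename('abc123', 'https://example.com/api', b'page=2'))  # POST: md5 of the body inserted in the middle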

yt_dlp/extractor/threads.py (new file, 157 lines)

@@ -0,0 +1,157 @@
from .common import InfoExtractor
from ..utils import (
strftime_or_none,
traverse_obj,
remove_end,
strip_or_none
)
class ThreadsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?threads\.net/(?P<uploader>[^/]+)/post/(?P<id>[^/?#&]+)/?(?P<embed>embed.*?)?'
_TESTS = [{
'url': 'https://www.threads.net/@tntsportsbr/post/C6cqebdCfBi',
'info_dict': {
'id': 'C6cqebdCfBi',
'ext': 'mp4',
'title': 'md5:062673d04195aa2d99b8d7a11798cb9d',
'description': 'md5:fe0c73f9a892fb92efcc67cc075561b0',
'uploader': 'TNT Sports Brasil',
'uploader_id': 'tntsportsbr',
'uploader_url': 'https://www.threads.net/@tntsportsbr',
'channel': 'tntsportsbr',
'channel_url': 'https://www.threads.net/@tntsportsbr',
'timestamp': 1714613811,
'upload_date': '20240502',
'like_count': int,
'channel_is_verified': bool,
'thumbnail': r're:^https?://.*\.jpg'
}
}, {
'url': 'https://www.threads.net/@felipebecari/post/C6cM_yNPHCF',
'info_dict': {
'id': 'C6cM_yNPHCF',
'ext': 'mp4',
'title': '@felipebecari • Sobre o futuro dos dois últimos resgatados: tem muita notícia boa! 🐶❤️',
'description': 'Sobre o futuro dos dois últimos resgatados: tem muita notícia boa! 🐶❤️',
'uploader': 'Felipe Becari',
'uploader_id': 'felipebecari',
'uploader_url': 'https://www.threads.net/@felipebecari',
'channel': 'felipebecari',
'channel_url': 'https://www.threads.net/@felipebecari',
'timestamp': 1714598318,
'upload_date': '20240501',
'like_count': int,
'channel_is_verified': bool,
'thumbnail': r're:^https?://.*\.jpg'
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
metadata = {}
# Try getting videos from json
json_data = self._search_regex(
r'<script[^>]+>(.*"code":"%s".*)</script>' % video_id,
webpage, 'main json', fatal=True)
result = self._search_json(
r'"result":', json_data,
'result data', video_id, fatal=True)
edges = traverse_obj(result, ('data', 'data', 'edges'))
for node in edges:
items = traverse_obj(node, ('node', 'thread_items'))
for item in items:
post = item.get('post')
if post and post.get('code') == video_id:
formats = []
thumbnails = []
# Videos
if (post.get('carousel_media') is not None): # Handle multiple videos posts
media_list = post.get('carousel_media')
else:
media_list = [post]
for media in media_list:
videos = media.get('video_versions')
for video in videos:
formats.append({
'format_id': '%s-%s' % (media.get('pk'), video['type']), # id-type
'url': video['url'],
'width': media.get('original_width'),
'height': media.get('original_height'),
})
# Thumbnails
thumbs = traverse_obj(post, ('image_versions2', 'candidates'))
for thumb in thumbs:
thumbnails.append({
'url': thumb['url'],
'width': thumb['width'],
'height': thumb['height'],
})
# Metadata
metadata.setdefault('uploader_id', traverse_obj(post, ('user', 'username')))
metadata.setdefault('channel_is_verified', traverse_obj(post, ('user', 'is_verified')))
metadata.setdefault('uploader_url', 'https://www.threads.net/@%s' % traverse_obj(post, ('user', 'username')))
metadata.setdefault('timestamp', post.get('taken_at'))
metadata.setdefault('like_count', post.get('like_count'))
# Try getting metadata
metadata['id'] = video_id
metadata['title'] = strip_or_none(remove_end(self._html_extract_title(webpage), '• Threads'))
metadata['description'] = self._og_search_description(webpage)
metadata['channel'] = metadata.get('uploader_id')
metadata['channel_url'] = metadata.get('uploader_url')
metadata['uploader'] = self._search_regex(r'(.*?) \(', self._og_search_title(webpage), 'uploader')
metadata['upload_date'] = strftime_or_none(metadata.get('timestamp'))
return {
**metadata,
'formats': formats,
'thumbnails': thumbnails
}
class ThreadsIOSIE(InfoExtractor):
IE_DESC = 'IOS barcelona:// URL'
_VALID_URL = r'barcelona://media\?shortcode=(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'barcelona://media?shortcode=C6fDehepo5D',
'info_dict': {
'id': 'C6fDehepo5D',
'ext': 'mp4',
'title': 'md5:dc92f960981b8b3a33eba9681e9fdfc6',
'description': 'md5:0c36a7e67e1517459bc0334dba932164',
'uploader': 'Sa\u0303o Paulo Futebol Clube',
'uploader_id': 'saopaulofc',
'uploader_url': 'https://www.threads.net/@saopaulofc',
'channel': 'saopaulofc',
'channel_url': 'https://www.threads.net/@saopaulofc',
'timestamp': 1714694014,
'upload_date': '20240502',
'like_count': int,
'channel_is_verified': bool,
'thumbnail': r're:^https?://.*\.jpg'
},
'add_ie': ['Threads']
}]
def _real_extract(self, url):
video_id = self._match_id(url)
# Threads ignores the username segment of the URL and redirects to the correct one,
# so we use ** as a placeholder instead of resolving the uploader
return self.url_result(f'http://www.threads.net/**/post/{video_id}', ThreadsIE, video_id)
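
A minimal usage sketch for the new extractor (assumes a yt-dlp build with this PR applied; the URL is one of the test cases above):

import yt_dlp

with yt_dlp.YoutubeDL() as ydl:
    info = ydl.extract_info('https://www.threads.net/@tntsportsbr/post/C6cqebdCfBi', download=False)
    print(info['id'], info.get('uploader_id'), len(info.get('formats') or []))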

@@ -3317,7 +3317,36 @@ def _extract_heatmap(self, data):
'value': ('intensityScoreNormalized', {float_or_none}),
})) or None
def _extract_comment(self, comment_renderer, parent=None):
def _extract_comment(self, entities, parent=None):
comment_entity_payload = get_first(entities, ('payload', 'commentEntityPayload', {dict}))
if not (comment_id := traverse_obj(comment_entity_payload, ('properties', 'commentId', {str}))):
return
toolbar_entity_payload = get_first(entities, ('payload', 'engagementToolbarStateEntityPayload', {dict}))
time_text = traverse_obj(comment_entity_payload, ('properties', 'publishedTime', {str})) or ''
return {
'id': comment_id,
'parent': parent or 'root',
**traverse_obj(comment_entity_payload, {
'text': ('properties', 'content', 'content', {str}),
'like_count': ('toolbar', 'likeCountA11y', {parse_count}),
'author_id': ('author', 'channelId', {self.ucid_or_none}),
'author': ('author', 'displayName', {str}),
'author_thumbnail': ('author', 'avatarThumbnailUrl', {url_or_none}),
'author_is_uploader': ('author', 'isCreator', {bool}),
'author_is_verified': ('author', 'isVerified', {bool}),
'author_url': ('author', 'channelCommand', 'innertubeCommand', (
('browseEndpoint', 'canonicalBaseUrl'), ('commandMetadata', 'webCommandMetadata', 'url')
), {lambda x: urljoin('https://www.youtube.com', x)}),
}, get_all=False),
'is_favorited': (None if toolbar_entity_payload is None else
toolbar_entity_payload.get('heartState') == 'TOOLBAR_HEART_STATE_HEARTED'),
'_time_text': time_text, # FIXME: non-standard, but we need a way of showing that it is an estimate.
'timestamp': self._parse_time_text(time_text),
}
def _extract_comment_old(self, comment_renderer, parent=None):
comment_id = comment_renderer.get('commentId')
if not comment_id:
return
@@ -3398,21 +3427,39 @@ def extract_header(contents):
break
return _continuation
def extract_thread(contents):
def extract_thread(contents, entity_payloads):
if not parent:
tracker['current_page_thread'] = 0
for content in contents:
if not parent and tracker['total_parent_comments'] >= max_parents:
yield
comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
comment_renderer = get_first(
(comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
expected_type=dict, default={})
comment = self._extract_comment(comment_renderer, parent)
# old comment format
if not entity_payloads:
comment_renderer = get_first(
(comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
expected_type=dict, default={})
comment = self._extract_comment_old(comment_renderer, parent)
# new comment format
else:
view_model = (
traverse_obj(comment_thread_renderer, ('commentViewModel', 'commentViewModel', {dict}))
or traverse_obj(content, ('commentViewModel', {dict})))
comment_keys = traverse_obj(view_model, (('commentKey', 'toolbarStateKey'), {str}))
if not comment_keys:
continue
entities = traverse_obj(entity_payloads, lambda _, v: v['entityKey'] in comment_keys)
comment = self._extract_comment(entities, parent)
if comment:
comment['is_pinned'] = traverse_obj(view_model, ('pinnedText', {str})) is not None
if not comment:
continue
comment_id = comment['id']
if comment.get('is_pinned'):
tracker['pinned_comment_ids'].add(comment_id)
# Sometimes YouTube may break and give us infinite looping comments.
@@ -3505,7 +3552,7 @@ def extract_thread(contents):
check_get_keys = None
if not is_forced_continuation and not (tracker['est_total'] == 0 and tracker['running_total'] == 0):
check_get_keys = [[*continuation_items_path, ..., (
'commentsHeaderRenderer' if is_first_continuation else ('commentThreadRenderer', 'commentRenderer'))]]
'commentsHeaderRenderer' if is_first_continuation else ('commentThreadRenderer', 'commentViewModel', 'commentRenderer'))]]
try:
response = self._extract_response(
item_id=None, query=continuation,
@@ -3529,6 +3576,7 @@ def extract_thread(contents):
raise
is_forced_continuation = False
continuation = None
mutations = traverse_obj(response, ('frameworkUpdates', 'entityBatchUpdate', 'mutations', ..., {dict}))
for continuation_items in traverse_obj(response, continuation_items_path, expected_type=list, default=[]):
if is_first_continuation:
continuation = extract_header(continuation_items)
@@ -3537,7 +3585,7 @@ def extract_thread(contents):
break
continue
for entry in extract_thread(continuation_items):
for entry in extract_thread(continuation_items, mutations):
if not entry:
return
yield entry
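
To make the new flow concrete: comment data now arrives as frameworkUpdates.entityBatchUpdate.mutations, each keyed by entityKey, and extract_thread matches a view model's commentKey/toolbarStateKey against those payloads before handing them to _extract_comment. A hand-rolled sketch of that matching (field names taken from the diff; the values are invented for illustration):

mutations = [
    {'entityKey': 'comment-key-1', 'payload': {'commentEntityPayload': {
        'properties': {'commentId': 'UgxEXAMPLE', 'publishedTime': '2 days ago',
                       'content': {'content': 'great video'}},
        'author': {'displayName': '@someone', 'isVerified': False},
        'toolbar': {'likeCountA11y': '12 likes'}}}},
    {'entityKey': 'toolbar-key-1', 'payload': {'engagementToolbarStateEntityPayload': {
        'heartState': 'TOOLBAR_HEART_STATE_HEARTED'}}},
]
comment_keys = {'comment-key-1', 'toolbar-key-1'}  # commentKey / toolbarStateKey from the view model

entities = [m for m in mutations if m['entityKey'] in comment_keys]
comment_payload = next(m['payload']['commentEntityPayload']
                       for m in entities if 'commentEntityPayload' in m['payload'])
toolbar_payload = next(m['payload']['engagementToolbarStateEntityPayload']
                       for m in entities if 'engagementToolbarStateEntityPayload' in m['payload'])
print(comment_payload['properties']['commentId'],
      toolbar_payload['heartState'] == 'TOOLBAR_HEART_STATE_HEARTED')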