Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-01 17:23:48 +01:00)

[extractor,cleanup] Use _search_nextjs_data
parent e0585e6562
commit 135dfa2c7e
@@ -1544,12 +1544,12 @@ def traverse_json_ld(json_ld, at_top_level=True):

         return dict((k, v) for k, v in info.items() if v is not None)

-    def _search_nextjs_data(self, webpage, video_id, **kw):
+    def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
         return self._parse_json(
             self._search_regex(
                 r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
-                webpage, 'next.js data', **kw),
-            video_id, **kw)
+                webpage, 'next.js data', fatal=fatal, **kw),
+            video_id, transform_source=transform_source, fatal=fatal)

     def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__'):
         ''' Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function. '''
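The helper wraps the same regex-plus-JSON-parse pattern that the extractors below were each hand-rolling. A standalone sketch of that pattern, outside the InfoExtractor class (names here are illustrative, not yt-dlp API):

    import json
    import re


    def search_nextjs_data(webpage, fatal=True):
        # Pull the JSON blob Next.js embeds in <script id="__NEXT_DATA__"> and parse it.
        mobj = re.search(
            r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>', webpage)
        if not mobj:
            if fatal:
                raise ValueError('Unable to find next.js data')
            return {}
        return json.loads(mobj.group(1))
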
@@ -243,8 +243,8 @@ def _real_extract(self, url):

         webpage = self._download_webpage(url, playlist_id)

-        json_map = try_get(self._parse_json(self._html_search_regex(
-            '(?s)<script[^>]+id=[\'"]__NEXT_DATA__[^>]*>([^<]+)</script>', webpage, 'json_map'), playlist_id),
+        json_map = try_get(
+            self._search_nextjs_data(webpage, playlist_id),
             lambda x: x['props']['pageProps']['article']['body']['content']) or []

         entries = []
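The surrounding try_get is what keeps the traversal safe once the JSON is parsed. A simplified sketch of that idiom (the real utility in yt_dlp.utils does more, e.g. it also accepts several getters):

    def try_get(src, getter, expected_type=None):
        # Apply a getter to nested, possibly missing data; return None instead of raising.
        try:
            value = getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            return None
        if expected_type is None or isinstance(value, expected_type):
            return value
        return None
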
@@ -408,9 +408,7 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

-        data = self._parse_json(self._search_regex(
-            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
-            webpage, 'bootstrap json'), video_id)['props']['initialState']
+        data = self._search_nextjs_data(webpage, video_id)['props']['initialState']
         video_data = try_get(data, lambda x: x['video']['current'], dict)
         if not video_data:
             video_data = data['article']['content'][0]['primaryMedia']['video']
@@ -41,9 +41,7 @@ class NovaPlayIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        video_props = self._parse_json(self._search_regex(
-            r'<script\s?id=\"__NEXT_DATA__\"\s?type=\"application/json\">({.+})</script>',
-            webpage, 'video_props'), video_id)['props']['pageProps']['video']
+        video_props = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['video']
         m3u8_url = self._download_json(
             f'https://nbg-api.fite.tv/api/v2/videos/{video_id}/streams',
             video_id, headers={'x-flipps-user-agent': 'Flipps/75/9.7'})[0]['url']
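When porting an extractor like this, it helps to first inspect which keys a site exposes under props.pageProps. A quick standalone sketch (the function name and URL are placeholders, not part of this commit):

    import json
    import re
    import urllib.request


    def dump_nextjs_page_props(url):
        # Fetch the page and list the top-level props.pageProps keys of its __NEXT_DATA__ payload.
        webpage = urllib.request.urlopen(url).read().decode('utf-8', 'replace')
        mobj = re.search(
            r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>', webpage)
        data = json.loads(mobj.group(1)) if mobj else {}
        return sorted(data.get('props', {}).get('pageProps', {}))


    # print(dump_nextjs_page_props('https://example.com/some-next-js-page'))
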
@@ -3,7 +3,6 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_str,
     compat_parse_qs,
     compat_urllib_parse_urlparse,
 )
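(With the asset_id line below now using the built-in str(), compat_str presumably has no remaining users in this file, hence its removal from the compat imports.)
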
@@ -125,9 +124,7 @@ class SkyItVideoLiveIE(SkyItPlayerIE):
     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
-        asset_id = compat_str(self._parse_json(self._search_regex(
-            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
-            webpage, 'next data'), display_id)['props']['initialState']['livePage']['content']['asset_id'])
+        asset_id = str(self._search_nextjs_data(webpage, display_id)['props']['initialState']['livePage']['content']['asset_id'])
         livestream = self._download_json(
             'https://apid.sky.it/vdp/v1/getLivestream',
             asset_id, query={'id': asset_id})
@@ -45,10 +45,7 @@ def _real_extract(self, url):
         ptype, video_id = self._match_valid_url(url).groups()

         webpage = self._download_webpage(url, video_id, fatal=False) or ''
-        props = (self._parse_json(self._search_regex(
-            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
-            webpage, 'next data', default='{}'), video_id,
-            fatal=False) or {}).get('props') or {}
+        props = self._search_nextjs_data(webpage, video_id, default='{}').get('props') or {}
         player_api_cache = try_get(
             props, lambda x: x['initialReduxState']['playerApiCache']) or {}

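Dropping the explicit fatal=False here works because a default short-circuits the fatal path: when the tag is missing, the helper's regex search returns the supplied default string instead of raising. A minimal self-contained illustration of that behaviour (the helper name below is made up for the sketch):

    import json
    import re


    def search_regex_like(pattern, string, default=None):
        # Non-fatal path: return the match if present, otherwise the caller-supplied default.
        mobj = re.search(pattern, string)
        return mobj.group(1) if mobj else default


    # A page without __NEXT_DATA__ parses to {} and .get('props') stays safe.
    props = json.loads(search_regex_like(
        r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
        '<html><body>no next.js payload here</body></html>',
        default='{}')).get('props') or {}
    print(props)  # -> {}
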
@@ -34,8 +34,7 @@ class TelemundoIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        metadata = self._parse_json(
-            self._search_regex(r'<[^>]+id="__NEXT_DATA__"[^>]+>([^<]+)', webpage, 'JSON metadata'), video_id)
+        metadata = self._search_nextjs_data(webpage, video_id)
         redirect_url = try_get(
             metadata,
             lambda x: x['props']['initialState']['video']['associatedPlaylists'][0]['videos'][0]['videoAssets'][0]['publicUrl'])
@@ -451,12 +451,9 @@ def _real_extract(self, url):
         # If we only call once, we get a 403 when downlaoding the video.
         self._download_webpage(url, video_id)
         webpage = self._download_webpage(url, video_id, note='Downloading video webpage')
-        next_json = self._search_regex(
-            r'id=\"__NEXT_DATA__\"\s+type=\"application\/json\"\s*[^>]+>\s*(?P<next_data>[^<]+)',
-            webpage, 'next data', group='next_data', default=None)
+        next_data = self._search_nextjs_data(webpage, video_id, default='{}')

-        if next_json:
-            next_data = self._parse_json(next_json, video_id)
+        if next_data:
             status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode'), expected_type=int) or 0
             video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct'), expected_type=dict)
         else:
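traverse_obj then does the path walking over the parsed payload. A simplified, single-path sketch of that behaviour (the real utility in yt_dlp.utils supports far more, such as alternative paths):

    def traverse_path(obj, path, expected_type=None):
        # Walk a sequence of keys through nested dicts; return None instead of
        # raising when a step is missing or the final type does not match.
        for key in path:
            if not isinstance(obj, dict) or key not in obj:
                return None
            obj = obj[key]
        if expected_type is not None and not isinstance(obj, expected_type):
            return None
        return obj


    # e.g. traverse_path(next_data, ('props', 'pageProps', 'statusCode'), int) or 0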