
[viewlift] fix extraction for snagfilms.com (closes #15766)

Remita Amine 2018-05-23 11:27:36 +01:00
parent b89ac53455
commit 57d6792024

youtube_dl/extractor/viewlift.py

@@ -1,24 +1,27 @@
 from __future__ import unicode_literals
 
+import base64
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     ExtractorError,
     clean_html,
     determine_ext,
     int_or_none,
     js_to_json,
+    parse_age_limit,
     parse_duration,
 )
 
 
 class ViewLiftBaseIE(InfoExtractor):
-    _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|monumentalsportsnetwork|vayafilm)\.com|kesari\.tv'
+    _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm)\.com'
 
 
 class ViewLiftEmbedIE(ViewLiftBaseIE):
-    _VALID_URL = r'https?://(?:(?:www|embed)\.)?(?:%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f-]{36})' % ViewLiftBaseIE._DOMAINS_REGEX
+    _VALID_URL = r'https?://(?:(?:www|embed)\.)?(?:%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' % ViewLiftBaseIE._DOMAINS_REGEX
     _TESTS = [{
         'url': 'http://embed.snagfilms.com/embed/player?filmId=74849a00-85a9-11e1-9660-123139220831&w=500',
         'md5': '2924e9215c6eff7a55ed35b72276bd93',
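
The tightened filmId group now insists on the canonical 8-4-4-4-12 UUID layout instead of any 36 characters drawn from hex digits and dashes, so malformed ids no longer match the embed URL. A minimal standalone sketch of the difference, using plain re outside the extractor (the second candidate string is invented for illustration):

import re

OLD = r'[\da-f-]{36}'                              # any 36 hex digits and/or dashes
NEW = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'  # canonical UUID shape

candidates = [
    '74849a00-85a9-11e1-9660-123139220831',  # filmId from the test URL above
    '------------------------------------',  # 36 dashes: structurally not a UUID
]
for candidate in candidates:
    print(candidate,
          bool(re.fullmatch(OLD, candidate)),
          bool(re.fullmatch(NEW, candidate)))
# The real filmId matches both patterns; the degenerate string only matches the old, looser one.
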
@@ -60,8 +63,10 @@ class ViewLiftEmbedIE(ViewLiftBaseIE):
 
         formats = []
         has_bitrate = False
-        for source in self._parse_json(js_to_json(self._search_regex(
-                r'(?s)sources:\s*(\[.+?\]),', webpage, 'json')), video_id):
+        sources = self._parse_json(self._search_regex(
+            r'(?s)sources:\s*(\[.+?\]),', webpage,
+            'sources', default='[]'), video_id, js_to_json)
+        for source in sources:
             file_ = source.get('file')
             if not file_:
                 continue
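
Passing default='[]' to the sources lookup makes this step non-fatal: a page with no inline sources: array now parses to an empty list instead of raising, which lets the HLS fallback added below take over. A rough sketch of that pattern with plain re/json (the real code routes the match through _search_regex and _parse_json with js_to_json, since the inline JavaScript is usually not strict JSON; the sample page string is invented):

import json
import re

def find_sources(webpage, default='[]'):
    # Grab the bracketed sources array if the player config is present,
    # otherwise fall back to an empty JSON array.
    match = re.search(r'(?s)sources:\s*(\[.+?\]),', webpage)
    return match.group(1) if match else default

page_without_player = '<html><body>no inline player config here</body></html>'
sources = json.loads(find_sources(page_without_player))
print(sources)  # [] -> caller sees "no formats yet" and can try another source
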
@@ -70,7 +75,8 @@ class ViewLiftEmbedIE(ViewLiftBaseIE):
             format_id = source.get('label') or ext
             if all(v in ('m3u8', 'hls') for v in (type_, ext)):
                 formats.extend(self._extract_m3u8_formats(
-                    file_, video_id, 'mp4', m3u8_id='hls'))
+                    file_, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id='hls', fatal=False))
             else:
                 bitrate = int_or_none(self._search_regex(
                     [r'(\d+)kbps', r'_\d{1,2}x\d{1,2}_(\d{3,})\.%s' % ext],
@@ -85,6 +91,13 @@ class ViewLiftEmbedIE(ViewLiftBaseIE):
                     'tbr': bitrate,
                     'height': height,
                 })
+        if not formats:
+            hls_url = self._parse_json(self._search_regex(
+                r'filmInfo\.src\s*=\s*({.+?});',
+                webpage, 'src'), video_id, js_to_json)['src']
+            formats = self._extract_m3u8_formats(
+                hls_url, video_id, 'mp4', 'm3u8_native',
+                m3u8_id='hls', fatal=False)
 
         field_preference = None if has_bitrate else ('height', 'tbr', 'format_id')
         self._sort_formats(formats, field_preference)
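
When the sources array yields nothing, the new block falls back to the filmInfo.src assignment and hands its src URL to the HLS extractor. A small illustration of that fallback on a made-up page snippet (the sample object is already valid JSON; the extractor additionally applies js_to_json to tolerate JavaScript-style syntax, and example.com stands in for a real stream URL):

import json
import re

page = '<script>filmInfo.src = {"src": "https://example.com/master.m3u8"};</script>'

match = re.search(r'filmInfo\.src\s*=\s*({.+?});', page)
hls_url = json.loads(match.group(1))['src'] if match else None
print(hls_url)  # https://example.com/master.m3u8 -> would feed _extract_m3u8_formats
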
@@ -109,10 +122,13 @@ class ViewLiftIE(ViewLiftBaseIE):
             'display_id': 'lost_for_life',
             'ext': 'mp4',
             'title': 'Lost for Life',
-            'description': 'md5:fbdacc8bb6b455e464aaf98bc02e1c82',
+            'description': 'md5:ea10b5a50405ae1f7b5269a6ec594102',
             'thumbnail': r're:^https?://.*\.jpg',
             'duration': 4489,
-            'categories': ['Documentary', 'Crime', 'Award Winning', 'Festivals']
+            'categories': 'mincount:3',
+            'age_limit': 14,
+            'upload_date': '20150421',
+            'timestamp': 1429656819,
         }
     }, {
         'url': 'http://www.snagfilms.com/show/the_world_cut_project/india',
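
The test expectations also move from exact category lists to 'mincount:3', i.e. a list with at least three entries, which survives the site reshuffling its category tags. A simplified sketch of how such an assertion can be evaluated (an illustration of the idea, not youtube-dl's actual test helper):

def check_mincount(expected, got):
    # 'mincount:N' asserts a lower bound on the number of items instead of exact equality.
    if isinstance(expected, str) and expected.startswith('mincount:'):
        return len(got) >= int(expected.split(':', 1)[1])
    return expected == got

print(check_mincount('mincount:3', ['Documentary', 'Crime', 'Award Winning', 'Festivals']))  # True
print(check_mincount('mincount:3', ['Documentary', 'Sports']))                               # False
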
@@ -125,7 +141,9 @@ class ViewLiftIE(ViewLiftBaseIE):
             'description': 'md5:5c168c5a8f4719c146aad2e0dfac6f5f',
             'thumbnail': r're:^https?://.*\.jpg',
             'duration': 979,
-            'categories': ['Documentary', 'Sports', 'Politics']
+            'categories': 'mincount:2',
+            'timestamp': 1399478279,
+            'upload_date': '20140507',
         }
     }, {
         # Film is not playable in your area.
@@ -138,9 +156,6 @@ class ViewLiftIE(ViewLiftBaseIE):
     }, {
         'url': 'http://www.winnersview.com/videos/the-good-son',
         'only_matching': True,
-    }, {
-        'url': 'http://www.kesari.tv/news/video/1461919076414',
-        'only_matching': True,
     }, {
         # Was once Kaltura embed
         'url': 'https://www.monumentalsportsnetwork.com/videos/john-carlson-postgame-2-25-15',
@@ -156,45 +171,96 @@ class ViewLiftIE(ViewLiftBaseIE):
             raise ExtractorError(
                 'Film %s is not available.' % display_id, expected=True)
 
-        film_id = self._search_regex(r'filmId=([\da-f-]{36})"', webpage, 'film id')
-
-        snag = self._parse_json(
-            self._search_regex(
-                r'Snag\.page\.data\s*=\s*(\[.+?\]);', webpage, 'snag'),
-            display_id)
-
-        for item in snag:
-            if item.get('data', {}).get('film', {}).get('id') == film_id:
-                data = item['data']['film']
-                title = data['title']
-                description = clean_html(data.get('synopsis'))
-                thumbnail = data.get('image')
-                duration = int_or_none(data.get('duration') or data.get('runtime'))
-                categories = [
-                    category['title'] for category in data.get('categories', [])
-                    if category.get('title')]
-                break
-        else:
-            title = self._search_regex(
-                r'itemprop="title">([^<]+)<', webpage, 'title')
-            description = self._html_search_regex(
-                r'(?s)<div itemprop="description" class="film-synopsis-inner ">(.+?)</div>',
-                webpage, 'description', default=None) or self._og_search_description(webpage)
-            thumbnail = self._og_search_thumbnail(webpage)
-            duration = parse_duration(self._search_regex(
-                r'<span itemprop="duration" class="film-duration strong">([^<]+)<',
-                webpage, 'duration', fatal=False))
-            categories = re.findall(r'<a href="/movies/[^"]+">([^<]+)</a>', webpage)
-
-        return {
-            '_type': 'url_transparent',
-            'url': 'http://%s/embed/player?filmId=%s' % (domain, film_id),
-            'id': film_id,
-            'display_id': display_id,
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-            'duration': duration,
-            'categories': categories,
-            'ie_key': 'ViewLiftEmbed',
-        }
+        initial_store_state = self._search_regex(
+            r"window\.initialStoreState\s*=.*?JSON\.parse\(unescape\(atob\('([^']+)'\)\)\)",
+            webpage, 'Initial Store State', default=None)
+        if initial_store_state:
+            modules = self._parse_json(compat_urllib_parse_unquote(base64.b64decode(
+                initial_store_state).decode()), display_id)['page']['data']['modules']
+            content_data = next(m['contentData'][0] for m in modules if m.get('moduleType') == 'VideoDetailModule')
+            gist = content_data['gist']
+            film_id = gist['id']
+            title = gist['title']
+            video_assets = content_data['streamingInfo']['videoAssets']
+
+            formats = []
+            mpeg_video_assets = video_assets.get('mpeg') or []
+            for video_asset in mpeg_video_assets:
+                video_asset_url = video_asset.get('url')
+                if not video_asset:
+                    continue
+                bitrate = int_or_none(video_asset.get('bitrate'))
+                height = int_or_none(self._search_regex(
+                    r'^_?(\d+)[pP]$', video_asset.get('renditionValue'),
+                    'height', default=None))
+                formats.append({
+                    'url': video_asset_url,
+                    'format_id': 'http%s' % ('-%d' % bitrate if bitrate else ''),
+                    'tbr': bitrate,
+                    'height': height,
+                    'vcodec': video_asset.get('codec'),
+                })
+
+            hls_url = video_assets.get('hls')
+            if hls_url:
+                formats.extend(self._extract_m3u8_formats(
+                    hls_url, film_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+            self._sort_formats(formats, ('height', 'tbr', 'format_id'))
+
+            info = {
+                'id': film_id,
+                'display_id': display_id,
+                'title': title,
+                'description': gist.get('description'),
+                'thumbnail': gist.get('videoImageUrl'),
+                'duration': int_or_none(gist.get('runtime')),
+                'age_limit': parse_age_limit(content_data.get('parentalRating', '').replace('_', '-')),
+                'timestamp': int_or_none(gist.get('publishDate'), 1000),
+                'formats': formats,
+            }
+            for k in ('categories', 'tags'):
+                info[k] = [v['title'] for v in content_data.get(k, []) if v.get('title')]
+            return info
+        else:
+            film_id = self._search_regex(r'filmId=([\da-f-]{36})"', webpage, 'film id')
+
+            snag = self._parse_json(
+                self._search_regex(
+                    r'Snag\.page\.data\s*=\s*(\[.+?\]);', webpage, 'snag', default='[]'),
+                display_id)
+
+            for item in snag:
+                if item.get('data', {}).get('film', {}).get('id') == film_id:
+                    data = item['data']['film']
+                    title = data['title']
+                    description = clean_html(data.get('synopsis'))
+                    thumbnail = data.get('image')
+                    duration = int_or_none(data.get('duration') or data.get('runtime'))
+                    categories = [
+                        category['title'] for category in data.get('categories', [])
+                        if category.get('title')]
+                    break
+            else:
+                title = self._search_regex(
+                    r'itemprop="title">([^<]+)<', webpage, 'title')
+                description = self._html_search_regex(
+                    r'(?s)<div itemprop="description" class="film-synopsis-inner ">(.+?)</div>',
+                    webpage, 'description', default=None) or self._og_search_description(webpage)
+                thumbnail = self._og_search_thumbnail(webpage)
+                duration = parse_duration(self._search_regex(
+                    r'<span itemprop="duration" class="film-duration strong">([^<]+)<',
+                    webpage, 'duration', fatal=False))
+                categories = re.findall(r'<a href="/movies/[^"]+">([^<]+)</a>', webpage)
+
+            return {
+                '_type': 'url_transparent',
+                'url': 'http://%s/embed/player?filmId=%s' % (domain, film_id),
+                'id': film_id,
+                'display_id': display_id,
+                'title': title,
+                'description': description,
+                'thumbnail': thumbnail,
+                'duration': duration,
+                'categories': categories,
+                'ie_key': 'ViewLiftEmbed',
+            }
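
The new primary path reverses what the site's frontend does with JSON.parse(unescape(atob(...))): base64-decode the window.initialStoreState blob, percent-decode it, parse the JSON, then walk page.data.modules for the VideoDetailModule to reach the gist metadata and streamingInfo.videoAssets. A self-contained sketch of that round trip on a tiny invented store state (field names mirror the diff above; the payload values and example.com URLs are made up for illustration):

import base64
import json
from urllib.parse import quote, unquote

# Miniature of the structure the extractor walks; real pages carry far more data.
store_state = {
    'page': {'data': {'modules': [{
        'moduleType': 'VideoDetailModule',
        'contentData': [{
            'gist': {'id': 'abcd1234', 'title': 'Lost for Life',
                     'runtime': 4489, 'publishDate': 1429656819000},
            'streamingInfo': {'videoAssets': {
                'mpeg': [{'url': 'https://example.com/video_1080p.mp4',
                          'bitrate': 3000, 'renditionValue': '_1080p'}],
                'hls': 'https://example.com/master.m3u8',
            }},
            'parentalRating': 'TV_14',
        }],
    }]}},
}

# Encode roughly the way the page does (URL-escape, then base64), then reverse it
# the way the extractor does (b64decode + unquote + JSON parse).
blob = base64.b64encode(quote(json.dumps(store_state)).encode()).decode()
modules = json.loads(unquote(base64.b64decode(blob).decode()))['page']['data']['modules']
content_data = next(m['contentData'][0] for m in modules
                    if m.get('moduleType') == 'VideoDetailModule')

gist = content_data['gist']
assets = content_data['streamingInfo']['videoAssets']
print(gist['id'], gist['title'], assets['hls'])
print(content_data['parentalRating'].replace('_', '-'))  # 'TV-14' -> parse_age_limit -> 14
print(gist['publishDate'] // 1000)  # milliseconds -> seconds, like int_or_none(..., 1000)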