[viidea] Improve and cleanup (Closes #7390)

* Optimize requests for multipart videos
* Fix cfg regex
* Improve titles and identifiers
This commit is contained in:
Sergey M․ 2015-11-08 06:54:27 +06:00 committed by Sergey M․
parent 6fdb39ded1
commit e8ce2375e0

View File

@@ -35,35 +35,42 @@ class ViideaIE(InfoExtractor):
_TESTS = [{ _TESTS = [{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/', 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
'info_dict': { 'info_dict': {
'id': '20171_part1', 'id': '20171',
'display_id': 'promogram_igor_mekjavic_eng',
'ext': 'mp4', 'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics', 'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'thumbnail': 're:http://.*\.jpg',
'timestamp': 1372349289,
'upload_date': '20130627', 'upload_date': '20130627',
'duration': 565, 'duration': 565,
'thumbnail': 're:http://.*\.jpg',
}, },
}, { }, {
# video with invalid direct format links (HTTP 403) # video with invalid direct format links (HTTP 403)
'url': 'http://videolectures.net/russir2010_filippova_nlp/', 'url': 'http://videolectures.net/russir2010_filippova_nlp/',
'info_dict': { 'info_dict': {
'id': '14891_part1', 'id': '14891',
'display_id': 'russir2010_filippova_nlp',
'ext': 'flv', 'ext': 'flv',
'title': 'NLP at Google', 'title': 'NLP at Google',
'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3', 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
'duration': 5352,
'thumbnail': 're:http://.*\.jpg', 'thumbnail': 're:http://.*\.jpg',
'timestamp': 1284375600,
'upload_date': '20100913',
'duration': 5352,
}, },
'params': { 'params': {
# rtmp download # rtmp download
'skip_download': True, 'skip_download': True,
}, },
}, { }, {
# event playlist
'url': 'http://videolectures.net/deeplearning2015_montreal/', 'url': 'http://videolectures.net/deeplearning2015_montreal/',
'info_dict': { 'info_dict': {
'id': '23181', 'id': '23181',
'title': 'Deep Learning Summer School, Montreal 2015', 'title': 'Deep Learning Summer School, Montreal 2015',
'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7', 'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7',
'thumbnail': 're:http://.*\.jpg',
'timestamp': 1438560000, 'timestamp': 1438560000,
}, },
'playlist_count': 30, 'playlist_count': 30,
@@ -72,37 +79,54 @@ class ViideaIE(InfoExtractor):
'url': 'http://videolectures.net/mlss09uk_bishop_ibi/', 'url': 'http://videolectures.net/mlss09uk_bishop_ibi/',
'info_dict': { 'info_dict': {
'id': '9737', 'id': '9737',
'display_id': 'mlss09uk_bishop_ibi',
'title': 'Introduction To Bayesian Inference', 'title': 'Introduction To Bayesian Inference',
'thumbnail': 're:http://.*\.jpg',
'timestamp': 1251622800, 'timestamp': 1251622800,
}, },
'playlist': [{ 'playlist': [{
'info_dict': { 'info_dict': {
'id': '9737_part1', 'id': '9737_part1',
'display_id': 'mlss09uk_bishop_ibi_part1',
'ext': 'wmv', 'ext': 'wmv',
'title': 'Introduction To Bayesian Inference', 'title': 'Introduction To Bayesian Inference (Part 1)',
'thumbnail': 're:http://.*\.jpg',
'duration': 4622,
'timestamp': 1251622800,
'upload_date': '20090830',
}, },
}, { }, {
'info_dict': { 'info_dict': {
'id': '9737_part2', 'id': '9737_part2',
'display_id': 'mlss09uk_bishop_ibi_part2',
'ext': 'wmv', 'ext': 'wmv',
'title': 'Introduction To Bayesian Inference', 'title': 'Introduction To Bayesian Inference (Part 2)',
'thumbnail': 're:http://.*\.jpg',
'duration': 5641,
'timestamp': 1251622800,
'upload_date': '20090830',
}, },
}], }],
'playlist_count': 2, 'playlist_count': 2,
}] }]
def _real_extract(self, url): def _real_extract(self, url):
lecture_slug, part = re.match(self._VALID_URL, url).groups() lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, lecture_slug) webpage = self._download_webpage(url, lecture_slug)
cfg = self._parse_json(self._search_regex([r'cfg\s*:\s*({.+?}),[\da-zA-Z_]:\(?function', r'cfg\s*:\s*({[^}]+})'], webpage, 'cfg'), lecture_slug, js_to_json) cfg = self._parse_json(self._search_regex(
[r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function',
r'cfg\s*:\s*({[^}]+})'],
webpage, 'cfg'), lecture_slug, js_to_json)
lecture_id = compat_str(cfg['obj_id']) lecture_id = compat_str(cfg['obj_id'])
base_url = self._proto_relative_url(cfg['livepipe'], 'http:') base_url = self._proto_relative_url(cfg['livepipe'], 'http:')
lecture_data = self._download_json('%s/site/api/lecture/%s?format=json' % (base_url, lecture_id), lecture_id)['lecture'][0] lecture_data = self._download_json(
'%s/site/api/lecture/%s?format=json' % (base_url, lecture_id),
lecture_id)['lecture'][0]
lecture_info = { lecture_info = {
'id': lecture_id, 'id': lecture_id,
@@ -113,31 +137,52 @@ def _real_extract(self, url):
'thumbnail': lecture_data.get('thumb'), 'thumbnail': lecture_data.get('thumb'),
} }
entries = [] playlist_entries = []
parts = cfg.get('videos') lecture_type = lecture_data.get('type')
parts = [compat_str(video) for video in cfg.get('videos', [])]
if parts: if parts:
if len(parts) == 1: multipart = len(parts) > 1
part = compat_str(parts[0])
if part: def extract_part(part_id):
smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part) smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id)
smil = self._download_smil(smil_url, lecture_id) smil = self._download_smil(smil_url, lecture_id)
info = self._parse_smil(smil, smil_url, lecture_id) info = self._parse_smil(smil, smil_url, lecture_id)
info['id'] = '%s_part%s' % (lecture_id, part) info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id)
info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id)
if multipart:
info['title'] += ' (Part %s)' % part_id
switch = smil.find('.//switch') switch = smil.find('.//switch')
if switch is not None: if switch is not None:
info['duration'] = parse_duration(switch.attrib.get('dur')) info['duration'] = parse_duration(switch.attrib.get('dur'))
return info item_info = lecture_info.copy()
item_info.update(info)
return item_info
if explicit_part_id or not multipart:
result = extract_part(explicit_part_id or parts[0])
else: else:
for part in parts: result = {
entries.append(self.url_result('%s/%s/video/%s' % (base_url, lecture_slug, part), 'Viidea')) '_type': 'multi_video',
lecture_info['_type'] = 'multi_video' 'entries': [extract_part(part) for part in parts],
if not parts or lecture_data.get('type') == 'evt': }
# Probably a playlist result.update(lecture_info)
playlist_webpage = self._download_webpage('%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id)
# Immediately return explicitly requested part or non event item
if explicit_part_id or lecture_type != 'evt':
return result
playlist_entries.append(result)
# It's probably a playlist
if not parts or lecture_type == 'evt':
playlist_webpage = self._download_webpage(
'%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id)
entries = [ entries = [
self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea') self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea')
for _, video_url in re.findall(r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)] for _, video_url in re.findall(
lecture_info['_type'] = 'playlist' r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)]
playlist_entries.extend(entries)
lecture_info['entries'] = entries playlist = self.playlist_result(playlist_entries, lecture_id)
return lecture_info playlist.update(lecture_info)
return playlist