diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index 63527bc85c..9e59c0b348 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -1,25 +1,17 @@
 from __future__ import unicode_literals
 
 import re
-import json
 import xml.etree.ElementTree
 
 from .common import InfoExtractor
-from ..utils import unified_strdate
-
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
 
 class GDCVaultIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
     _TESTS = [
-        {
-            u'url': u'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
-            u'md5': u'05763e5edd1a74776999a12b02ee1c4e',
-            u'info_dict': {
-                u"id": u"1015683",
-                u"ext": u"flv",
-                u"title": u"Embracing the Dark Art of Mathematical Modeling in AI"
-            }
-        },
         {
             u'url': u'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
             u'md5': u'7ce8388f544c88b7ac11c7ab1b593704',
@@ -29,69 +21,116 @@ class GDCVaultIE(InfoExtractor):
                 u"title": u"Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)"
             }
         },
+        {
+            u'url': u'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
+            u'md5': u'fca91078a90f28aa5164ef6b23b78654',
+            u'info_dict': {
+                u"id": u"1015683",
+                u"ext": u"flv",
+                u"title": u"Embracing the Dark Art of Mathematical Modeling in AI"
+            }
+        },
     ]
 
+    def _parse_mp4(self, xml_description):
+        video_formats = []
+        mp4_video = xml_description.find('./metadata/mp4video')
+        if mp4_video is None:
+            return None
+
+        mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video.text)
+        video_root = mobj.group('root')
+        formats = xml_description.findall('./metadata/MBRVideos/MBRVideo')
+        for format in formats:
+            mobj = re.match(r'mp4\:(?P<path>.*)', format.find('streamName').text)
+            url = video_root + mobj.group('path')
+            vbr = format.find('bitrate').text
+            video_formats.append({
+                'url': url,
+                'vbr': int(vbr),
+            })
+        return video_formats
+
+    def _parse_flv(self, xml_description):
+        video_formats = []
+        akami_url = xml_description.find('./metadata/akamaiHost').text
+        slide_video_path = xml_description.find('./metadata/slideVideo').text
+        video_formats.append({
+            'url': 'rtmp://' + akami_url + '/' + slide_video_path,
+            'format_note': 'slide deck video',
+            'quality': -2,
+            'preference': -2,
+            'format_id': 'slides',
+        })
+        speaker_video_path = xml_description.find('./metadata/speakerVideo').text
+        video_formats.append({
+            'url': 'rtmp://' + akami_url + '/' + speaker_video_path,
+            'format_note': 'speaker video',
+            'quality': -1,
+            'preference': -1,
+            'format_id': 'speaker',
+        })
+        return video_formats
+
+    def _login(self, webpage_url, video_id):
+        (username, password) = self._get_login_info()
+        if username is None or password is None:
+            self.report_warning(u'It looks like ' + webpage_url + u' requires a login. Try specifying a username and password and try again.')
+            return None
+
+        mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
+        login_url = mobj.group('root_url') + 'api/login.php'
+        logout_url = mobj.group('root_url') + 'logout'
+
+        login_form = {
+            'email': username,
+            'password': password,
+        }
+
+        request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        login_content = self._download_webpage(request, video_id, 'Logging in')
+        start_page = self._download_webpage(webpage_url, video_id, 'Getting authenticated video page')
+        logout_content = self._download_webpage(logout_url, video_id, 'Logging out')
+
+        return start_page
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         webpage_url = 'http://www.gdcvault.com/play/' + video_id
-        start_page = self._download_webpage(webpage_url, video_id)
-        self.report_extraction(video_id)
+        start_page = self._download_webpage(webpage_url, video_id)
+
+        xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root', None, False)
+        if xml_root is None:
+            # Probably need to authenticate
+            start_page = self._login(webpage_url, video_id)
+            if start_page is None:
+                self.report_warning(u'Could not login.')
+            else:
+                # Grab the url from the authenticated page
+                xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root')
+
+        xml_name = self._html_search_regex(r'<iframe src=".*?\?xml=(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename', None, False)
+        if xml_name is None:
+            # Fallback to the older format
+            xml_name = self._html_search_regex(r'
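
Not part of the patch, but a minimal sketch of how the new login path can be exercised through the embedding API. The credentials below are placeholders; the 'username' and 'password' options are what _get_login_info() reads inside the extractor, and are the same values the --username/--password command-line flags set.

    import youtube_dl

    # Placeholder credentials; replace with a real GDC Vault account.
    ydl_opts = {
        'username': 'member@example.com',
        'password': 'placeholder-password',
    }

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        # Talk URL taken from the tests above. When the anonymous page has no
        # player iframe, _real_extract() falls back to _login() using the
        # options supplied here.
        ydl.download(['http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of'])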