# encoding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
sanitized_Request,
)
class IviIE(InfoExtractor):
IE_DESC = 'ivi.ru'
IE_NAME = 'ivi'
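    # Matches both watch pages (optionally prefixed by a compilation slug)
    # and embedded player URLs that pass the id via the videoId query parameter.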
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)'
_TESTS = [
# Single movie
{
'url': 'http://www.ivi.ru/watch/53141',
'md5': '6ff5be2254e796ed346251d117196cf4',
'info_dict': {
'id': '53141',
'ext': 'mp4',
'title': 'Иван Васильевич меняет профессию',
'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
'duration': 5498,
                'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
},
        # Episode of a series
{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
'md5': '221f56b35e3ed815fde2df71032f4b3e',
'info_dict': {
'id': '9549',
'ext': 'mp4',
'title': 'Двое из ларца - Дело Гольдберга (1 часть)',
'series': 'Двое из ларца',
'season': 'Сезон 1',
'season_number': 1,
'episode': 'Дело Гольдберга (1 часть)',
'episode_number': 1,
'duration': 2655,
                'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
}
]
# Sorted by quality
_KNOWN_FORMATS = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
def _real_extract(self, url):
video_id = self._match_id(url)
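        # The metadata API is a JSON-RPC-style endpoint: POST a 'da.content.get'
        # call with the video id and site parameters to get stream URLs and metadata.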
data = {
'method': 'da.content.get',
'params': [
video_id, {
'site': 's183',
'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
'contentid': video_id
}
]
}
request = sanitized_Request(
'http://api.digitalaccess.ru/api/json/', json.dumps(data))
video_json = self._download_json(
request, video_id, 'Downloading video JSON')
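        # The API reports failures in an 'error' object; 'NoRedisValidData'
        # indicates that no content exists for the requested id.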
if 'error' in video_json:
error = video_json['error']
if error['origin'] == 'NoRedisValidData':
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
raise ExtractorError(
'Unable to download video %s: %s' % (video_id, error['message']),
expected=True)
result = video_json['result']
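        # Keep only recognised content formats and rank them by their position
        # in _KNOWN_FORMATS, which is ordered from lowest to highest quality.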
formats = [{
'url': x['url'],
'format_id': x['content_format'],
'preference': self._KNOWN_FORMATS.index(x['content_format']),
} for x in result['files'] if x['content_format'] in self._KNOWN_FORMATS]
self._sort_formats(formats)
title = result['title']
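        # Episodes of a series carry the series name in 'compilation'; use the
        # bare title as the episode name and prefix the full title with the series.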
duration = int_or_none(result.get('duration'))
compilation = result.get('compilation')
episode = title if compilation else None
title = '%s - %s' % (compilation, title) if compilation is not None else title
thumbnails = [{
'url': preview['url'],
'id': preview.get('content_format'),
} for preview in result.get('preview', []) if preview.get('url')]
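        # Season and episode numbers are not part of the API response, so they
        # are scraped from the watch page markup instead.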
webpage = self._download_webpage(url, video_id)
season = self._search_regex(
r'<li[^>]+class="season active"[^>]*><a[^>]+>([^<]+)',
webpage, 'season', default=None)
season_number = int_or_none(self._search_regex(
r'<li[^>]+class="season active"[^>]*><a[^>]+data-season(?:-index)?="(\d+)"',
webpage, 'season number', default=None))
episode_number = int_or_none(self._search_regex(
r'<meta[^>]+itemprop="episode"[^>]*>\s*<meta[^>]+itemprop="episodeNumber"[^>]+content="(\d+)',
webpage, 'episode number', default=None))
description = self._og_search_description(webpage, default=None) or self._html_search_meta(
'description', webpage, 'description', default=None)
return {
'id': video_id,
'title': title,
'series': compilation,
'season': season,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
'thumbnails': thumbnails,
'description': description,
'duration': duration,
'formats': formats,
}
class IviCompilationIE(InfoExtractor):
IE_DESC = 'ivi.ru compilations'
IE_NAME = 'ivi:compilation'
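    # The (?!\d+) lookahead keeps purely numeric /watch/ URLs (single videos,
    # handled by IviIE) from matching this extractor.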
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
_TESTS = [{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
'info_dict': {
'id': 'dvoe_iz_lartsa',
'title': 'Двое из ларца (2006 - 2008)',
},
'playlist_mincount': 24,
}, {
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
'info_dict': {
'id': 'dvoe_iz_lartsa/season1',
'title': 'Двое из ларца (2006 - 2008) 1 сезон',
},
'playlist_mincount': 12,
}]
def _extract_entries(self, html, compilation_id):
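        # Episode links on compilation/season pages have the form
        # /watch/<compilation>/<id> and carry a matching data-id attribute.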
return [
self.url_result(
'http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), IviIE.ie_key())
for serie in re.findall(
r'<a href="/watch/%s/(\d+)"[^>]+data-id="\1"' % compilation_id, html)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
compilation_id = mobj.group('compilationid')
season_id = mobj.group('seasonid')
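        # A /seasonN URL yields a playlist for that season only; a bare
        # compilation URL collects entries from every season page (or from the
        # compilation page itself when it has no seasons).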
if season_id is not None: # Season link
season_page = self._download_webpage(
url, compilation_id, 'Downloading season %s web page' % season_id)
playlist_id = '%s/season%s' % (compilation_id, season_id)
playlist_title = self._html_search_meta('title', season_page, 'title')
entries = self._extract_entries(season_page, compilation_id)
else: # Compilation link
compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
playlist_id = compilation_id
playlist_title = self._html_search_meta('title', compilation_page, 'title')
seasons = re.findall(
r'<a href="/watch/%s/season(\d+)' % compilation_id, compilation_page)
if not seasons: # No seasons in this compilation
entries = self._extract_entries(compilation_page, compilation_id)
else:
entries = []
for season_id in seasons:
season_page = self._download_webpage(
'http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
compilation_id, 'Downloading season %s web page' % season_id)
entries.extend(self._extract_entries(season_page, compilation_id))
return self.playlist_result(entries, playlist_id, playlist_title)