# yt-dlp/yt_dlp/extractor/aparat.py
from .common import InfoExtractor
from ..utils import (
    get_element_by_id,
    int_or_none,
    merge_dicts,
    mimetype2ext,
    url_or_none,
)
class AparatIE(InfoExtractor):
    """Extractor for aparat.com (Iranian video host): watch pages and embeds."""

    _VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
    _EMBED_REGEX = [r'<iframe .*?src="(?P<url>http://www\.aparat\.com/video/[^"]+)"']

    _TESTS = [{
        'url': 'http://www.aparat.com/v/wP8On',
        'md5': '131aca2e14fe7c4dcb3c4877ba300c89',
        'info_dict': {
            'id': 'wP8On',
            'ext': 'mp4',
            'title': 'تیم گلکسی 11 - زومیت',
            'description': 'md5:096bdabcdcc4569f2b8a5e903a3b3028',
            'duration': 231,
            'timestamp': 1387394859,
            'upload_date': '20131218',
            'view_count': int,
        },
    }, {
        # multiple formats
        'url': 'https://www.aparat.com/v/8dflw/',
        'only_matching': True,
    }]

    def _parse_options(self, webpage, video_id, fatal=True):
        """Extract the player's JS ``options`` object from *webpage* as a dict.

        Returns ``{}`` when the object is absent: the regex search falls back
        to ``'{}'``, so extraction is effectively non-fatal regardless of
        *fatal* (the parameter is kept for interface compatibility).
        """
        return self._parse_json(self._search_regex(
            r'options\s*=\s*({.+?})\s*;', webpage, 'options', default='{}'), video_id)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Prefer the regular watch page: if available, it provides more
        # metadata (JSON-LD, <meta> tags) than the bare embed frame.
        webpage = self._download_webpage(url, video_id, fatal=False)
        options = self._parse_options(webpage, video_id, fatal=False)

        # Fall back to the embed frame when the watch page did not contain
        # the player options object.
        if not options:
            webpage = self._download_webpage(
                'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
                video_id, 'Downloading embed webpage')
            options = self._parse_options(webpage, video_id)

        formats = []
        # options['multiSRC'] is a list of source groups; each group is a
        # list of {'src', 'type', 'label'} dicts (hedged: inferred from the
        # accesses below — confirm against live pages).
        for sources in (options.get('multiSRC') or []):
            for item in sources:
                if not isinstance(item, dict):
                    continue
                file_url = url_or_none(item.get('src'))
                if not file_url:
                    continue
                item_type = item.get('type')
                if item_type == 'application/vnd.apple.mpegurl':
                    formats.extend(self._extract_m3u8_formats(
                        file_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                else:
                    ext = mimetype2ext(item.get('type'))
                    label = item.get('label')
                    formats.append({
                        'url': file_url,
                        'ext': ext,
                        'format_id': 'http-%s' % (label or ext),
                        # Height is inferred from labels like '720p'.
                        'height': int_or_none(self._search_regex(
                            r'(\d+)[pP]', label or '', 'height',
                            default=None)),
                    })

        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = get_element_by_id('videoTitle', webpage) or \
                self._html_search_meta(['og:title', 'twitter:title', 'DC.Title', 'title'], webpage, fatal=True)

        return merge_dicts(info, {
            'id': video_id,
            'thumbnail': url_or_none(options.get('poster')),
            'duration': int_or_none(options.get('duration')),
            'formats': formats,
        })