# coding: utf-8
from __future__ import unicode_literals

import json
import math
import re

from .aws import AWSIE
from ..compat import compat_HTTPError
from ..utils import (
    clean_html,
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    parse_iso8601,
    str_or_none,
    urlencode_postdata,
)


class ShahidBaseIE(AWSIE):
    _AWS_PROXY_HOST = 'api2.shahid.net'
    _AWS_API_KEY = '2RRtuMHx95aNI1Kvtn2rChEuwsCogUd4samGPjLh'
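
    # The catalogue API sits behind an AWS API Gateway: _call_api signs each
    # request with AWS Signature Version 4 via AWSIE._aws_execute_api and the
    # gateway key above, while _handle_error surfaces the human-readable
    # 'faults' messages the API returns on HTTP errors.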
    def _handle_error(self, e):
        fail_data = self._parse_json(
            e.cause.read().decode('utf-8'), None, fatal=False)
        if fail_data:
            faults = fail_data.get('faults', [])
            faults_message = ', '.join([clean_html(fault['userMessage']) for fault in faults if fault.get('userMessage')])
            if faults_message:
                raise ExtractorError(faults_message, expected=True)

    def _call_api(self, path, video_id, request=None):
        query = {}
        if request:
            query['request'] = json.dumps(request)
        try:
            return self._aws_execute_api({
                'uri': '/proxy/v2/' + path,
                'access_key': 'AKIAI6X4TYCIXM2B7MUQ',
                'secret_key': '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',
            }, video_id, query)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError):
                self._handle_error(e)
            raise

class ShahidIE(ShahidBaseIE):
    _NETRC_MACHINE = 'shahid'
    _VALID_URL = r'https?://shahid\.mbc\.net/ar/(?:serie|show|movie)s/[^/]+/(?P<type>episode|clip|movie)-(?P<id>\d+)'
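    # The <type> group distinguishes episodes, clips and movies; clips are
    # fetched through the episode metadata endpoint (see _real_extract).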
    _TESTS = [{
        'url': 'https://shahid.mbc.net/ar/shows/%D9%85%D8%AC%D9%84%D8%B3-%D8%A7%D9%84%D8%B4%D8%A8%D8%A7%D8%A8-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-1/clip-275286',
        'info_dict': {
            'id': '275286',
            'ext': 'mp4',
            'title': 'مجلس الشباب الموسم 1 كليب 1',
            'timestamp': 1506988800,
            'upload_date': '20171003',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'https://shahid.mbc.net/ar/movies/%D8%A7%D9%84%D9%82%D9%86%D8%A7%D8%B5%D8%A9/movie-151746',
        'only_matching': True
    }, {
        # shahid plus subscriber only
        'url': 'https://shahid.mbc.net/ar/series/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/episode-90511',
        'only_matching': True
    }]
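
    # Credentials come from --username/--password or a .netrc entry for the
    # machine named in _NETRC_MACHINE; without them login is simply skipped,
    # so subscriber-only items may not be extractable.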
    def _real_initialize(self):
        email, password = self._get_login_info()
        if email is None:
            return

        try:
            user_data = self._download_json(
                'https://shahid.mbc.net/wd/service/users/login',
                None, 'Logging in', data=json.dumps({
                    'email': email,
                    'password': password,
                    'basic': 'false',
                }).encode('utf-8'), headers={
                    'Content-Type': 'application/json; charset=UTF-8',
                })['user']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError):
                self._handle_error(e)
            raise
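
        # The login response carries the subscriber's identifiers; posting them
        # to populateContext appears to bind the web session to that account
        # before any playout URLs are requested.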
        self._download_webpage(
            'https://shahid.mbc.net/populateContext',
            None, 'Populate Context', data=urlencode_postdata({
                'firstName': user_data['firstName'],
                'lastName': user_data['lastName'],
                'userName': user_data['email'],
                'csg_user_name': user_data['email'],
                'subscriberId': user_data['id'],
                'sessionId': user_data['sessionId'],
            }))

    def _real_extract(self, url):
        page_type, video_id = re.match(self._VALID_URL, url).groups()
        if page_type == 'clip':
            page_type = 'episode'

        playout = self._call_api(
            'playout/url/' + video_id, video_id)['playout']

        if playout.get('drm'):
            raise ExtractorError('This video is DRM protected.', expected=True)
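
        # playout['url'] points at an HLS (m3u8) playlist, which is expanded
        # into one format per variant below.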
        formats = self._extract_m3u8_formats(playout['url'], video_id, 'mp4')
        self._sort_formats(formats)

        # video = self._call_api(
        #     'product/id', video_id, {
        #         'id': video_id,
        #         'productType': 'ASSET',
        #         'productSubType': page_type.upper()
        #     })['productModel']
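
        # Video metadata comes from the older v1_1 REST API with its static
        # apiKey/hash query parameters; the AWS-proxied product/id request
        # above is kept commented out, presumably as a reference.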
        response = self._download_json(
            'http://api.shahid.net/api/v1_1/%s/%s' % (page_type, video_id),
            video_id, 'Downloading video JSON', query={
                'apiKey': 'sh@hid0nlin3',
                'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
            })
        data = response.get('data', {})
        error = data.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())),
                expected=True)
        video = data[page_type]
        title = video['title']
        categories = [
            category['name']
            for category in video.get('genres', []) if 'name' in category]
        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': video.get('thumbnailUrl'),
            'duration': int_or_none(video.get('duration')),
            'timestamp': parse_iso8601(video.get('referenceDate')),
            'categories': categories,
            'series': video.get('showTitle') or video.get('showName'),
            'season': video.get('seasonTitle'),
            'season_number': int_or_none(video.get('seasonNumber')),
            'season_id': str_or_none(video.get('seasonId')),
            'episode_number': int_or_none(video.get('number')),
            'episode_id': video_id,
            'formats': formats,
        }

class ShahidShowIE(ShahidBaseIE):
    _VALID_URL = r'https?://shahid\.mbc\.net/ar/(?:show|serie)s/[^/]+/(?:show|series)-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://shahid.mbc.net/ar/shows/%D8%B1%D8%A7%D9%85%D8%B2-%D9%82%D8%B1%D8%B4-%D8%A7%D9%84%D8%A8%D8%AD%D8%B1/show-79187',
        'info_dict': {
            'id': '79187',
            'title': 'رامز قرش البحر',
            'description': 'md5:c88fa7e0f02b0abd39d417aee0d046ff',
        },
        'playlist_mincount': 32,
    }, {
        'url': 'https://shahid.mbc.net/ar/series/How-to-live-Longer-(The-Big-Think)/series-291861',
        'only_matching': True
    }]
    _PAGE_SIZE = 30
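    # Items requested per playlist page; page_func hard-codes the same value
    # as pageSize, and the total page count below is derived from it.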

    def _real_extract(self, url):
        show_id = self._match_id(url)

        product = self._call_api(
            'playableAsset', show_id, {'showId': show_id})['productModel']
        playlist = product['playlist']
        playlist_id = playlist['id']
        show = product.get('show', {})
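
        # playableAsset resolves the show to its productModel; the playlist id
        # it contains is what the paging endpoint below is keyed on.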

        def page_func(page_num):
            playlist = self._call_api(
                'product/playlist', show_id, {
                    'playListId': playlist_id,
                    'pageNumber': page_num,
                    'pageSize': 30,
                    'sorts': [{
                        'order': 'DESC',
                        'type': 'SORTDATE'
                    }],
                })
            for product in playlist.get('productList', {}).get('products', []):
                # productUrl may be missing; default to a dict so .get() is safe
                product_url = product.get('productUrl', {}).get('url')
                if not product_url:
                    continue
                yield self.url_result(
                    product_url, 'Shahid',
                    str_or_none(product.get('id')),
                    product.get('title'))
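
        # playlist['count'] is the total number of items, so the page count is
        # known in advance and InAdvancePagedList can fetch pages lazily.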
        entries = InAdvancePagedList(
            page_func,
            math.ceil(playlist['count'] / self._PAGE_SIZE),
            self._PAGE_SIZE)

        return self.playlist_result(
            entries, show_id, show.get('title'), show.get('description'))