# youtube_dl/extractor/nhk.py

from __future__ import unicode_literals

import re

from .common import InfoExtractor


class NhkVodIE(InfoExtractor):
    _VALID_URL = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand/(?P<type>video|audio)/(?P<id>\d{7}|[a-z]+-\d{8}-\d+)'
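    # Episode IDs are either 7-digit numbers or slugs such as plugin-20190404-1 (see _TESTS).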
    # Content available only for a limited period of time. Visit
    # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
    _TESTS = [{
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',
        'only_matching': True,
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/',
        'only_matching': True,
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/',
        'only_matching': True,
    }]
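    # The first placeholder is filled with 'v' for video or 'r' for audio; the
    # rest take the episode ID, the language code, and an extra '/all' segment
    # for video requests (see _real_extract below).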
    _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sodesdlist/v7/episode/%s/%s/all%s.json'

    def _real_extract(self, url):
        lang, m_type, episode_id = re.match(self._VALID_URL, url).groups()
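        # Numeric (video) episode IDs are rewritten as NNNN-NNN (e.g. 2015173 -> 2015-173)
        # before being passed to the API.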
        if episode_id.isdigit():
            episode_id = episode_id[:4] + '-' + episode_id[4:]

        is_video = m_type == 'video'
        episode = self._download_json(
            self._API_URL_TEMPLATE % ('v' if is_video else 'r', episode_id, lang, '/all' if is_video else ''),
            episode_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'][0]
        title = episode.get('sub_title_clean') or episode['sub_title']
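        # Prefer the sanitized '<key>_clean' variant of a field when the API provides one.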
        def get_clean_field(key):
            return episode.get(key + '_clean') or episode.get(key)

        series = get_clean_field('title')
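        # Thumbnail variants: 'image' is assumed to be 640x360 and 'image_l' 1280x720.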
        thumbnails = []
        for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:
            img_path = episode.get('image' + s)
            if not img_path:
                continue
            thumbnails.append({
                'id': '%dp' % h,
                'height': h,
                'width': w,
                'url': 'https://www3.nhk.or.jp' + img_path,
            })

        info = {
            'id': episode_id + '-' + lang,
            'title': '%s - %s' % (series, title) if series and title else title,
            'description': get_clean_field('description'),
            'thumbnails': thumbnails,
            'series': series,
            'episode': title,
        }
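        # Video episodes are delegated to the Ooyala extractor via url_transparent;
        # audio episodes get their HLS (and RTMP) formats extracted directly.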
        if is_video:
            info.update({
                '_type': 'url_transparent',
                'ie_key': 'Ooyala',
                'url': 'ooyala:' + episode['vod_id'],
            })
        else:
            audio = episode['audio']
            audio_path = audio['audio']
            info['formats'] = self._extract_m3u8_formats(
                'https://nhks-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
                episode_id, 'm4a', m3u8_id='hls', fatal=False)
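            # The same audio path is also offered over RTMP/RTMPT.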
            for proto in ('rtmpt', 'rtmp'):
                info['formats'].append({
                    'ext': 'flv',
                    'format_id': proto,
                    'url': '%s://flv.nhk.or.jp/ondemand/mp4:flv%s' % (proto, audio_path),
                    'vcodec': 'none',
                })
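            # Tag every extracted audio format with the requested language.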
            for f in info['formats']:
                f['language'] = lang
        return info
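

# A minimal usage sketch (not part of the extractor), assuming youtube-dl is
# installed as a package; the sample URL is taken from _TESTS above and, as
# noted there, may no longer be available:
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',
#             download=False)
#         print(info.get('title'))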