mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-28 15:46:54 +01:00
b827ee921f
* [scrippsnetworks] Add new extractor(closes #19857)(closes #22981) * [teachable] Improve locked lessons detection (#23528) * [teachable] Fail with error message if no video URL found * [extractors] add missing import for ScrippsNetworksIE * [brightcove] cache brightcove player policy keys * [prosiebensat1] improve geo restriction handling(closes #23571) * [soundcloud] automatically update client id on failing requests * [spankbang] Fix extraction (closes #23307, closes #23423, closes #23444) * [spankbang] Improve removed video detection (#23423) * [brightcove] update policy key on failing requests * [pornhub] Fix extraction and add support for m3u8 formats (closes #22749, closes #23082) * [pornhub] Improve locked videos detection (closes #22449, closes #22780) * [brightcove] invalidate policy key cache on failing requests * [soundcloud] fix client id extraction for non fatal requests * [ChangeLog] Actualize [ci skip] * [devscripts/create-github-release] Switch to using PAT for authentication Basic authentication will be deprecated soon * release 2020.01.01 * [redtube] Detect private videos (#23518) * [vice] improve extraction(closes #23631) * [devscripts/create-github-release] Remove unused import * [wistia] improve format extraction and extract subtitles(closes #22590) * [nrktv:seriebase] Fix extraction (closes #23625) (#23537) * [discovery] fix anonymous token extraction(closes #23650) * [scrippsnetworks] add support for www.discovery.com videos * [scrippsnetworks] correct test case URL * [dctp] fix format extraction(closes #23656) * [pandatv] Remove extractor (#23630) * [naver] improve extraction - improve geo-restriction handling - extract automatic captions - extract uploader metadata - extract VLive HLS formats * [naver] improve metadata extraction * [cloudflarestream] improve extraction - add support for bytehighway.net domain - add support for signed URLs - extract thumbnail * [cloudflarestream] import embed URL extraction * [lego] fix extraction and 
extract subtitle(closes #23687) * [safari] Fix kaltura session extraction (closes #23679) (#23670) * [orf:fm4] Fix extraction (#23599) * [orf:radio] Clean description and improve extraction * [twitter] add support for promo_video_website cards(closes #23711) * [vodplatform] add support for embed.kwikmotion.com domain * [ndr:base:embed] Improve thumbnails extraction (closes #23731) * [canvas] Add support for new API endpoint and update tests (closes #17680, closes #18629) * [travis] Add flake8 job (#23720) * [yourporn] Fix extraction (closes #21645, closes #22255, closes #23459) * [ChangeLog] Actualize [ci skip] * release 2020.01.15 * [soundcloud] Restore previews extraction (closes #23739) * [orf:tvthek] Improve geo restricted videos detection (closes #23741) * [zype] improve extraction - extract subtitles(closes #21258) - support URLs with alternative keys/tokens(#21258) - extract more metadata * [americastestkitchen] fix extraction * [nbc] add support for nbc multi network URLs(closes #23049) * [ard] improve extraction(closes #23761) - simplify extraction - extract age limit and series - bypass geo-restriction * [ivi:compilation] Fix entries extraction (closes #23770) * [24video] Add support for 24video.vip (closes #23753) * [businessinsider] Fix jwplatform id extraction (closes #22929) (#22954) * [ard] add a missing condition * [azmedien] fix extraction(closes #23783) * [voicerepublic] fix extraction * [stretchinternet] fix extraction(closes #4319) * [youtube] Fix sigfunc name extraction (closes #23819) * [ChangeLog] Actualize [ci skip] * release 2020.01.24 * [soundcloud] imporve private playlist/set tracks extraction https://github.com/ytdl-org/youtube-dl/issues/3707#issuecomment-577873539 * [svt] fix article extraction(closes #22897)(closes #22919) * [svt] fix series extraction(closes #22297) * [viewlift] improve extraction - fix extraction(closes #23851) - add add support for authentication - add support for more domains * [vimeo] fix album extraction(closes 
#23864) * [tva] Relax _VALID_URL (closes #23903) * [tv5mondeplus] Fix extraction (closes #23907, closes #23911) * [twitch:stream] Lowercase channel id for stream request (closes #23917) * [sportdeutschland] Update to new sportdeutschland API They switched to SSL, but under a different host AND path... Remove the old test cases because these videos have become unavailable. * [popcorntimes] Add extractor (closes #23949) * [thisoldhouse] fix extraction(closes #23951) * [toggle] Add support for mewatch.sg (closes #23895) (#23930) * [compat] Introduce compat_realpath (refs #23991) * [update] Fix updating via symlinks (closes #23991) * [nytimes] improve format sorting(closes #24010) * [abc:iview] Support 720p (#22907) (#22921) * [nova:embed] Fix extraction (closes #23672) * [nova:embed] Improve (closes #23690) * [nova] Improve extraction (refs #23690) * [jpopsuki] Remove extractor (closes #23858) * [YoutubeDL] Fix playlist entry indexing with --playlist-items (closes #10591, closes #10622) * [test_YoutubeDL] Fix get_ids * [test_YoutubeDL] Add tests for #10591 (closes #23873) * [24video] Add support for porn.24video.net (closes #23779, closes #23784) * [npr] Add support for streams (closes #24042) * [ChangeLog] Actualize [ci skip] * release 2020.02.16 * [tv2dk:bornholm:play] Fix extraction (#24076) * [imdb] Fix extraction (closes #23443) * [wistia] Add support for multiple generic embeds (closes #8347, closes #11385) * [teachable] Add support for multiple videos per lecture (closes #24101) * [pornhd] Fix extraction (closes #24128) * [options] Remove duplicate short option -v for --version (#24162) * [extractor/common] Convert ISM manifest to unicode before processing on python 2 (#24152) * [YoutubeDL] Force redirect URL to unicode on python 2 * Remove no longer needed compat_str around geturl * [youjizz] Fix extraction (closes #24181) * [test_subtitles] Remove obsolete test * [zdf:channel] Fix tests * [zapiks] Fix test * [xtube] Fix metadata extraction (closes #21073, 
closes #22455) * [xtube:user] Fix test * [telecinco] Fix extraction (refs #24195) * [telecinco] Add support for article opening videos * [franceculture] Fix extraction (closes #24204) * [xhamster] Fix extraction (closes #24205) * [ChangeLog] Actualize [ci skip] * release 2020.03.01 * [vimeo] Fix subtitles URLs (#24209) * [servus] Add support for new URL schema (closes #23475, closes #23583, closes #24142) * [youtube:playlist] Fix tests (closes #23872) (#23885) * [peertube] Improve extraction * [peertube] Fix issues and improve extraction (closes #23657) * [pornhub] Improve title extraction (closes #24184) * [vimeo] fix showcase password protected video extraction(closes #24224) * [youtube] Fix age-gated videos support without login (closes #24248) * [youtube] Fix tests * [ChangeLog] Actualize [ci skip] * release 2020.03.06 * [nhk] update API version(closes #24270) * [youtube] Improve extraction in 429 error conditions (closes #24283) * [youtube] Improve age-gated videos extraction in 429 error conditions (refs #24283) * [youtube] Remove outdated code Additional get_video_info requests don't seem to provide any extra itags any longer * [README.md] Clarify 429 error * [pornhub] Add support for pornhubpremium.com (#24288) * [utils] Add support for cookies with spaces used instead of tabs * [ChangeLog] Actualize [ci skip] * release 2020.03.08 * Revert "[utils] Add support for cookies with spaces used instead of tabs" According to [1] TABs must be used as separators between fields. Files produces by some tools with spaces as separators are considered malformed. 1. https://curl.haxx.se/docs/http-cookies.html This reverts commitcff99c91d1
. * [utils] Add reference to cookie file format * Revert "[vimeo] fix showcase password protected video extraction(closes #24224)" This reverts commit12ee431676
. * [nhk] Relax _VALID_URL (#24329) * [nhk] Remove obsolete rtmp formats (closes #24329) * [nhk] Update m3u8 URL and use native hls (#24329) * [ndr] Fix extraction (closes #24326) * [xtube] Fix formats extraction (closes #24348) * [xtube] Fix typo * [hellporno] Fix extraction (closes #24399) * [cbc:watch] Add support for authentication * [cbc:watch] Fix authenticated device token caching (closes #19160) * [soundcloud] fix download url extraction(closes #24394) * [limelight] remove disabled API requests(closes #24255) * [bilibili] Add support for new URL schema with BV ids (closes #24439, closes #24442) * [bilibili] Add support for player.bilibili.com (closes #24402) * [teachable] Extract chapter metadata (closes #24421) * [generic] Look for teachable embeds before wistia * [teachable] Update upskillcourses domain New version does not use teachable platform any longer * [teachable] Update gns3 domain * [teachable] Update test * [ChangeLog] Actualize [ci skip] * [ChangeLog] Actualize [ci skip] * release 2020.03.24 * [spankwire] Fix extraction (closes #18924, closes #20648) * [spankwire] Add support for generic embeds (refs #24633) * [youporn] Add support form generic embeds * [mofosex] Add support for generic embeds (closes #24633) * [tele5] Fix extraction (closes #24553) * [extractor/common] Skip malformed ISM manifest XMLs while extracting ISM formats (#24667) * [tv4] Fix ISM formats extraction (closes #24667) * [twitch:clips] Extend _VALID_URL (closes #24290) (#24642) * [motherless] Fix extraction (closes #24699) * [nova:embed] Fix extraction (closes #24700) * [youtube] Skip broken multifeed videos (closes #24711) * [soundcloud] Extract AAC format * [soundcloud] Improve AAC format extraction (closes #19173, closes #24708) * [thisoldhouse] Fix video id extraction (closes #24548) Added support for: with of without "www." 
and either ".chorus.build" or ".com" It now validated correctly on older URL's ``` <iframe src="https://thisoldhouse.chorus.build/videos/zype/5e33baec27d2e50001d5f52f ``` and newer ones ``` <iframe src="https://www.thisoldhouse.com/videos/zype/5e2b70e95216cc0001615120 ``` * [thisoldhouse] Improve video id extraction (closes #24549) * [youtube] Fix DRM videos detection (refs #24736) * [options] Clarify doc on --exec command (closes #19087) (#24883) * [prosiebensat1] Improve extraction and remove 7tv.de support (#24948) * [prosiebensat1] Extract series metadata * [tenplay] Relax _VALID_URL (closes #25001) * [tvplay] fix Viafree extraction(closes #15189)(closes #24473)(closes #24789) * [yahoo] fix GYAO Player extraction and relax title URL regex(closes #24178)(closes #24778) * [youtube] Use redirected video id if any (closes #25063) * [youtube] Improve player id extraction and add tests * [extractor/common] Extract multiple JSON-LD entries * [crunchyroll] Fix and improve extraction (closes #25096, closes #25060) * [ChangeLog] Actualize [ci skip] * release 2020.05.03 * [puhutv] Remove no longer available HTTP formats (closes #25124) * [utils] Improve cookie files support + Add support for UTF-8 in cookie files * Skip malformed cookie file entries instead of crashing (invalid entry len, invalid expires at) * [dailymotion] Fix typo * [compat] Introduce compat_cookiejar_Cookie * [extractor/common] Use compat_cookiejar_Cookie for _set_cookie (closes #23256, closes #24776) To always ensure cookie name and value are bytestrings on python 2. * [orf] Add support for more radio stations (closes #24938) (#24968) * [uol] fix extraction(closes #22007) * [downloader/http] Finish downloading once received data length matches expected Always do this if possible, i.e. if Content-Length or expected length is known, not only in test. This will save unnecessary last extra loop trying to read 0 bytes. 
* [downloader/http] Request last data block of exact remaining size Always request last data block of exact size remaining to download if possible not the current block size. * [iprima] Improve extraction (closes #25138) * [youtube] Improve signature cipher extraction (closes #25188) * [ChangeLog] Actualize [ci skip] * release 2020.05.08 * [spike] fix Bellator mgid extraction(closes #25195) * [bbccouk] PEP8 * [mailru] Fix extraction (closes #24530) (#25239) * [README.md] flake8 HTTPS URL (#25230) * [youtube] Add support for yewtu.be (#25226) * [soundcloud] reduce API playlist page limit(closes #25274) * [vimeo] improve format extraction and sorting(closes #25285) * [redtube] Improve title extraction (#25208) * [indavideo] Switch to HTTPS for API request (#25191) * [utils] Fix file permissions in write_json_file (closes #12471) (#25122) * [redtube] Improve formats extraction and extract m3u8 formats (closes #25311, closes #25321) * [ard] Improve _VALID_URL (closes #25134) (#25198) * [giantbomb] Extend _VALID_URL (#25222) * [postprocessor/ffmpeg] Embed series metadata with --add-metadata * [youtube] Add support for more invidious instances (#25417) * [ard:beta] Extend _VALID_URL (closes #25405) * [ChangeLog] Actualize [ci skip] * release 2020.05.29 * [jwplatform] Improve embeds extraction (closes #25467) * [periscope] Fix untitled broadcasts (#25482) * [twitter:broadcast] Add untitled periscope broadcast test * [malltv] Add support for sk.mall.tv (#25445) * [brightcove] Fix subtitles extraction (closes #25540) * [brightcove] Sort imports * [twitch] Pass v5 accept header and fix thumbnails extraction (closes #25531) * [twitch:stream] Fix extraction (closes #25528) * [twitch:stream] Expect 400 and 410 HTTP errors from API * [tele5] Prefer jwplatform over nexx (closes #25533) * [jwplatform] Add support for bypass geo restriction * [tele5] Bypass geo restriction * [ChangeLog] Actualize [ci skip] * release 2020.06.06 * [kaltura] Add support for multiple embeds on a 
webpage (closes #25523) * [youtube] Extract chapters from JSON (closes #24819) * [facebook] Support single-video ID links I stumbled upon this at https://www.facebook.com/bwfbadminton/posts/10157127020046316 . No idea how prevalent it is yet. * [youtube] Fix playlist and feed extraction (closes #25675) * [youtube] Fix thumbnails extraction and remove uploader id extraction warning (closes #25676) * [youtube] Fix upload date extraction * [youtube] Improve view count extraction * [youtube] Fix uploader id and uploader URL extraction * [ChangeLog] Actualize [ci skip] * release 2020.06.16 * [youtube] Fix categories and improve tags extraction * [youtube] Force old layout (closes #25682, closes #25683, closes #25680, closes #25686) * [ChangeLog] Actualize [ci skip] * release 2020.06.16.1 * [brightcove] Improve embed detection (closes #25674) * [bellmedia] add support for cp24.com clip URLs(closes #25764) * [youtube:playlists] Extend _VALID_URL (closes #25810) * [youtube] Prevent excess HTTP 301 (#25786) * [wistia] Restrict embed regex (closes #25969) * [youtube] Improve description extraction (closes #25937) (#25980) * [youtube] Fix sigfunc name extraction (closes #26134, closes #26135, closes #26136, closes #26137) * [ChangeLog] Actualize [ci skip] * release 2020.07.28 * [xhamster] Extend _VALID_URL (closes #25789) (#25804) * [xhamster] Fix extraction (closes #26157) (#26254) * [xhamster] Extend _VALID_URL (closes #25927) Co-authored-by: Remita Amine <remitamine@gmail.com> Co-authored-by: Sergey M․ <dstftw@gmail.com> Co-authored-by: nmeum <soeren+github@soeren-tempel.net> Co-authored-by: Roxedus <me@roxedus.dev> Co-authored-by: Singwai Chan <c.singwai@gmail.com> Co-authored-by: cdarlint <cdarlint@users.noreply.github.com> Co-authored-by: Johannes N <31795504+jonolt@users.noreply.github.com> Co-authored-by: jnozsc <jnozsc@gmail.com> Co-authored-by: Moritz Patelscheck <moritz.patelscheck@campus.tu-berlin.de> Co-authored-by: PB <3854688+uno20001@users.noreply.github.com> 
Co-authored-by: Philipp Hagemeister <phihag@phihag.de> Co-authored-by: Xaver Hellauer <software@hellauer.bayern> Co-authored-by: d2au <d2au.dev@gmail.com> Co-authored-by: Jan 'Yenda' Trmal <jtrmal@gmail.com> Co-authored-by: jxu <7989982+jxu@users.noreply.github.com> Co-authored-by: Martin Ström <name@my-domain.se> Co-authored-by: The Hatsune Daishi <nao20010128@gmail.com> Co-authored-by: tsia <github@tsia.de> Co-authored-by: 3risian <59593325+3risian@users.noreply.github.com> Co-authored-by: Tristan Waddington <tristan.waddington@gmail.com> Co-authored-by: Devon Meunier <devon.meunier@gmail.com> Co-authored-by: Felix Stupp <felix.stupp@outlook.com> Co-authored-by: tom <tomster954@gmail.com> Co-authored-by: AndrewMBL <62922222+AndrewMBL@users.noreply.github.com> Co-authored-by: willbeaufoy <will@willbeaufoy.net> Co-authored-by: Philipp Stehle <anderschwiedu@googlemail.com> Co-authored-by: hh0rva1h <61889859+hh0rva1h@users.noreply.github.com> Co-authored-by: comsomisha <shmelev1996@mail.ru> Co-authored-by: TotalCaesar659 <14265316+TotalCaesar659@users.noreply.github.com> Co-authored-by: Juan Francisco Cantero Hurtado <iam@juanfra.info> Co-authored-by: Dave Loyall <dave@the-good-guys.net> Co-authored-by: tlsssl <63866177+tlsssl@users.noreply.github.com> Co-authored-by: Rob <ankenyr@gmail.com> Co-authored-by: Michael Klein <github@a98shuttle.de> Co-authored-by: JordanWeatherby <47519158+JordanWeatherby@users.noreply.github.com> Co-authored-by: striker.sh <19488257+strikersh@users.noreply.github.com> Co-authored-by: Matej Dujava <mdujava@gmail.com> Co-authored-by: Glenn Slayden <5589855+glenn-slayden@users.noreply.github.com> Co-authored-by: MRWITEK <mrvvitek@gmail.com> Co-authored-by: JChris246 <43832407+JChris246@users.noreply.github.com> Co-authored-by: TheRealDude2 <the.real.dude@gmx.de>
718 lines
25 KiB
Python
718 lines
25 KiB
Python
# coding: utf-8
|
||
from __future__ import unicode_literals
|
||
|
||
import re
|
||
|
||
from .common import InfoExtractor
|
||
from ..compat import (
|
||
compat_str,
|
||
compat_urllib_parse_unquote,
|
||
)
|
||
from ..utils import (
|
||
ExtractorError,
|
||
int_or_none,
|
||
JSON_LD_RE,
|
||
js_to_json,
|
||
NO_DEFAULT,
|
||
parse_age_limit,
|
||
parse_duration,
|
||
try_get,
|
||
)
|
||
|
||
|
||
class NRKBaseIE(InfoExtractor):
    """Shared extraction logic for NRK's PSAPI mediaelement endpoint.

    Subclasses supply ``_API_HOSTS`` (a tuple of candidate API hosts);
    the first host that returns usable JSON is cached on ``_api_host``
    and reused for subsequent extractions.
    """
    _GEO_COUNTRIES = ['NO']

    # Cached working API host; None until the first successful request.
    _api_host = None

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Once a host has worked, try only that one; otherwise walk the
        # subclass-provided candidates in order.
        api_hosts = (self._api_host, ) if self._api_host else self._API_HOSTS

        for api_host in api_hosts:
            data = self._download_json(
                'http://%s/mediaelement/%s' % (api_host, video_id),
                video_id, 'Downloading mediaelement JSON',
                # Only the last candidate is fatal, so earlier failures
                # fall through to the next host.
                fatal=api_host == api_hosts[-1])
            if not data:
                continue
            self._api_host = api_host
            break

        title = data.get('fullTitle') or data.get('mainTitle') or data['title']
        video_id = data.get('id') or video_id

        entries = []

        conviva = data.get('convivaStatistics') or {}
        live = (data.get('mediaElementType') == 'Live'
                or data.get('isLive') is True or conviva.get('isLive'))

        def make_title(t):
            # Live streams get the "(live)"-style title decoration.
            return self._live_title(t) if live else t

        media_assets = data.get('mediaAssets')
        if media_assets and isinstance(media_assets, list):
            def video_id_and_title(idx):
                # Single-asset programs keep the plain id/title; multi-part
                # programs get "-<n>" / "(Part <n>)" suffixes per asset.
                return ((video_id, title) if len(media_assets) == 1
                        else ('%s-%d' % (video_id, idx), '%s (Part %d)' % (title, idx)))
            for num, asset in enumerate(media_assets, 1):
                asset_url = asset.get('url')
                if not asset_url:
                    continue
                formats = self._extract_akamai_formats(asset_url, video_id)
                if not formats:
                    continue
                self._sort_formats(formats)

                # Some f4m streams may not work with hdcore in fragments' URLs
                for f in formats:
                    extra_param = f.get('extra_param_to_segment_url')
                    if extra_param and 'hdcore' in extra_param:
                        del f['extra_param_to_segment_url']

                entry_id, entry_title = video_id_and_title(num)
                duration = parse_duration(asset.get('duration'))
                subtitles = {}
                # Norwegian subtitles may be exposed as WebVTT and/or TTML.
                for subtitle in ('webVtt', 'timedText'):
                    subtitle_url = asset.get('%sSubtitlesUrl' % subtitle)
                    if subtitle_url:
                        subtitles.setdefault('no', []).append({
                            'url': compat_urllib_parse_unquote(subtitle_url)
                        })
                entries.append({
                    'id': asset.get('carrierId') or entry_id,
                    'title': make_title(entry_title),
                    'duration': duration,
                    'subtitles': subtitles,
                    'formats': formats,
                })

        # Fallback: no per-asset entries, try the top-level media URL.
        if not entries:
            media_url = data.get('mediaUrl')
            if media_url:
                formats = self._extract_akamai_formats(media_url, video_id)
                self._sort_formats(formats)
                duration = parse_duration(data.get('duration'))
                entries = [{
                    'id': video_id,
                    'title': make_title(title),
                    'duration': duration,
                    'formats': formats,
                }]

        # Still nothing playable: translate the API's messageType into a
        # human-readable (Norwegian) error, special-casing geo blocks.
        if not entries:
            MESSAGES = {
                'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
                'ProgramRightsHasExpired': 'Programmet har gått ut',
                'NoProgramRights': 'Ikke tilgjengelig',
                'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
            }
            message_type = data.get('messageType', '')
            # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
            if 'IsGeoBlocked' in message_type:
                self.raise_geo_restricted(
                    msg=MESSAGES.get('ProgramIsGeoBlocked'),
                    countries=self._GEO_COUNTRIES)
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, MESSAGES.get(
                    message_type, message_type)),
                expected=True)

        series = conviva.get('seriesName') or data.get('seriesTitle')
        episode = conviva.get('episodeName') or data.get('episodeNumberOrDate')

        season_number = None
        episode_number = None
        if data.get('mediaElementType') == 'Episode':
            # Season/episode numbers are only available encoded in stream
            # statistics or the relative origin URL, e.g. ".../s1e2." or
            # ".../sesong-1/episode-2".
            _season_episode = data.get('scoresStatistics', {}).get('springStreamStream') or \
                data.get('relativeOriginUrl', '')
            EPISODENUM_RE = [
                r'/s(?P<season>\d{,2})e(?P<episode>\d{,2})\.',
                r'/sesong-(?P<season>\d{,2})/episode-(?P<episode>\d{,2})',
            ]
            season_number = int_or_none(self._search_regex(
                EPISODENUM_RE, _season_episode, 'season number',
                default=None, group='season'))
            episode_number = int_or_none(self._search_regex(
                EPISODENUM_RE, _season_episode, 'episode number',
                default=None, group='episode'))

        thumbnails = None
        images = data.get('images')
        if images and isinstance(images, dict):
            web_images = images.get('webImages')
            if isinstance(web_images, list):
                thumbnails = [{
                    'url': image['imageUrl'],
                    'width': int_or_none(image.get('width')),
                    'height': int_or_none(image.get('height')),
                } for image in web_images if image.get('imageUrl')]

        description = data.get('description')
        category = data.get('mediaAnalytics', {}).get('category')

        # Metadata shared by every entry of a multi-part program.
        common_info = {
            'description': description,
            'series': series,
            'episode': episode,
            'season_number': season_number,
            'episode_number': episode_number,
            'categories': [category] if category else None,
            'age_limit': parse_age_limit(data.get('legalAge')),
            'thumbnails': thumbnails,
        }

        # Audio-only programs are marked so no video codec is assumed.
        vcodec = 'none' if data.get('mediaType') == 'Audio' else None

        for entry in entries:
            entry.update(common_info)
            for f in entry['formats']:
                f['vcodec'] = vcodec

        # Build chapters from index points: each chapter ends where the
        # next point starts; the last one ends at the program duration.
        points = data.get('shortIndexPoints')
        if isinstance(points, list):
            chapters = []
            for next_num, point in enumerate(points, start=1):
                if not isinstance(point, dict):
                    continue
                start_time = parse_duration(point.get('startPoint'))
                if start_time is None:
                    continue
                end_time = parse_duration(
                    data.get('duration')
                    if next_num == len(points)
                    else points[next_num].get('startPoint'))
                if end_time is None:
                    continue
                chapters.append({
                    'start_time': start_time,
                    'end_time': end_time,
                    'title': point.get('title'),
                })
            # Chapters only make sense for single-entry results.
            if chapters and len(entries) == 1:
                entries[0]['chapters'] = chapters

        return self.playlist_result(entries, video_id, title, description)
|
||
|
||
|
||
class NRKIE(NRKBaseIE):
    """Extractor for direct NRK media ids.

    Matches nrk.no ``/video/PS*`` URLs, v8 PSAPI mediaelement URLs and the
    internal ``nrk:<id>`` scheme used by the other NRK extractors to defer
    to this one.
    """
    _VALID_URL = r'''(?x)
                        (?:
                            nrk:|
                            https?://
                                (?:
                                    (?:www\.)?nrk\.no/video/PS\*|
                                    v8[-.]psapi\.nrk\.no/mediaelement/
                                )
                            )
                            (?P<id>[^?#&]+)
                        '''
    # Candidate PSAPI hosts tried in order by NRKBaseIE._real_extract.
    _API_HOSTS = ('psapi.nrk.no', 'v8-psapi.nrk.no')
    _TESTS = [{
        # video
        'url': 'http://www.nrk.no/video/PS*150533',
        'md5': '706f34cdf1322577589e369e522b50ef',
        'info_dict': {
            'id': '150533',
            'ext': 'mp4',
            'title': 'Dompap og andre fugler i Piip-Show',
            'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
            'duration': 262,
        }
    }, {
        # audio
        'url': 'http://www.nrk.no/video/PS*154915',
        # MD5 is unstable
        'info_dict': {
            'id': '154915',
            'ext': 'flv',
            'title': 'Slik høres internett ut når du er blind',
            'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
            'duration': 20,
        }
    }, {
        'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
        'only_matching': True,
    }, {
        'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70',
        'only_matching': True,
    }, {
        'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
        'only_matching': True,
    }]
|
||
|
||
|
||
class NRKTVIE(NRKBaseIE):
    """Extractor for tv.nrk.no / radio.nrk.no program and series pages."""
    IE_DESC = 'NRK TV and NRK Radio'
    # Program ids look like four letters followed by eight digits,
    # e.g. MDDP12000117; also reused by NRKTVEpisodeIE for validation.
    _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
    _VALID_URL = r'''(?x)
                        https?://
                            (?:tv|radio)\.nrk(?:super)?\.no/
                            (?:serie(?:/[^/]+){1,2}|program)/
                            (?![Ee]pisodes)%s
                            (?:/\d{2}-\d{2}-\d{4})?
                            (?:\#del=(?P<part_id>\d+))?
                    ''' % _EPISODE_RE
    # Candidate PSAPI hosts tried in order by NRKBaseIE._real_extract.
    _API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
    _TESTS = [{
        'url': 'https://tv.nrk.no/program/MDDP12000117',
        'md5': '8270824df46ec629b66aeaa5796b36fb',
        'info_dict': {
            'id': 'MDDP12000117AA',
            'ext': 'mp4',
            'title': 'Alarm Trolltunga',
            'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
            'duration': 2223,
            'age_limit': 6,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
        'md5': '9a167e54d04671eb6317a37b7bc8a280',
        'info_dict': {
            'id': 'MUHH48000314AA',
            'ext': 'mp4',
            'title': '20 spørsmål 23.05.2014',
            'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
            'duration': 1741,
            'series': '20 spørsmål',
            'episode': '23.05.2014',
        },
        'skip': 'NoProgramRights',
    }, {
        'url': 'https://tv.nrk.no/program/mdfp15000514',
        'info_dict': {
            'id': 'MDFP15000514CA',
            'ext': 'mp4',
            'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
            'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
            'duration': 4605,
            'series': 'Kunnskapskanalen',
            'episode': '24.05.2014',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # single playlist video
        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
        'info_dict': {
            'id': 'MSPO40010515-part2',
            'ext': 'flv',
            'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
            'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Video is geo restricted'],
        'skip': 'particular part is not supported currently',
    }, {
        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
        'playlist': [{
            'info_dict': {
                'id': 'MSPO40010515AH',
                'ext': 'mp4',
                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 1)',
                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
                'duration': 772,
                'series': 'Tour de Ski',
                'episode': '06.01.2015',
            },
            'params': {
                'skip_download': True,
            },
        }, {
            'info_dict': {
                'id': 'MSPO40010515BH',
                'ext': 'mp4',
                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 2)',
                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
                'duration': 6175,
                'series': 'Tour de Ski',
                'episode': '06.01.2015',
            },
            'params': {
                'skip_download': True,
            },
        }],
        'info_dict': {
            'id': 'MSPO40010515',
            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
            'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
        },
        'expected_warnings': ['Video is geo restricted'],
    }, {
        'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
        'info_dict': {
            'id': 'KMTE50001317AA',
            'ext': 'mp4',
            'title': 'Anno 13:30',
            'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
            'duration': 2340,
            'series': 'Anno',
            'episode': '13:30',
            'season_number': 3,
            'episode_number': 13,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
        'info_dict': {
            'id': 'MUHH46000317AA',
            'ext': 'mp4',
            'title': 'Nytt på Nytt 27.01.2017',
            'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
            'duration': 1796,
            'series': 'Nytt på nytt',
            'episode': '27.01.2017',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller',
        'only_matching': True,
    }]
|
||
|
||
|
||
class NRKTVEpisodeIE(InfoExtractor):
    """Resolve tv.nrk.no season/episode pages to their NRK program id."""
    _VALID_URL = r'https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/\d+/episode/\d+)'
    _TESTS = [{
        'url': 'https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2',
        'info_dict': {
            'id': 'MUHH36005220BA',
            'ext': 'mp4',
            'title': 'Kro, krig og kjærlighet 2:6',
            'description': 'md5:b32a7dc0b1ed27c8064f58b97bda4350',
            'duration': 1563,
            'series': 'Hellums kro',
            'season_number': 1,
            'episode_number': 2,
            'episode': '2:6',
            'age_limit': 6,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/backstage/sesong/1/episode/8',
        'info_dict': {
            'id': 'MSUI14000816AA',
            'ext': 'mp4',
            'title': 'Backstage 8:30',
            'description': 'md5:de6ca5d5a2d56849e4021f2bf2850df4',
            'duration': 1320,
            'series': 'Backstage',
            'season_number': 1,
            'episode_number': 8,
            'episode': '8:30',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'ProgramRightsHasExpired',
    }]

    def _real_extract(self, url):
        """Fetch the episode page, pull the program id out of its JSON-LD
        metadata and hand extraction over to NRKIE via the nrk: scheme."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The program id is published as the @id of the page's JSON-LD block.
        json_ld = self._search_regex(
            JSON_LD_RE, webpage, 'JSON-LD', group='json_ld')
        nrk_id = self._parse_json(json_ld, display_id)['@id']

        # The id must have the canonical program-id shape (e.g. MUHH36005220).
        assert re.match(NRKTVIE._EPISODE_RE, nrk_id)

        return self.url_result(
            'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)
|
||
|
||
|
||
class NRKTVSerieBaseIE(InfoExtractor):
    """Shared helpers for the NRK TV series and season playlist extractors."""

    def _extract_series(self, webpage, display_id, fatal=True):
        """Parse the embedded JS series config out of a series/season page.

        Returns the series dict, or None when the config cannot be found
        or parsed (only possible when fatal=False).
        """
        raw_config = self._search_regex(
            (r'INITIAL_DATA(?:_V\d)?_*\s*=\s*({.+?})\s*;',
             r'({.+?})\s*,\s*"[^"]+"\s*\)\s*</script>'),
            webpage, 'config', default='{}' if not fatal else NO_DEFAULT)
        config = self._parse_json(
            raw_config, display_id, fatal=False, transform_source=js_to_json)
        if not config:
            return
        # The series blob has lived at two different paths over time.
        return try_get(
            config,
            (lambda x: x['initialState']['series'], lambda x: x['series']),
            dict)

    def _extract_seasons(self, seasons):
        """Flatten a list of season dicts into one list of episode entries."""
        if not isinstance(seasons, list):
            return []
        return [
            entry
            for season in seasons
            for entry in self._extract_episodes(season)
        ]

    def _extract_episodes(self, season):
        """Return url_result entries for every episode of a single season."""
        if not isinstance(season, dict):
            return []
        return self._extract_entries(season.get('episodes'))

    def _extract_entries(self, entry_list):
        """Turn raw episode dicts into nrk: url_result entries, skipping
        items without a usable string prfId."""
        if not isinstance(entry_list, list):
            return []
        results = []
        for item in entry_list:
            prf_id = item.get('prfId')
            if isinstance(prf_id, compat_str) and prf_id:
                results.append(self.url_result(
                    'nrk:%s' % prf_id, ie=NRKIE.ie_key(), video_id=prf_id))
        return results
|
||
|
||
|
||
class NRKTVSeasonIE(NRKTVSerieBaseIE):
    _VALID_URL = r'https?://tv\.nrk\.no/serie/[^/]+/sesong/(?P<id>\d+)'
    _TEST = {
        'url': 'https://tv.nrk.no/serie/backstage/sesong/1',
        'info_dict': {
            'id': '1',
            'title': 'Sesong 1',
        },
        'playlist_mincount': 30,
    }

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific single-video extractors, which can
        # also match season-shaped URLs.
        if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url):
            return False
        return super(NRKTVSeasonIE, cls).suitable(url)

    def _real_extract(self, url):
        season_number = self._match_id(url)
        webpage = self._download_webpage(url, season_number)
        series = self._extract_series(webpage, season_number)

        # Pick the season whose number matches the one from the URL.
        season = next(
            s for s in series['seasons']
            if int(season_number) == s.get('seasonNumber'))

        return self.playlist_result(
            self._extract_episodes(season), season_number,
            try_get(season, lambda x: x['titles']['title'], compat_str))
|
||
|
||
|
||
class NRKTVSeriesIE(NRKTVSerieBaseIE):
    _VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/serie/(?P<id>[^/]+)'
    # Season ids as they appear in the old (server-rendered) series markup.
    _ITEM_RE = r'(?:data-season=["\']|id=["\']season-)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://tv.nrk.no/serie/blank',
        'info_dict': {
            'id': 'blank',
            'title': 'Blank',
            'description': 'md5:7664b4e7e77dc6810cd3bca367c25b6e',
        },
        'playlist_mincount': 30,
    }, {
        # new layout, seasons
        'url': 'https://tv.nrk.no/serie/backstage',
        'info_dict': {
            'id': 'backstage',
            'title': 'Backstage',
            'description': 'md5:c3ec3a35736fca0f9e1207b5511143d3',
        },
        'playlist_mincount': 60,
    }, {
        # new layout, instalments
        'url': 'https://tv.nrk.no/serie/groenn-glede',
        'info_dict': {
            'id': 'groenn-glede',
            'title': 'Grønn glede',
            'description': 'md5:7576e92ae7f65da6993cf90ee29e4608',
        },
        'playlist_mincount': 10,
    }, {
        # old layout
        'url': 'https://tv.nrksuper.no/serie/labyrint',
        'info_dict': {
            'id': 'labyrint',
            'title': 'Labyrint',
            'description': 'md5:318b597330fdac5959247c9b69fdb1ec',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/saving-the-human-race',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/postmann-pat',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Never shadow the more specific video/episode/season extractors,
        # whose URLs also live under /serie/.
        return (
            False if any(ie.suitable(url)
                         for ie in (NRKTVIE, NRKTVEpisodeIE, NRKTVSeasonIE))
            else super(NRKTVSeriesIE, cls).suitable(url))

    def _real_extract(self, url):
        series_id = self._match_id(url)

        webpage = self._download_webpage(url, series_id)

        # New layout (e.g. https://tv.nrk.no/serie/backstage)
        series = self._extract_series(webpage, series_id, fatal=False)
        if series:
            title = try_get(series, lambda x: x['titles']['title'], compat_str)
            description = try_get(
                series, lambda x: x['titles']['subtitle'], compat_str)
            # Collect episodes from all three places the new layout may
            # expose them: regular seasons, instalments and extra material.
            entries = []
            entries.extend(self._extract_seasons(series.get('seasons')))
            entries.extend(self._extract_entries(series.get('instalments')))
            entries.extend(self._extract_episodes(series.get('extraMaterial')))
            return self.playlist_result(entries, series_id, title, description)

        # Old layout (e.g. https://tv.nrksuper.no/serie/labyrint):
        # delegate each season to the episode-listing page extractor.
        entries = [
            self.url_result(
                'https://tv.nrk.no/program/Episodes/{series}/{season}'.format(
                    series=series_id, season=season_id))
            for season_id in re.findall(self._ITEM_RE, webpage)
        ]

        title = self._html_search_meta(
            'seriestitle', webpage,
            'title', default=None) or self._og_search_title(
            webpage, fatal=False)
        if title:
            # Strip the "NRK TV - " / "NRK Super TV – " site prefix, if any.
            title = self._search_regex(
                r'NRK (?:Super )?TV\s*[-–]\s*(.+)', title, 'title', default=title)

        description = self._html_search_meta(
            'series_description', webpage,
            'description', default=None) or self._og_search_description(webpage)

        return self.playlist_result(entries, series_id, title, description)
|
||
|
||
|
||
class NRKTVDirekteIE(NRKTVIE):
    # Live channels; actual extraction is inherited unchanged from NRKTVIE,
    # only the URL pattern differs.
    IE_DESC = 'NRK TV Direkte and NRK Radio Direkte'
    _VALID_URL = r'https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://tv.nrk.no/direkte/nrk1',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/direkte/p1_oslo_akershus',
        'only_matching': True,
    }]
|
||
|
||
|
||
class NRKPlaylistBaseIE(InfoExtractor):
    # Base for playlist pages that embed several nrk: video ids; subclasses
    # provide _ITEM_RE, _extract_title and (optionally) _extract_description.

    def _extract_description(self, webpage):
        # Default: no description; subclasses may override.
        pass

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # Every embedded video id found on the page becomes one entry,
        # resolved through the generic NRK extractor.
        entries = [
            self.url_result('nrk:%s' % video_id, NRKIE.ie_key())
            for video_id in re.findall(self._ITEM_RE, webpage)]

        return self.playlist_result(
            entries, playlist_id,
            self._extract_title(webpage),
            self._extract_description(webpage))
|
||
|
||
|
||
class NRKPlaylistIE(NRKPlaylistBaseIE):
    # Article pages on nrk.no embedding one or more videos; /video and
    # /skole URLs are excluded because dedicated extractors handle them.
    _VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)'
    _ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
    _TESTS = [{
        'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
        'info_dict': {
            'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763',
            'title': 'Gjenopplev den historiske solformørkelsen',
            'description': 'md5:c2df8ea3bac5654a26fc2834a542feed',
        },
        'playlist_count': 2,
    }, {
        'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449',
        'info_dict': {
            'id': 'rivertonprisen-til-karin-fossum-1.12266449',
            'title': 'Rivertonprisen til Karin Fossum',
            'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.',
        },
        'playlist_count': 2,
    }]

    def _extract_title(self, webpage):
        # Article headline from the OpenGraph metadata.
        return self._og_search_title(webpage, fatal=False)

    def _extract_description(self, webpage):
        return self._og_search_description(webpage)
|
||
|
||
|
||
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
    # Old-layout per-season episode listing pages.
    _VALID_URL = r'https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)'
    # Episode ids on these pages follow the generic NRK episode id format.
    _ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE
    _TESTS = [{
        'url': 'https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031',
        'info_dict': {
            'id': '69031',
            'title': 'Nytt på nytt, sesong: 201210',
        },
        'playlist_count': 4,
    }]

    def _extract_title(self, webpage):
        # These pages have no usable OpenGraph title; read the <h1> instead.
        return self._html_search_regex(
            r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
|
||
|
||
|
||
class NRKSkoleIE(InfoExtractor):
    # Resolves an NRK Skole mediaId to the underlying NRK programme id.
    IE_DESC = 'NRK Skole'
    _VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://www.nrk.no/skole/?page=search&q=&mediaId=14099',
        'md5': '18c12c3d071953c3bf8d54ef6b2587b7',
        'info_dict': {
            'id': '6021',
            'ext': 'mp4',
            'title': 'Genetikk og eneggede tvillinger',
            'description': 'md5:3aca25dcf38ec30f0363428d2b265f8d',
            'duration': 399,
        },
    }, {
        'url': 'https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        media_id = self._match_id(url)

        # The mimir plugin page embeds the active media's state as JSON.
        plugin_page = self._download_webpage(
            'https://mimir.nrk.no/plugin/1.0/static?mediaId=%s' % media_id,
            media_id)

        embedded_json = self._search_regex(
            r'<script[^>]+type=["\']application/json["\'][^>]*>({.+?})</script>',
            plugin_page, 'application json')
        nrk_id = self._parse_json(
            embedded_json, media_id)['activeMedia']['psId']

        # Hand off playback to the generic NRK extractor.
        return self.url_result('nrk:%s' % nrk_id)
|