from __future__ import unicode_literals

import random
import re
import time

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    KNOWN_EXTENSIONS,
    parse_filesize,
    str_or_none,
    try_get,
    unescapeHTML,
    update_url_query,
    unified_strdate,
    unified_timestamp,
    url_or_none,
)
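
# Illustrative usage sketch (not part of the extractor; assumes youtube-dlc's
# public YoutubeDL API and one of the test URLs below):
#
#     from youtube_dlc import YoutubeDL
#
#     with YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://benprunty.bandcamp.com/track/lanius-battle')
#         print(info.get('title'), info.get('duration'))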


class BandcampIE(InfoExtractor):
    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://youtube-dlc.bandcamp.com/track/youtube-dlc-test-song',
        'md5': 'c557841d5e50261777a6585648adf439',
        'info_dict': {
            'id': '1812978515',
            'ext': 'mp3',
            'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
            'duration': 9.8485,
            'uploader': "youtube-dl \"'/\\\u00e4\u21ad",
            'timestamp': 1354224127,
            'upload_date': '20121129',
        },
        '_skip': 'There is a limit of 200 free downloads / month for the test song'
    }, {
        # free download
        'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
        'md5': '5d92af55811e47f38962a54c30b07ef0',
        'info_dict': {
            'id': '2650410135',
            'ext': 'aiff',
            'title': 'Ben Prunty - Lanius (Battle)',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Ben Prunty',
            'timestamp': 1396508491,
            'upload_date': '20140403',
            'release_date': '20140403',
            'duration': 260.877,
            'track': 'Lanius (Battle)',
            'track_number': 1,
            'track_id': '2650410135',
            'artist': 'Ben Prunty',
            'album': 'FTL: Advanced Edition Soundtrack',
        },
    }, {
        # no free download, mp3 128
        'url': 'https://relapsealumni.bandcamp.com/track/hail-to-fire',
        'md5': 'fec12ff55e804bb7f7ebeb77a800c8b7',
        'info_dict': {
            'id': '2584466013',
            'ext': 'mp3',
            'title': 'Mastodon - Hail to Fire',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Mastodon',
            'timestamp': 1322005399,
            'upload_date': '20111122',
            'release_date': '20040207',
            'duration': 120.79,
            'track': 'Hail to Fire',
            'track_number': 5,
            'track_id': '2584466013',
            'artist': 'Mastodon',
            'album': 'Call of the Mastodon',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        thumbnail = self._html_search_meta('og:image', webpage, default=None)

        track_id = None
        track = None
        track_number = None
        duration = None

        formats = []
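        # The track metadata is embedded in the page as a JSON blob; in newer
        # page markup it sits inside an HTML attribute, so quotes may be
        # escaped as &quot; - the regex below accepts either form.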
        trackinfo_block = self._html_search_regex(
            r'trackinfo(?:["\']|&quot;):\[\s*({.+?})\s*\],(?:["\']|&quot;)',
            webpage, 'track info', default='{}')

        track_info = self._parse_json(trackinfo_block, title)
        if track_info:
            file_ = track_info.get('file')
            if isinstance(file_, dict):
                for format_id, format_url in file_.items():
                    if not url_or_none(format_url):
                        continue
                    ext, abr_str = format_id.split('-', 1)
                    formats.append({
                        'format_id': format_id,
                        'url': self._proto_relative_url(format_url, 'http:'),
                        'ext': ext,
                        'vcodec': 'none',
                        'acodec': ext,
                        'abr': int_or_none(abr_str),
                    })
            track = track_info.get('title')
            track_id = str_or_none(track_info.get('track_id') or track_info.get('id'))
            track_number = int_or_none(track_info.get('track_num'))
            duration = float_or_none(track_info.get('duration'))

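        # Helper: pull a single scalar field (e.g. 'artist' or 'album_title',
        # as used below) out of the embedded page data and undo the backslash
        # escaping applied to quotes in it.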
        def extract(key):
            data = self._html_search_regex(
                r',(["\']|&quot;)%s\1:\1(?P<value>(?:\\\1|((?!\1).))+)\1' % key,
                webpage, key, default=None, group='value')
            return data.replace(r'\"', '"').replace('\\\\', '\\') if data else data

        artist = extract('artist')
        album = extract('album_title')
        timestamp = unified_timestamp(
            extract('publish_date') or extract('album_publish_date'))
        release_date = unified_strdate(extract('album_release_date'))

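        # If the page advertises a free download (freeDownloadPage), follow it
        # and add the downloadable formats on top of the streaming ones.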
        download_link = self._search_regex(
            r'freeDownloadPage(?:["\']|&quot;):\s*(["\']|&quot;)(?P<url>(?:(?!\1).)+)\1', webpage,
            'download link', default=None, group='url')
        if download_link:
            track_id = self._search_regex(
                r'\?id=(?P<id>\d+)&',
                download_link, 'track id')

            download_webpage = self._download_webpage(
                download_link, track_id, 'Downloading free downloads page')

            blob = self._parse_json(
                self._search_regex(
                    r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
                    'blob', group='blob'),
                track_id, transform_source=unescapeHTML)

            info = try_get(
                blob, (lambda x: x['digital_items'][0],
                       lambda x: x['download_items'][0]), dict)
            if info:
                downloads = info.get('downloads')
                if isinstance(downloads, dict):
                    if not track:
                        track = info.get('title')
                    if not artist:
                        artist = info.get('artist')
                    if not thumbnail:
                        thumbnail = info.get('thumb_url')

                    download_formats = {}
                    download_formats_list = blob.get('download_formats')
                    if isinstance(download_formats_list, list):
                        for f in blob['download_formats']:
                            name, ext = f.get('name'), f.get('file_extension')
                            if all(isinstance(x, compat_str) for x in (name, ext)):
                                download_formats[name] = ext.strip('.')

                    for format_id, f in downloads.items():
                        format_url = f.get('url')
                        if not format_url:
                            continue
                        # Stat URL generation algorithm is reverse engineered from
                        # download_*_bundle_*.js
                        stat_url = update_url_query(
                            format_url.replace('/download/', '/statdownload/'), {
                                '.rand': int(time.time() * 1000 * random.random()),
                            })
                        format_id = f.get('encoding_name') or format_id
                        stat = self._download_json(
                            stat_url, track_id, 'Downloading %s JSON' % format_id,
                            transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
                            fatal=False)
                        if not stat:
                            continue
                        retry_url = url_or_none(stat.get('retry_url'))
                        if not retry_url:
                            continue
                        formats.append({
                            'url': self._proto_relative_url(retry_url, 'http:'),
                            'ext': download_formats.get(format_id),
                            'format_id': format_id,
                            'format_note': f.get('description'),
                            'filesize': parse_filesize(f.get('size_mb')),
                            'vcodec': 'none',
                        })

        self._sort_formats(formats)

        title = '%s - %s' % (artist, track) if artist else track

        if not duration:
            duration = float_or_none(self._html_search_meta(
                'duration', webpage, default=None))

        return {
            'id': track_id,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': artist,
            'timestamp': timestamp,
            'release_date': release_date,
            'duration': duration,
            'track': track,
            'track_number': track_number,
            'track_id': track_id,
            'artist': artist,
            'album': album,
            'formats': formats,
        }


class BandcampAlbumIE(InfoExtractor):
    IE_NAME = 'Bandcamp:album'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'

    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        'playlist': [
            {
                'md5': '39bc1eded3476e927c724321ddf116cf',
                'info_dict': {
                    'id': '1353101989',
                    'ext': 'mp3',
                    'title': 'Intro',
                }
            },
            {
                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                'info_dict': {
                    'id': '38097443',
                    'ext': 'mp3',
                    'title': 'Kero One - Keep It Alive (Blazo remix)',
                }
            },
        ],
        'info_dict': {
            'title': 'Jazz Format Mixtape vol.1',
            'id': 'jazz-format-mixtape-vol-1',
            'uploader_id': 'blazo',
        },
        'params': {
            'playlistend': 2
        },
        'skip': 'Bandcamp imposes download limits.'
    }, {
        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
        'info_dict': {
            'title': 'Hierophany of the Open Grave',
            'uploader_id': 'nightbringer',
            'id': 'hierophany-of-the-open-grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
            'id': 'dotscale',
            'uploader_id': 'dotscale',
        },
        'playlist_mincount': 7,
    }, {
        # with escaped quote in title
        'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
        'info_dict': {
            'title': '"Entropy" EP',
            'uploader_id': 'jstrecords',
            'id': 'entropy-ep',
        },
        'playlist_mincount': 3,
    }, {
        # not all tracks have songs
        'url': 'https://insulters.bandcamp.com/album/we-are-the-plague',
        'info_dict': {
            'id': 'we-are-the-plague',
            'title': 'WE ARE THE PLAGUE',
            'uploader_id': 'insulters',
        },
        'playlist_count': 2,
    }]

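    # _VALID_URL above also matches bare artist pages and track/weekly URLs,
    # so defer to the more specific extractors whenever they claim the URL.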
    @classmethod
    def suitable(cls, url):
        return (False
                if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url)
                else super(BandcampAlbumIE, cls).suitable(url))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uploader_id = mobj.group('subdomain')
        album_id = mobj.group('album_id')
        playlist_id = album_id or uploader_id
        webpage = self._download_webpage(url, playlist_id)
        track_elements = re.findall(
            r'(?s)<div[^>]*>(.*?<a[^>]+href="([^"]+?)"[^>]+itemprop="url"[^>]*>.*?)</div>', webpage)
        if not track_elements:
            raise ExtractorError('The page doesn\'t contain any tracks')
        # Only tracks with duration info have songs
        entries = [
            self.url_result(
                compat_urlparse.urljoin(url, t_path),
                ie=BandcampIE.ie_key(),
                video_title=self._search_regex(
                    r'<span\b[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
                    elem_content, 'track title', fatal=False))
            for elem_content, t_path in track_elements
            if self._html_search_meta('duration', elem_content, default=None)]

        title = self._html_search_regex(
            r'album_title\s*(?:&quot;|["\']):\s*(&quot;|["\'])(?P<album>(?:\\\1|((?!\1).))+)\1',
            webpage, 'title', fatal=False, group='album')

        if title:
            title = title.replace(r'\"', '"')

        return {
            '_type': 'playlist',
            'uploader_id': uploader_id,
            'id': playlist_id,
            'title': title,
            'entries': entries,
        }


class BandcampWeeklyIE(InfoExtractor):
    IE_NAME = 'Bandcamp:weekly'
    _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://bandcamp.com/?show=224',
        'md5': 'b00df799c733cf7e0c567ed187dea0fd',
        'info_dict': {
            'id': '224',
            'ext': 'opus',
            'title': 'BC Weekly April 4th 2017 - Magic Moments',
            'description': 'md5:5d48150916e8e02d030623a48512c874',
            'duration': 5829.77,
            'release_date': '20170404',
            'series': 'Bandcamp Weekly',
            'episode': 'Magic Moments',
            'episode_number': 208,
            'episode_id': '224',
        }
    }, {
        'url': 'https://bandcamp.com/?blah/blah@&show=228',
        'only_matching': True
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

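        # The show metadata lives in an HTML-escaped JSON blob in the page's
        # data-blob attribute; unescape it before parsing.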
        blob = self._parse_json(
            self._search_regex(
                r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
                'blob', group='blob'),
            video_id, transform_source=unescapeHTML)

        show = blob['bcw_show']

        # This is desired because any invalid show id redirects to `bandcamp.com`
        # which happens to expose the latest Bandcamp Weekly episode.
        show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)

        formats = []
        for format_id, format_url in show['audio_stream'].items():
            if not url_or_none(format_url):
                continue
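            # The format_id embeds the audio format name (e.g. an id containing
            # 'opus' or 'mp3'), so derive the extension from any known extension
            # found in it.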
            for known_ext in KNOWN_EXTENSIONS:
                if known_ext in format_id:
                    ext = known_ext
                    break
            else:
                ext = None
            formats.append({
                'format_id': format_id,
                'url': format_url,
                'ext': ext,
                'vcodec': 'none',
            })
        self._sort_formats(formats)

        title = show.get('audio_title') or 'Bandcamp Weekly'
        subtitle = show.get('subtitle')
        if subtitle:
            title += ' - %s' % subtitle

        episode_number = None
        seq = blob.get('bcw_seq')

        if seq and isinstance(seq, list):
            try:
                episode_number = next(
                    int_or_none(e.get('episode_number'))
                    for e in seq
                    if isinstance(e, dict) and int_or_none(e.get('id')) == show_id)
            except StopIteration:
                pass

        return {
            'id': video_id,
            'title': title,
            'description': show.get('desc') or show.get('short_desc'),
            'duration': float_or_none(show.get('audio_duration')),
            'is_live': False,
            'release_date': unified_strdate(show.get('published_date')),
            'series': 'Bandcamp Weekly',
            'episode': show.get('subtitle'),
            'episode_number': episode_number,
            'episode_id': compat_str(video_id),
            'formats': formats
        }