[iwara] Add playlist extractors (#3639)

Authored by: i6t
This commit is contained in:
i6t 2022-05-05 00:49:46 +09:00 committed by GitHub
parent 4f7a98c565
commit ff4d7860d5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 96 additions and 5 deletions

View File

@@ -702,7 +702,11 @@
IviCompilationIE IviCompilationIE
) )
from .ivideon import IvideonIE from .ivideon import IvideonIE
from .iwara import IwaraIE from .iwara import (
IwaraIE,
IwaraPlaylistIE,
IwaraUserIE,
)
from .izlesene import IzleseneIE from .izlesene import IzleseneIE
from .jable import ( from .jable import (
JableIE, JableIE,

View File

@@ -1,19 +1,28 @@
import re import re
import urllib
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import ( from ..utils import (
int_or_none, int_or_none,
mimetype2ext, mimetype2ext,
remove_end, remove_end,
url_or_none, url_or_none,
urljoin,
unified_strdate, unified_strdate,
strip_or_none, strip_or_none,
) )
class IwaraBaseIE(InfoExtractor):
    """Shared base for the iwara.tv extractors.

    Provides the common URL prefix pattern (the plain and the adult
    ``ecchi.`` subdomain) and a helper to expand a listing page into
    individual video results.
    """

    # Captured as 'base_url' so subclasses can resolve relative links
    # against the same host (www vs. ecchi) the user requested.
    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'

    def _extract_playlist(self, base_url, webpage):
        """Yield url_result entries for every video link on a listing page.

        Scrapes ``<a href>`` targets inside elements with class "title"
        and joins each (possibly relative) path against *base_url*.
        """
        for path in re.findall(r'class="title">\s*<a[^<]+href="([^"]+)', webpage):
            yield self.url_result(urljoin(base_url, path))
class IwaraIE(IwaraBaseIE):
_VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{ _TESTS = [{
'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD', 'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
# md5 is unstable # md5 is unstable
@@ -58,7 +67,7 @@ def _real_extract(self, url):
webpage, urlh = self._download_webpage_handle(url, video_id) webpage, urlh = self._download_webpage_handle(url, video_id)
hostname = compat_urllib_parse_urlparse(urlh.geturl()).hostname hostname = urllib.parse.urlparse(urlh.geturl()).hostname
# ecchi is 'sexy' in Japanese # ecchi is 'sexy' in Japanese
age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0 age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0
@@ -118,3 +127,81 @@ def _real_extract(self, url):
'upload_date': upload_date, 'upload_date': upload_date,
'description': description, 'description': description,
} }
class IwaraPlaylistIE(IwaraBaseIE):
    """Extractor for iwara.tv playlist pages (``/playlist/<slug>``)."""

    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/playlist/(?P<id>[^/?#&]+)'
    IE_NAME = 'iwara:playlist'
    _TESTS = [{
        'url': 'https://ecchi.iwara.tv/playlist/best-enf',
        'info_dict': {
            'title': 'Best enf',
            'uploader': 'Jared98112',
            'id': 'best-enf',
        },
        'playlist_mincount': 1097,
    }, {
        # urlencoded
        'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
        'info_dict': {
            'id': 'プレイリスト-2',
            'title': 'プレイリスト',
            'uploader': 'mainyu',
        },
        'playlist_mincount': 91,
    }]

    def _real_extract(self, url):
        match = self._match_valid_url(url)
        base_url = match.group('base_url')
        # The slug may be percent-encoded (e.g. Japanese titles); decode
        # it so the playlist id is human-readable.
        playlist_id = urllib.parse.unquote(match.group('id'))
        webpage = self._download_webpage(url, playlist_id)

        # Both fields are best-effort: a layout change should not abort
        # extraction of the entries themselves.
        title = self._html_search_regex(
            r'class="title"[^>]*>([^<]+)', webpage, 'title', fatal=False)
        uploader = self._html_search_regex(
            r'<h2>([^<]+)', webpage, 'uploader', fatal=False)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': title,
            'uploader': uploader,
            'entries': self._extract_playlist(base_url, webpage),
        }
class IwaraUserIE(IwaraBaseIE):
    """Extractor for an iwara.tv user's uploaded videos (``/users/<name>``)."""

    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
    IE_NAME = 'iwara:user'
    _TESTS = [{
        'url': 'https://ecchi.iwara.tv/users/CuteMMD',
        'info_dict': {
            'id': 'CuteMMD',
        },
        'playlist_mincount': 198,
    }, {
        # urlencoded
        'url': 'https://ecchi.iwara.tv/users/%E5%92%95%E5%98%BF%E5%98%BF',
        'info_dict': {
            'id': '咕嘿嘿',
        },
        'playlist_mincount': 141,
    }]

    def _entries(self, playlist_id, base_url, webpage):
        """Yield entries from the first page, then from each pager link."""
        yield from self._extract_playlist(base_url, webpage)

        pager_paths = re.findall(
            r'class="pager-item"[^>]*>\s*<a[^<]+href="([^"]+)', webpage)
        # Pager links start at page 2 (the first page is *webpage*).
        page_no = 1
        for path in pager_paths:
            page_no += 1
            next_page = self._download_webpage(
                urljoin(base_url, path), playlist_id,
                note=f'Downloading playlist page {page_no}')
            yield from self._extract_playlist(base_url, next_page)

    def _real_extract(self, url):
        match = self._match_valid_url(url)
        base_url = match.group('base_url')
        # Decode percent-encoded usernames so the id is human-readable.
        playlist_id = urllib.parse.unquote(match.group('id'))
        webpage = self._download_webpage(
            f'{base_url}/users/{playlist_id}/videos', playlist_id)
        return self.playlist_result(
            self._entries(playlist_id, base_url, webpage), playlist_id)