# youtube_dl/extractor/xhamster.py (part of youtube-dl: https://github.com/ytdl-org/youtube-dl)
from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
determine_ext,
dict_get,
extract_attributes,
ExtractorError,
float_or_none,
int_or_none,
parse_duration,
str_or_none,
try_get,
unified_strdate,
url_or_none,
urljoin,
)


class XHamsterIE(InfoExtractor):
_DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.com)'
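    # Two URL layouts are accepted: the legacy /movies/<id>/<seo>.html form
    # and the newer /videos/<seo>-<id> form (see _TESTS below).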
_VALID_URL = r'''(?x)
https?://
(?:.+?\.)?%s/
(?:
movies/(?P<id>[\dA-Za-z]+)/(?P<display_id>[^/]*)\.html|
videos/(?P<display_id_2>[^/]*)-(?P<id_2>[\dA-Za-z]+)
)
''' % _DOMAINS
_TESTS = [{
'url': 'https://xhamster.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
'md5': '98b4687efb1ffd331c4197854dc09e8f',
'info_dict': {
'id': '1509445',
'display_id': 'femaleagent-shy-beauty-takes-the-bait',
'ext': 'mp4',
'title': 'FemaleAgent Shy beauty takes the bait',
'timestamp': 1350194821,
'upload_date': '20121014',
'uploader': 'Ruseful2011',
'duration': 893,
'age_limit': 18,
},
}, {
'url': 'https://xhamster.com/videos/britney-spears-sexy-booty-2221348?hd=',
'info_dict': {
'id': '2221348',
'display_id': 'britney-spears-sexy-booty',
'ext': 'mp4',
'title': 'Britney Spears Sexy Booty',
'timestamp': 1379123460,
'upload_date': '20130914',
'uploader': 'jojo747400',
'duration': 200,
'age_limit': 18,
},
'params': {
'skip_download': True,
},
}, {
# empty seo, unavailable via new URL schema
'url': 'http://xhamster.com/movies/5667973/.html',
'info_dict': {
'id': '5667973',
'ext': 'mp4',
'title': '....',
'timestamp': 1454948101,
'upload_date': '20160208',
'uploader': 'parejafree',
'duration': 72,
'age_limit': 18,
},
'params': {
'skip_download': True,
},
}, {
# mobile site
'url': 'https://m.xhamster.com/videos/cute-teen-jacqueline-solo-masturbation-8559111',
'only_matching': True,
}, {
'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
'only_matching': True,
}, {
        # This video is only visible to marcoalfa123456's friends
'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html',
'only_matching': True,
}, {
# new URL schema
'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821',
'only_matching': True,
}, {
'url': 'https://xhamster.one/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
'only_matching': True,
}, {
'url': 'https://xhamster.desi/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
'only_matching': True,
}, {
'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
'only_matching': True,
}, {
'url': 'https://xhamster11.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
'only_matching': True,
}, {
'url': 'https://xhamster26.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
'only_matching': True,
}, {
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
'only_matching': True,
}, {
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
'only_matching': True,
}, {
'url': 'http://de.xhamster.com/videos/skinny-girl-fucks-herself-hard-in-the-forest-xhnBJZx',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('id_2')
display_id = mobj.group('display_id') or mobj.group('display_id_2')
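        # The mobile site (m.xhamster.com) serves different markup, so always
        # fetch the equivalent desktop page instead.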
desktop_url = re.sub(r'^(https?://(?:.+?\.)?)m\.', r'\1', url)
webpage, urlh = self._download_webpage_handle(desktop_url, video_id)
error = self._html_search_regex(
r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
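        # Age limit is derived from the RTA (Restricted To Adults) meta label,
        # if present on the page.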
age_limit = self._rta_search(webpage)
def get_height(s):
return int_or_none(self._search_regex(
r'^(\d+)[pP]', s, 'height', default=None))
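        # Newer pages embed their metadata in a window.initials JSON object;
        # prefer it over scraping the markup when it is present.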
initials = self._parse_json(
self._search_regex(
(r'window\.initials\s*=\s*({.+?})\s*;\s*</script>',
r'window\.initials\s*=\s*({.+?})\s*;'), webpage, 'initials',
default='{}'),
video_id, fatal=False)
if initials:
video = initials['videoModel']
title = video['title']
formats = []
format_urls = set()
format_sizes = {}
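            # video['sources'] is expected to map a format family (e.g. 'mp4',
            # 'download') to a quality -> URL dict; the 'download' entries also
            # carry file sizes.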
sources = try_get(video, lambda x: x['sources'], dict) or {}
for format_id, formats_dict in sources.items():
if not isinstance(formats_dict, dict):
continue
download_sources = try_get(sources, lambda x: x['download'], dict) or {}
for quality, format_dict in download_sources.items():
if not isinstance(format_dict, dict):
continue
format_sizes[quality] = float_or_none(format_dict.get('size'))
for quality, format_item in formats_dict.items():
if format_id == 'download':
# Download link takes some time to be generated,
# skipping for now
continue
format_url = format_item
format_url = url_or_none(format_url)
if not format_url or format_url in format_urls:
continue
format_urls.add(format_url)
formats.append({
'format_id': '%s-%s' % (format_id, quality),
'url': format_url,
'ext': determine_ext(format_url, 'mp4'),
'height': get_height(quality),
'filesize': format_sizes.get(quality),
'http_headers': {
'Referer': urlh.geturl(),
},
})
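            # Some pages additionally expose xplayerSettings with HLS and
            # progressive ("standard") sources, including fallback URLs.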
xplayer_sources = try_get(
initials, lambda x: x['xplayerSettings']['sources'], dict)
if xplayer_sources:
hls_sources = xplayer_sources.get('hls')
if isinstance(hls_sources, dict):
for hls_format_key in ('url', 'fallback'):
hls_url = hls_sources.get(hls_format_key)
if not hls_url:
continue
hls_url = urljoin(url, hls_url)
if not hls_url or hls_url in format_urls:
continue
format_urls.add(hls_url)
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
standard_sources = xplayer_sources.get('standard')
if isinstance(standard_sources, dict):
for format_id, formats_list in standard_sources.items():
if not isinstance(formats_list, list):
continue
for standard_format in formats_list:
if not isinstance(standard_format, dict):
continue
for standard_format_key in ('url', 'fallback'):
standard_url = standard_format.get(standard_format_key)
if not standard_url:
continue
standard_url = urljoin(url, standard_url)
if not standard_url or standard_url in format_urls:
continue
format_urls.add(standard_url)
ext = determine_ext(standard_url, 'mp4')
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
standard_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
continue
quality = (str_or_none(standard_format.get('quality'))
or str_or_none(standard_format.get('label'))
or '')
formats.append({
'format_id': '%s-%s' % (format_id, quality),
'url': standard_url,
'ext': ext,
'height': get_height(quality),
'filesize': format_sizes.get(quality),
'http_headers': {
'Referer': standard_url,
},
})
self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))
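            # video['categories'] is expected to be a list of {'name': ...} dicts.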
categories_list = video.get('categories')
if isinstance(categories_list, list):
categories = []
for c in categories_list:
if not isinstance(c, dict):
continue
c_name = c.get('name')
if isinstance(c_name, compat_str):
categories.append(c_name)
else:
categories = None
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': video.get('description'),
'timestamp': int_or_none(video.get('created')),
'uploader': try_get(
video, lambda x: x['author']['name'], compat_str),
'thumbnail': video.get('thumbURL'),
'duration': int_or_none(video.get('duration')),
'view_count': int_or_none(video.get('views')),
'like_count': int_or_none(try_get(
video, lambda x: x['rating']['likes'], int)),
'dislike_count': int_or_none(try_get(
video, lambda x: x['rating']['dislikes'], int)),
                # 'comments' is assumed to be the key carrying the comment
                # total in the initials JSON
                'comment_count': int_or_none(video.get('comments')),
'age_limit': age_limit,
'categories': categories,
'formats': formats,
}
# Old layout fallback
title = self._html_search_regex(
[r'<h1[^>]*>([^<]+)</h1>',
r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
webpage, 'title')
formats = []
format_urls = set()
sources = self._parse_json(
self._search_regex(
r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources',
default='{}'),
video_id, fatal=False)
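        # In the old layout, sources is assumed to be a flat quality -> URL
        # mapping (e.g. {'480p': '...'}), hence get_height(format_id) below.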
for format_id, format_url in sources.items():
format_url = url_or_none(format_url)
if not format_url:
continue
if format_url in format_urls:
continue
format_urls.add(format_url)
formats.append({
'format_id': format_id,
'url': format_url,
'height': get_height(format_id),
})
video_url = self._search_regex(
[r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
webpage, 'video url', group='mp4', default=None)
if video_url and video_url not in format_urls:
formats.append({
'url': video_url,
})
self._sort_formats(formats)
        # Only a few videos have a description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
description = mobj.group(1) if mobj else None
upload_date = unified_strdate(self._search_regex(
r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
webpage, 'upload date', fatal=False))
uploader = self._html_search_regex(
r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)',
webpage, 'uploader', default='anonymous')
thumbnail = self._search_regex(
[r'''["']thumbUrl["']\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
r'''<video[^>]+"poster"=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = parse_duration(self._search_regex(
[r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']',
r'Runtime:\s*</span>\s*([\d:]+)'], webpage,
'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'content=["\']User(?:View|Play)s:(\d+)',
webpage, 'view count', fatal=False))
mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage)
(like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)
mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
comment_count = mobj.group('commentcount') if mobj else 0
categories_html = self._search_regex(
r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage,
'categories', default=None)
categories = [clean_html(category) for category in re.findall(
r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'upload_date': upload_date,
'uploader': uploader,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': int_or_none(like_count),
'dislike_count': int_or_none(dislike_count),
'comment_count': int_or_none(comment_count),
'age_limit': age_limit,
'categories': categories,
'formats': formats,
}


class XHamsterEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?%s/xembed\.php\?video=(?P<id>\d+)' % XHamsterIE._DOMAINS
_TEST = {
'url': 'http://xhamster.com/xembed.php?video=3328539',
'info_dict': {
'id': '3328539',
'ext': 'mp4',
'title': 'Pen Masturbation',
'timestamp': 1406581861,
'upload_date': '20140728',
'uploader': 'ManyakisArt',
'duration': 5,
'age_limit': 18,
}
}
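    # Called by the generic extractor to discover xHamster <iframe> embeds
    # on third-party pages.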
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r'href="(https?://xhamster\.com/(?:movies/{0}/[^"]*\.html|videos/[^/]*-{0})[^"]*)"'.format(video_id),
webpage, 'xhamster url', default=None)
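        # No direct link found on the embed page: fall back to the player's
        # JSON "vars" object, which carries several candidate URLs.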
if not video_url:
vars = self._parse_json(
self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'),
video_id)
video_url = dict_get(vars, ('downloadLink', 'homepageLink', 'commentsLink', 'shareUrl'))
return self.url_result(video_url, 'XHamster')


class XHamsterUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?%s/users/(?P<id>[^/?#&]+)' % XHamsterIE._DOMAINS
_TESTS = [{
# Paginated user profile
'url': 'https://xhamster.com/users/netvideogirls/videos',
'info_dict': {
'id': 'netvideogirls',
},
'playlist_mincount': 267,
}, {
# Non-paginated user profile
'url': 'https://xhamster.com/users/firatkaan/videos',
'info_dict': {
'id': 'firatkaan',
},
'playlist_mincount': 1,
}]
def _entries(self, user_id):
next_page_url = 'https://xhamster.com/users/%s/videos/1' % user_id
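        # Walk the paginated /videos/<n> listing, yielding each video thumb
        # link and following the data-page="next" link until none is left.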
for pagenum in itertools.count(1):
page = self._download_webpage(
next_page_url, user_id, 'Downloading page %s' % pagenum)
for video_tag in re.findall(
r'(<a[^>]+class=["\'].*?\bvideo-thumb__image-container[^>]+>)',
page):
video = extract_attributes(video_tag)
video_url = url_or_none(video.get('href'))
if not video_url or not XHamsterIE.suitable(video_url):
continue
video_id = XHamsterIE._match_id(video_url)
yield self.url_result(
video_url, ie=XHamsterIE.ie_key(), video_id=video_id)
mobj = re.search(r'<a[^>]+data-page=["\']next[^>]+>', page)
if not mobj:
break
next_page = extract_attributes(mobj.group(0))
next_page_url = url_or_none(next_page.get('href'))
if not next_page_url:
break
def _real_extract(self, url):
user_id = self._match_id(url)
return self.playlist_result(self._entries(user_id), user_id)