# coding: utf-8
from __future__ import unicode_literals

import re
import itertools

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    unified_timestamp,
)
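

# Two extractors live in this module: VineIE pulls a single post's metadata and
# media URLs from the archived post JSON, and VineUserIE expands a user's
# timeline into a playlist of individual Vine entries.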
class VineIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vine\.co/(?:v|oembed)/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://vine.co/v/b9KOOWX7HUx',
        'md5': '2f36fed6235b16da96ce9b4dc890940d',
        'info_dict': {
            'id': 'b9KOOWX7HUx',
            'ext': 'mp4',
            'title': 'Chicken.',
            'alt_title': 'Vine by Jack',
            'timestamp': 1368997951,
            'upload_date': '20130519',
            'uploader': 'Jack',
            'uploader_id': '76',
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
    }, {
        'url': 'https://vine.co/v/e192BnZnZ9V',
        'info_dict': {
            'id': 'e192BnZnZ9V',
            'ext': 'mp4',
            'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2',
            'alt_title': 'Vine by Pimry_zaa',
            'timestamp': 1436057405,
            'upload_date': '20150705',
            'uploader': 'Pimry_zaa',
            'uploader_id': '1135760698325307392',
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://vine.co/v/MYxVapFvz2z',
        'only_matching': True,
    }, {
        'url': 'https://vine.co/v/bxVjBbZlPUH',
        'only_matching': True,
    }, {
        'url': 'https://vine.co/oembed/MYxVapFvz2z.json',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        data = self._download_json(
            'https://archive.vine.co/posts/%s.json' % video_id, video_id)
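
        # The archived post JSON stores direct media links under keys such as
        # 'videoLowURL', 'videoUrl' and 'videoDashUrl'; the casing of the
        # 'Url'/'URL' suffix is not consistent, so the helper below tries both.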
        def video_url(kind):
            for url_suffix in ('Url', 'URL'):
                format_url = data.get('video%s%s' % (kind, url_suffix))
                if format_url:
                    return format_url
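
        # Build the format list; 'quality' is the index into ('low', '', 'dash'),
        # which youtube-dl's format sorting treats as "higher is better".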
        formats = []
        for quality, format_id in enumerate(('low', '', 'dash')):
            format_url = video_url(format_id.capitalize())
            if not format_url:
                continue
            # DASH link returns plain mp4
            if format_id == 'dash' and determine_ext(format_url) == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    format_url, video_id, mpd_id='dash', fatal=False))
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id or 'standard',
                    'quality': quality,
                })
        self._sort_formats(formats)

        username = data.get('username')
        alt_title = 'Vine by %s' % username if username else None

        return {
            'id': video_id,
            'title': data.get('description') or alt_title or 'Vine video',
            'alt_title': alt_title,
            'thumbnail': data.get('thumbnailUrl'),
            'timestamp': unified_timestamp(data.get('created')),
            'uploader': username,
            'uploader_id': data.get('userIdStr'),
            'view_count': int_or_none(data.get('loops')),
            'like_count': int_or_none(data.get('likes')),
            'comment_count': int_or_none(data.get('comments')),
            'repost_count': int_or_none(data.get('reposts')),
            'formats': formats,
        }


class VineUserIE(InfoExtractor):
    IE_NAME = 'vine:user'
    _VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
    _VINE_BASE_URL = 'https://vine.co/'
    _TESTS = [
        {
            'url': 'https://vine.co/Visa',
            'info_dict': {
                'id': 'Visa',
            },
            'playlist_mincount': 46,
        },
        {
            'url': 'https://vine.co/u/941705360593584128',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        u = mobj.group('u')
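
        # A bare vanity URL (vine.co/<name>) goes through the 'vanity/' profile
        # lookup, while /u/<numeric id> URLs query the profiles endpoint directly.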
        profile_url = '%sapi/users/profiles/%s%s' % (
            self._VINE_BASE_URL, 'vanity/' if not u else '', user)
        profile_data = self._download_json(
            profile_url, user, note='Downloading user profile data')
        user_id = profile_data['data']['userId']

        timeline_data = []
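        # The timeline API is paginated (up to 100 records per page via
        # 'size=100'); 'nextPage' is None once the last page has been fetched.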
        for pagenum in itertools.count(1):
            timeline_url = '%sapi/timelines/users/%s?page=%s&size=100' % (
                self._VINE_BASE_URL, user_id, pagenum)
            timeline_page = self._download_json(
                timeline_url, user, note='Downloading page %d' % pagenum)
            timeline_data.extend(timeline_page['data']['records'])
            if timeline_page['data']['nextPage'] is None:
                break

        entries = [
            self.url_result(e['permalinkUrl'], 'Vine') for e in timeline_data]
        return self.playlist_result(entries, user)
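

# A minimal, commented-out usage sketch (not part of the original module):
# youtube-dl dispatches vine.co URLs to the extractors above through YoutubeDL,
# so post metadata can be inspected without downloading the media.
#
#   import youtube_dl
#
#   with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#       info = ydl.extract_info('https://vine.co/v/b9KOOWX7HUx', download=False)
#       print(info['title'], info['uploader'], info['view_count'])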