
Commit

^q^
KurtBestor committed Dec 23, 2020
1 parent 4ba8a57 commit 896f0cc
Showing 37 changed files with 125 additions and 156 deletions.
18 changes: 18 additions & 0 deletions README.md
@@ -7,6 +7,7 @@
[![GitHub downloads](https://img.shields.io/github/downloads/KurtBestor/Hitomi-Downloader/latest/total.svg?logo=github)](https://github.com/KurtBestor/Hitomi-Downloader/releases/latest)
[![GitHub downloads](https://img.shields.io/github/downloads/KurtBestor/Hitomi-Downloader/total.svg?logo=github)](https://github.com/KurtBestor/Hitomi-Downloader/releases)

## Links
- [Download](https://github.com/KurtBestor/Hitomi-Downloader/releases/latest)
- [Issues](https://github.com/KurtBestor/Hitomi-Downloader/issues)
- [Scripts](https://github.com/KurtBestor/Hitomi-Downloader/wiki/Scripts)
@@ -15,6 +16,19 @@
## Demo
<img src="imgs/how_to_download.gif">

## Features
- 🍰 Simple and clear user interface
- 🚀 Download acceleration
- 💻 Supports 32 threads in a single task
- 🚥 Supports speed limit
- 📜 Supports user scripts
- 🧲 Supports BitTorrent & Magnet
- 🎞️ Supports M3U8 & MPD format videos
- 🌙 Dark mode
- 🧳 Portable
- 📋 Clipboard monitor
- 🗃️ Easy to organize tasks

## Supported Sites
| Site | URL |
| :--: | -- |
@@ -32,6 +46,7 @@
| **Epio** | <https://epio.app> |
| **E(x)Hentai Galleries** | <https://e-hentai.org><br><https://exhentai.org> |
| **Facebook** | <https://facebook.com> |
| **FC2 Video** | <https://video.fc2.com> |
| **Flickr** | <https://flickr.com> |
| **Gelbooru** | <https://gelbooru.com> |
| **hanime.tv** | <https://hanime.tv> |
@@ -44,13 +59,15 @@
| **Jmana** | <https://jmana.net> |
| **カクヨム** | <https://kakuyomu.jp> |
| **LHScan** | <https://loveheaven.net> |
| **Likee** | <https://likee.video> |
| **Luscious** | <https://luscious.net> |
| **Manamoa** | <https://manamoa.net> |
| **MyReadingManga** | <https://myreadingmanga.info> |
| **Naver Blog** | <https://blog.naver.com> |
| **Naver Post** | <https://post.naver.com> |
| **Naver Webtoon** | <https://comic.naver.com> |
| **nhentai** | <https://nhentai.net> |
| **nhentai.com** | <https://nhentai.com> |
| **Niconico** | <http://nicovideo.jp> |
| **ニジエ** | <https://nijie.info> |
| **Pawoo** | <https://pawoo.net> |
@@ -75,3 +92,4 @@
| **XVideos** | <https://xvideos.com> |
| **Yande.re** | <https://yande.re> |
| **YouTube** | <https://youtube.com> |
| **and more...** | [Supported sites by youtube-dl](http://ytdl-org.github.io/youtube-dl/supportedsites.html) |
3 changes: 0 additions & 3 deletions src/extractor/afreeca_downloader.py
@@ -31,9 +31,6 @@ class Downloader_afreeca(Downloader):
single = True
display_name = 'AfreecaTV'

def init(self):
self.url = self.url.replace('afreeca_', '')

def read(self):
session = Session()
video = get_video(self.url, session)
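
Most of this commit repeats the pattern above: the per-extractor `self.url = self.url.replace('<type>_', '')` call is deleted from `init()`. A minimal sketch of the idea, assuming the `<type>_` prefix is now stripped once in a shared place before the extractor runs (the dispatcher itself is not part of this diff, so the helper below is hypothetical):

```python
# Hypothetical helper; the project's real dispatch code may differ.
def strip_type_prefix(url, prefix):
    """Strip a gallery-type prefix such as 'afreeca_' once, and only from the front."""
    if url.startswith(prefix):
        return url[len(prefix):]
    return url

print(strip_type_prefix('afreeca_12345', 'afreeca_'))                    # '12345'
print(strip_type_prefix('https://vod.afreecatv.com/12345', 'afreeca_'))  # unchanged
```
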
4 changes: 2 additions & 2 deletions src/extractor/artstation_downloader.py
@@ -28,7 +28,7 @@ class Downloader_artstation(Downloader):
display_name = 'ArtStation'

def init(self):
self.url_main = 'https://www.artstation.com/{}'.format(self.id.replace('artstation_', '').replace('/', '/'))
self.url_main = 'https://www.artstation.com/{}'.format(self.id.replace('artstation_', '', 1).replace('/', '/'))

if '/artwork/' in self.url:
pass#raise NotImplementedError('Single post')
@@ -53,7 +53,7 @@ def name(self):
def read(self):
cw = self.customWidget
self.title = self.name
id = self.id.replace('artstation_', '').replace('/', '/')
id = self.id.replace('artstation_', '', 1).replace('/', '/')
if '/' in id:
type = id.split('/')[1]
id = id.split('/')[0]
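
The ArtStation extractor keeps its strip but now passes `1` as the `count` argument of `str.replace`. A small illustration of why the count matters when the prefix string could also occur later in the ID (the IDs below are made up):

```python
plain = 'artstation_someone'
print(plain.replace('artstation_', '', 1))   # 'someone'

tricky = 'artstation_fan_of_artstation_art'
print(tricky.replace('artstation_', ''))     # 'fan_of_art': the unbounded replace also eats the inner match
print(tricky.replace('artstation_', '', 1))  # 'fan_of_artstation_art': only the leading prefix is removed
```
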
1 change: 0 additions & 1 deletion src/extractor/asiansister_downloader.py
@@ -13,7 +13,6 @@ class Downloader_asiansister(Downloader):

@try_n(4)
def init(self):
self.url = self.url.replace('asiansister_', '')
html = downloader.read_html(self.url)
self.soup = Soup(html)

1 change: 0 additions & 1 deletion src/extractor/asmhentai_downloader.py
@@ -28,7 +28,6 @@ def init(self):

@classmethod
def fix_url(cls, url):
url = url.replace('asmhentai_', '')
id_ = get_id(url)
return 'https://asmhentai.com/g/{}/'.format(id_)

1 change: 0 additions & 1 deletion src/extractor/avgle_downloader.py
@@ -18,7 +18,6 @@ class Downloader_avgle(Downloader):
URLS = ['avgle.com']

def init(self):
self.url = self.url.replace('avgle_', '', 1)
if not self.customWidget.data_:
link = 'https://github.com/KurtBestor/Hitomi-Downloader/wiki/Chrome-Extension'
webbrowser.open(link)
1 change: 0 additions & 1 deletion src/extractor/baraag_downloader.py
@@ -21,7 +21,6 @@ def init(self):

@classmethod
def fix_url(cls, url):
url = url.replace('baraag_', '')
id_ = get_id(url) or url
return 'https://baraag.net/{}'.format(id_)

1 change: 0 additions & 1 deletion src/extractor/bcy_downloader.py
@@ -16,7 +16,6 @@ class Downloader_bcy(Downloader):
display_name = '半次元'

def init(self):
self.url = self.url.replace('bcy_', '')
self.html = downloader.read_html(self.url)
self.info = get_info(self.url, self.html)

2 changes: 0 additions & 2 deletions src/extractor/bdsmlr_downloader.py
@@ -17,8 +17,6 @@ class Downloader_bdsmlr(Downloader):
display_name = 'BDSMlr'

def init(self):
self.url = self.url.replace('bdsmlr_', '')

if u'bdsmlr.com/post/' in self.url:
return self.Invalid(tr_(u'개별 다운로드는 지원하지 않습니다: {}').format(self.url), fail=False)

6 changes: 3 additions & 3 deletions src/extractor/bili_downloader.py
@@ -69,8 +69,8 @@ class Downloader_bili(Downloader):

def init(self):
self.url = fix_url(self.url, self.customWidget)
if 'bili_' in self.url:
self.url = u'https://www.bilibili.com/video/{}'.format(self.url.replace('bili_', ''))
if 'bilibili.com' not in self.url.lower():
self.url = 'https://www.bilibili.com/video/{}'.format(self.url)
self.url = self.url.replace('m.bilibili', 'bilibili')

@property
@@ -96,7 +96,7 @@ def read(self):
title += (u'_p{}').format(page)
title = format_filename(title, self.id_, '.mp4')[:-4]
n = int(math.ceil(8.0 / len(videos)))
self.customWidget.print_(('n_threads: {}').format(n))
self.print_(('n_threads: {}').format(n))
self.enableSegment(n_threads=n)
self.title = title

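
The bilibili `init()` now branches on the domain instead of the old `bili_` prefix: anything that does not already contain `bilibili.com` is treated as a bare video ID. A sketch of that normalization, rewritten as a standalone function with an illustrative ID:

```python
def normalize_bili(url):
    # Treat non-bilibili input as a bare video ID and build the canonical URL.
    if 'bilibili.com' not in url.lower():
        url = 'https://www.bilibili.com/video/{}'.format(url)
    # Rewrite mobile links to the desktop host.
    return url.replace('m.bilibili', 'bilibili')

print(normalize_bili('BV1xx411c7mD'))
# https://www.bilibili.com/video/BV1xx411c7mD
print(normalize_bili('https://m.bilibili.com/video/BV1xx411c7mD'))
# https://bilibili.com/video/BV1xx411c7mD
```
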
3 changes: 0 additions & 3 deletions src/extractor/comicwalker_downloader.py
@@ -50,9 +50,6 @@ class Downloader_comicwalker(Downloader):
display_name = 'ComicWalker'
_soup = None
pages = None

def init(self):
self.url = url = self.url.replace('comicwalker_', '')

@property
def soup(self):
1 change: 0 additions & 1 deletion src/extractor/danbooru_downloader.py
@@ -20,7 +20,6 @@ class Downloader_danbooru(Downloader):
_name = None

def init(self):
self.url = self.url.replace('danbooru_', '')
if 'donmai.us' in self.url:
self.url = self.url.replace('http://', 'https://')
else:
2 changes: 1 addition & 1 deletion src/extractor/discord_emoji_downloader.py
@@ -37,7 +37,7 @@ def init(self):
pass

def read(self):
token_guild_id_list = self.url.replace("discord_", "", 1).split(
token_guild_id_list = self.url.split(
"/"
) # Not sure how the value will be passed, so it is split for now; it is set up to accept discord_email/password/server_id or discord_token/server_id.

1 change: 0 additions & 1 deletion src/extractor/gelbooru_downloader.py
@@ -36,7 +36,6 @@ class Downloader_gelbooru(Downloader):
_name = None

def init(self):
self.url = self.url.replace('gelbooru_', '')
if 'gelbooru.com' in self.url.lower():
self.url = self.url.replace('http://', 'https://')
else:
1 change: 0 additions & 1 deletion src/extractor/hameln_downloader.py
@@ -18,7 +18,6 @@ class Downloader_hameln(Downloader):
detect_removed = False

def init(self):
self.url = self.url.replace('hameln_', '')
id_ = re.find('/novel/([^/]+)', self.url)
if id_ is not None:
self.url = 'https://syosetu.org/novel/{}/'.format(id_)
4 changes: 0 additions & 4 deletions src/extractor/hanime_downloader.py
@@ -41,10 +41,6 @@ class Downloader_hanime(Downloader):
single = True
display_name = 'hanime.tv'

def init(self):
if self.url.startswith('hanime_'):
self.url = self.url.replace('hanime_', '', 1)

def read(self):
cw = self.customWidget
video, session = get_video(self.url)
2 changes: 0 additions & 2 deletions src/extractor/hf_downloader.py
@@ -44,8 +44,6 @@ def f(_):
def get_username(url):
if 'user/' in url:
username = url.split('user/')[1].split('?')[0].split('/')[0]
else:
username = url.replace('hf_', '')
return username


2 changes: 0 additions & 2 deletions src/extractor/manatoki_downloader.py
@@ -34,8 +34,6 @@ class Downloader_manatoki(Downloader):

@try_n(2)
def init(self):
self.url = self.url.replace('manatoki_', '')

self.session, self.soup, url = get_soup(self.url)
self.url = self.fix_url(url)

1 change: 0 additions & 1 deletion src/extractor/naverpost_downloader.py
@@ -53,7 +53,6 @@ class DownloaderNaverPost(Downloader):
URLS = ["m.post.naver.com", "post.naver.com"]

def init(self):
self.url = self.url.replace("naver_post_", "")
self.parsed_url = urlparse(self.url) # split the url
self.soup = get_soup(self.url)

1 change: 0 additions & 1 deletion src/extractor/nhentai_downloader.py
@@ -15,7 +15,6 @@ class Downloader_nhentai(Downloader):
display_name = 'nhentai'

def init(self):
self.url = self.url.replace('nhentai_', '')
self.url = 'https://nhentai.net/g/{}/'.format(self.id_)

@property
1 change: 0 additions & 1 deletion src/extractor/pawoo_downloader.py
@@ -13,7 +13,6 @@ class Downloader_pawoo(Downloader):
URLS = ['pawoo.net']

def init(self):
self.url = self.url.replace('pawoo_', '')
self.url = 'https://pawoo.net/{}'.format(self.id_)
self.referer = self.url

1 change: 0 additions & 1 deletion src/extractor/pixiv_comic_downloader.py
@@ -33,7 +33,6 @@ class Downloader_pixiv_comic(Downloader):
display_name = 'pixivコミック'

def init(self):
self.url = self.url.replace('pixiv_comic_', '')
if '/viewer/' in self.url:
html = downloader.read_html(self.url)
id = re.find('/works/([0-9]+)', html)
11 changes: 6 additions & 5 deletions src/extractor/pixiv_downloader.py
@@ -44,20 +44,21 @@ class Downloader_pixiv(Downloader):
info = None
_id = None
keep_date = True
strip_header = False
atts = ['_format', '_format_name', 'imgs']

def init(self):
self.url = clean_url(self.url)
url = self.url

# Determine the type
if 'bookmark.php?type=user' in url or headers['following'] in url:
if 'bookmark.php?type=user' in url or url.startswith(headers['following']):
type = 'following'
elif 'bookmark.php' in url or headers['bookmark'] in url or '/bookmarks/' in url:
elif 'bookmark.php' in url or url.startswith(headers['bookmark']) or '/bookmarks/' in url:
type = 'bookmark'
elif 'illust_id=' in url or headers['illust'] in url or '/artworks/' in url:
elif 'illust_id=' in url or url.startswith(headers['illust']) or '/artworks/' in url:
type = 'illust'
elif 'search.php' in url or headers['search'] in url:
elif 'search.php' in url or url.startswith(headers['search']):
type = 'search'
order = query_url(url).get('order', ['date_d'])[0] # data_d, date, popular_d, popular_male_d, popular_female_d
scd = query_url(url).get('scd', [None])[0] # 2019-09-27
@@ -91,7 +92,7 @@ def init(self):
'blt': blt,
'bgt': bgt,
'type': type_}
elif 'id=' in url and 'mode=' not in url or headers['user'] in url or 'pixiv.me' in url or '/users/' in url:
elif 'id=' in url and 'mode=' not in url or url.startswith(headers['user']) or 'pixiv.me' in url or '/users/' in url:
type = 'user'
else:
self.Invalid((u'[pixiv] Can not determine type: {}').format(url))
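
In the pixiv type detection, each `headers[...] in url` test becomes `url.startswith(headers[...])`, turning a substring match into a prefix match. A short example of the difference, with a made-up `headers` table standing in for the module's real one:

```python
headers = {'illust': 'pixiv_illust_'}  # assumed shape; the real values live in pixiv_downloader.py

url = 'https://www.pixiv.net/tags/pixiv_illust_fanart'  # a URL that merely contains the prefix
print(headers['illust'] in url)           # True: the old substring test fires on this non-illust URL
print(url.startswith(headers['illust']))  # False: the new prefix test matches only 'pixiv_illust_...' inputs
```
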
1 change: 0 additions & 1 deletion src/extractor/sankaku_downloader.py
@@ -28,7 +28,6 @@ class Downloader_sankaku(Downloader):
display_name = 'Sankaku Complex'

def init(self):
self.url = self.url.replace('sankaku_', '')
if '/post/' in self.url:
return self.Invalid('Single post is not supported')

1 change: 0 additions & 1 deletion src/extractor/syosetu_downloader.py
@@ -40,7 +40,6 @@ class Downloader_syosetu(Downloader):
display_name = '小説家になろう'

def init(self):
self.url = self.url.replace('syosetu_', '')
self.url = (u'https://ncode.syosetu.com/{}/').format(self.id_)

@property
2 changes: 0 additions & 2 deletions src/extractor/torrent_downloader.py
@@ -28,8 +28,6 @@ def init(self):
global torrent
if torrent is None:
import torrent
if self.url.startswith('torrent_'):
self.url = self.url.replace('torrent_', '', 1)

@property
def name(self):
2 changes: 1 addition & 1 deletion src/extractor/twitch_downloader.py
@@ -21,7 +21,7 @@ def init(self):
url = 'https://' + url
self.url = url
else:
url = 'https://www.twitch.tv/videos/{}'.format(url.replace('twitch_', ''))
url = 'https://www.twitch.tv/videos/{}'.format(url)
self.url = url

@classmethod
17 changes: 13 additions & 4 deletions src/extractor/twitter_downloader.py
@@ -67,7 +67,6 @@ class Downloader_twitter(Downloader):

def init(self):
self.session = get_session()
self.url = self.url.replace('twitter_', '')
#self.url = fix_url(self.url)
self.artist, self.username = get_artist_username(self.url, self.session)
if self.username == 'home':
@@ -84,7 +83,7 @@ def fix_url(cls, url):

@classmethod
def key_id(cls, url):
return cls.fix_url(url).lower()
return url.lower()

def read(self):
cw = self.customWidget
@@ -126,7 +125,10 @@ class TwitterAPI(object):
def __init__(self, session, cw=None):
self.session = session
self.cw = cw
csrf = hashlib.md5(str(time()).encode()).hexdigest()
csrf = session.cookies.get('ct0', domain='.twitter.com')
print('csrf:', csrf)
if not csrf:
csrf = hashlib.md5(str(time()).encode()).hexdigest()
hdr = {
"authorization": AUTH,
"x-twitter-client-language": "en",
@@ -182,7 +184,11 @@ def _call(self, url_api, referer='https://twitter.com', params=None):
if params:
url_api = update_url_query(url_api, params)
#print('call:', url_api)
data = downloader.read_json(url_api, referer, session=self.session)
r = self.session.get(url_api, headers={'Referer': referer})
csrf = r.cookies.get('ct0')
if csrf:
self.session.headers['x-csrf-token'] = csrf
data = json.loads(r.text)
return data

def search(self, query):
@@ -231,6 +237,9 @@ def _pagination(self, url_api, params=None, entry_tweet="tweet-", entry_cursor="
for try_ in range(n_try):
try:
data = self._call(url_api, params=params)
if 'globalObjects' not in data:
try_ = n_try
raise Exception(str(data['errors']))
tweets = data["globalObjects"]["tweets"]
break
except Exception as e:
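
The `TwitterAPI` changes center on the CSRF token: take it from the `ct0` cookie when one exists, fall back to a generated value, and refresh the `x-csrf-token` header whenever a response rotates the cookie. A self-contained sketch of that pattern using `requests`; it mirrors the diff but is not a drop-in copy of the module, which routes calls through its own session helpers:

```python
import hashlib
import json
from time import time

import requests


def make_csrf(session):
    # Prefer the ct0 cookie Twitter has already set for this session.
    csrf = session.cookies.get('ct0', domain='.twitter.com')
    if not csrf:
        # No cookie yet: synthesize a token, as the extractor does.
        csrf = hashlib.md5(str(time()).encode()).hexdigest()
    return csrf


def call_api(session, url_api, referer='https://twitter.com'):
    r = session.get(url_api, headers={'Referer': referer})
    new_csrf = r.cookies.get('ct0')
    if new_csrf:
        # The response rotated the token; keep the header in sync for later calls.
        session.headers['x-csrf-token'] = new_csrf
    return json.loads(r.text)


session = requests.Session()
session.headers['x-csrf-token'] = make_csrf(session)
```

The added `_pagination` guard serves the same robustness goal: when a response has no `globalObjects`, it stops retrying and raises with Twitter's `errors` payload instead.
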
2 changes: 0 additions & 2 deletions src/extractor/wikiart_downloader.py
@@ -25,8 +25,6 @@ class Downloader_wikiart(Downloader):
display_name = 'WikiArt'

def init(self):
self.url = self.url.replace('wikiart_', '')

self.url = u'https://www.wikiart.org/en/{}'.format(self.id_)
html = downloader.read_html(self.url)
self.soup = Soup(html)
1 change: 0 additions & 1 deletion src/extractor/worldcos_downloader.py
@@ -23,7 +23,6 @@ class Downloader_worldcos(Downloader):
display_name = 'World Cosplay'

def init(self):
self.url = self.url.replace('worldcos_', '')
if 'worldcosplay.net' in self.url.lower():
self.url = self.url.replace('http://', 'https://')
else: