Mirror of https://github.com/vikstrous/pirate-get (synced 2025-01-10 10:04:21 +01:00)
Fix PEP8 errors and unused import
commit 2212e292f0 (parent 06bf8ed713)
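The commit message does not name the checker that flagged these issues. Purely as an illustration, a report along the lines below (using pycodestyle, with assumed file paths) would surface the over-long lines that the hunks below wrap; the unused import is a pyflakes/flake8 finding rather than a pycodestyle one.

    # Hypothetical style check, not part of this commit; the paths are guesses.
    import pycodestyle

    style = pycodestyle.StyleGuide()  # default max line length is 79
    report = style.check_files(['pirate/print.py', 'pirate/torrent.py'])
    print('style violations found:', report.total_errors)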
@@ -34,7 +34,8 @@ def parse_config_file(text):
     config.set('LocalDB', 'path', expanduser('~/downloads/pirate-get/db'))

     config.add_section('Misc')
-    # TODO: try to use https://docs.python.org/3/library/configparser.html#configparser.BasicInterpolation for interpolating in the command
+    # TODO: try to use configparser.BasicInterpolation
+    # for interpolating in the command
     config.set('Misc', 'openCommand', '')
     config.set('Misc', 'transmission', 'false')
     config.set('Misc', 'colors', 'true')
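The TODO above refers to configparser's interpolation. A minimal sketch of how configparser.BasicInterpolation could expand a placeholder inside openCommand; the %(magnet)s key and the sample values are hypothetical, not taken from pirate-get.

    # Sketch only: BasicInterpolation expands %(name)s references when the
    # value is read; here the magnet link is supplied at get() time via vars.
    import configparser

    config = configparser.ConfigParser(
        interpolation=configparser.BasicInterpolation())
    config.add_section('Misc')
    config.set('Misc', 'openCommand', 'xdg-open %(magnet)s')

    command = config.get('Misc', 'openCommand',
                         vars={'magnet': 'magnet:?xt=urn:btih:0123abcd'})
    print(command)  # xdg-open magnet:?xt=urn:btih:0123abcd

BasicInterpolation is already ConfigParser's default, so passing it explicitly only makes the mechanism visible.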
@@ -239,11 +240,13 @@ def search_mirrors(printer, pages, category, sort, action, search):
                               headers=pirate.data.default_headers)
         f = request.urlopen(req, timeout=pirate.data.default_timeout)
     except IOError:
-        printer.print('Could not fetch additional mirrors', color='WARN')
+        printer.print('Could not fetch additional mirrors',
+                      color='WARN')
     else:
         if f.getcode() != 200:
             raise IOError('The proxy bay responded with an error.')
-        for mirror in [i.decode('utf-8').strip() for i in f.readlines()][3:]:
+        for mirror in [i.decode('utf-8').strip()
+                       for i in f.readlines()][3:]:
             mirrors[mirror] = None
     for mirror in pirate.data.blacklist:
         if mirror in mirrors:
@@ -305,7 +308,8 @@ def pirate_main(args):
     if args.source == 'local_tpb':
         results = pirate.local.search(args.database, args.search)
     elif args.source == 'tpb':
-        results, site = search_mirrors(printer, args.pages, args.category, args.sort, args.action, args.search)
+        results, site = search_mirrors(printer, args.pages, args.category,
+                                       args.sort, args.action, args.search)

     if len(results) == 0:
         printer.print('No results')
@@ -354,9 +358,11 @@ def pirate_main(args):
         elif code == 'p':
             printer.search_results(results)
         elif code == 'm':
-            pirate.torrent.save_magnets(printer, choices, results, args.save_directory)
+            pirate.torrent.save_magnets(printer, choices, results,
+                                        args.save_directory)
         elif code == 't':
-            pirate.torrent.save_torrents(printer, choices, results, args.save_directory)
+            pirate.torrent.save_torrents(printer, choices, results,
+                                         args.save_directory)
         elif not l:
             printer.print('No links entered!', color='WARN')
         else:

@@ -1,6 +1,5 @@
 import builtins
 import re
 import os
 import gzip
 import urllib.parse as parse
 import urllib.request as request
@@ -37,8 +36,8 @@ class Printer:
         kwargs.pop('color', None)
         return builtins.print(*args, **kwargs)

-
-    # TODO: extract the name from the search results instead of from the magnet link when possible
+    # TODO: extract the name from the search results
+    # instead of from the magnet link when possible
     def search_results(self, results, local=None):
         columns = shutil.get_terminal_size((80, 20)).columns
         even = True
@@ -46,7 +45,9 @@ class Printer:
         if local:
             table = veryprettytable.VeryPrettyTable(['LINK', 'NAME'])
         else:
-            table = veryprettytable.VeryPrettyTable(['LINK', 'SEED', 'LEECH', 'RATIO', 'SIZE', '', 'UPLOAD', 'NAME'])
+            table = veryprettytable.VeryPrettyTable(['LINK', 'SEED', 'LEECH',
+                                                     'RATIO', 'SIZE', '',
+                                                     'UPLOAD', 'NAME'])
         table.align['NAME'] = 'l'
         table.align['SEED'] = 'r'
         table.align['LEECH'] = 'r'
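A small usage sketch for the table built above; veryprettytable follows the PrettyTable API that the rest of this diff relies on (column alignment, add_row, printing the table), and the row values here are invented.

    # Illustration only: same column layout, one fake row.
    import veryprettytable

    table = veryprettytable.VeryPrettyTable(['LINK', 'SEED', 'LEECH',
                                             'RATIO', 'SIZE', '',
                                             'UPLOAD', 'NAME'])
    table.align['NAME'] = 'l'
    table.align['SEED'] = 'r'
    table.align['LEECH'] = 'r'
    table.add_row([0, 12, 3, '4.0', '1.4', 'GiB', '03-14 2015', 'example'])
    print(table)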
@@ -82,8 +83,10 @@ class Printer:
             except ZeroDivisionError:
                 ratio = float('inf')

-            content = [n, no_seeders, no_leechers, '{:.1f}'.format(ratio),
-                       '{:.1f}'.format(size), unit, date, torrent_name[:columns - 53]]
+            content = [n, no_seeders, no_leechers,
+                       '{:.1f}'.format(ratio),
+                       '{:.1f}'.format(size),
+                       unit, date, torrent_name[:columns - 53]]

             if even or not self.enable_color:
                 table.add_row(content)
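The content row above depends on the ratio fallback from the surrounding try/except; a tiny self-contained illustration with made-up numbers:

    # Seeder/leecher ratio with the same ZeroDivisionError fallback as above.
    no_seeders, no_leechers = 42, 0
    try:
        ratio = no_seeders / no_leechers
    except ZeroDivisionError:
        ratio = float('inf')
    print('{:.1f}'.format(ratio))  # inf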
@@ -94,11 +97,11 @@ class Printer:
             even = not even
         self.print(table)

-
     def descriptions(self, chosen_links, results, site):
         for link in chosen_links:
             path = '/torrent/%s/' % results[link]['id']
-            req = request.Request(site + path, headers=pirate.data.default_headers)
+            req = request.Request(site + path,
+                                  headers=pirate.data.default_headers)
             req.add_header('Accept-encoding', 'gzip')
             f = request.urlopen(req, timeout=pirate.data.default_timeout)

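The request built above asks for a gzip-compressed response; a generic sketch of that fetch-and-decompress pattern, with a placeholder URL and headers rather than pirate-get's real values:

    # Sketch only: request gzip, then decompress if the server used it.
    import gzip
    import urllib.request as request

    req = request.Request('https://example.org/torrent/12345/',
                          headers={'User-Agent': 'sketch'})
    req.add_header('Accept-encoding', 'gzip')
    f = request.urlopen(req, timeout=10)

    body = f.read()
    if f.info().get('Content-Encoding') == 'gzip':
        body = gzip.decompress(body)
    html = body.decode('utf-8')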
@@ -118,7 +121,6 @@ class Printer:
             self.print('Description for "%s":' % torrent_name, color='zebra_1')
             self.print(desc, color='zebra_0')

-
     def file_lists(self, chosen_links, results, site):
         for link in chosen_links:
             path = '/ajax_details_filelist.php'
@@ -146,4 +148,4 @@ class Printer:

         for f in files:
             self.print('{0[0]:>11} {0[1]}'.format(f), color=cur_color)
-            cur_color = 'zebra_0' if (cur_color == 'zebra_1') else 'zebra_1'
+            cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'

@@ -44,10 +44,12 @@ def parse_sort(printer, sort):
         return 99


-# TODO: warn users when using a sort in a mode that doesn't accept sorts
-# TODO: warn users when using search terms in a mode that doesn't accept search terms
-# TODO: same with page parameter for top and top48h
-# TODO: warn the user if trying to use a minor category with top48h
+# TODO:
+# * warn users when using a sort in a mode that doesn't accept sorts
+# * warn users when using search terms in a mode
+# that doesn't accept search terms
+# * same with page parameter for top and top48h
+# * warn the user if trying to use a minor category with top48h
 def build_request_path(page, category, sort, mode, terms):
     if mode == 'browse':
         if(category == 0):
@@ -105,8 +107,10 @@ def parse_page(html):

         # parse descriptions separately
         description = row.find('font', class_='detDesc').text
-        size = re.findall(r'(?<=Size )[0-9.]+\s[KMGT]*[i ]*B', description)[0].split()
-        uploaded = re.findall(r'(?<=Uploaded ).+(?=\, Size)', description)[0]
+        size = re.findall(r'(?<=Size )[0-9.]+\s[KMGT]*[i ]*B',
+                          description)[0].split()
+        uploaded = re.findall(r'(?<=Uploaded ).+(?=\, Size)',
+                              description)[0]

         results.append({
             'magnet': magnet,
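The two lookaround patterns above pull the size and upload date out of the detDesc text; applied to a made-up description string they behave like this:

    # Hypothetical detDesc text, only to show what the two regexes extract.
    import re

    description = 'Uploaded 03-14 2015, Size 1.4 GiB, ULed by someone'
    size = re.findall(r'(?<=Size )[0-9.]+\s[KMGT]*[i ]*B',
                      description)[0].split()
    uploaded = re.findall(r'(?<=Uploaded ).+(?=\, Size)',
                          description)[0]
    print(size)      # ['1.4', 'GiB']
    print(uploaded)  # 03-14 2015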
@@ -174,7 +178,8 @@ def save_torrents(printer, chosen_links, results, folder):
         try:
             torrent = get_torrent(info_hash)
         except urllib.error.HTTPError:
-            printer.print('There is no cached file for this torrent :(', color='ERROR')
+            printer.print('There is no cached file for this torrent :(',
+                          color='ERROR')
         else:
             open(file, 'wb').write(torrent)
             printer.print('Saved {:X} in {}'.format(info_hash, file))