mirror of https://github.com/vikstrous/pirate-get synced 2025-01-10 10:04:21 +01:00

simplify results array

Viktor Stanchev 2015-09-03 22:18:38 -07:00
parent b8fa71d141
commit 1f04700158
4 changed files with 54 additions and 47 deletions
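
The commit replaces the parallel collections previously returned by pirate.torrent.remote (tuples of magnet/seeds/leechers plus separate sizes, uploaded and identifiers lists) with a single list of per-torrent dictionaries. A minimal sketch of the before/after shape, with made-up example values:

# Before: parallel lists that every caller had to keep in sync.
mags = [('magnet:?xt=urn:btih:...&dn=Example+Torrent', '12', '3')]  # (magnet, seeds, leechers)
sizes = [['1.4', 'GiB']]
uploaded = ['09-03 2015']
ids = ['4871234']  # hypothetical torrent id

# After: one list of dicts, keyed as in parse_page's `titles` tuple.
results = [{
    'magnet': 'magnet:?xt=urn:btih:...&dn=Example+Torrent',
    'seeds': '12',
    'leechers': '3',
    'size': ['1.4', 'GiB'],
    'uploaded': '09-03 2015',
    'id': '4871234',
}]

url = results[0]['magnet']  # callers index one structure instead of four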

View File

@@ -171,11 +171,11 @@ def main():
path = args.database
else:
path = config.get('LocalDB', 'path')
mags = pirate.local.search(path, args.search)
results = pirate.local.search(path, args.search)
sizes, uploaded = [], []
else:
mags, mirrors = [], {'https://thepiratebay.mn'}
results, mirrors = [], {'https://thepiratebay.mn'}
try:
req = request.Request('https://proxybay.co/list.txt',
headers=pirate.data.default_headers)
@@ -192,7 +192,7 @@ def main():
for mirror in mirrors:
try:
print('Trying', mirror, end='... \n')
mags, sizes, uploaded, ids = pirate.torrent.remote(
results = pirate.torrent.remote(
pages=args.pages,
category=pirate.torrent.parse_category(args.category),
sort=pirate.torrent.parse_sort(args.sort),
@@ -211,18 +211,18 @@ def main():
print('No available mirrors :(', color='WARN')
return
if not mags:
if len(results) == 0:
print('No results')
return
pirate.print.search_results(mags, sizes, uploaded, local=args.database)
pirate.print.search_results(results, local=args.database)
if args.first:
print('Choosing first result')
choices = [0]
elif args.download_all:
print('Downloading all results')
choices = range(len(mags))
choices = range(len(results))
else:
# New input loop to support different link options
while True:
@@ -284,16 +284,16 @@ def main():
print('Bye.', color='alt')
return
elif code == 'd':
pirate.print.descriptions(choices, mags, site, ids)
pirate.print.descriptions(choices, results, site)
elif code == 'f':
pirate.print.file_lists(choices, mags, site, ids)
pirate.print.file_lists(choices, results, site)
elif code == 'p':
pirate.print.search_results(mags, sizes, uploaded)
pirate.print.search_results(results)
elif code == 'm':
pirate.torrent.save_magnets(choices, mags, config.get(
pirate.torrent.save_magnets(choices, results, config.get(
'Save', 'directory'))
elif code == 't':
pirate.torrent.save_torrents(choices, mags, config.get(
pirate.torrent.save_torrents(choices, results, config.get(
'Save', 'directory'))
elif not l:
print('No links entered!', color='WARN')
@@ -307,13 +307,13 @@ def main():
if args.save_magnets or config.getboolean('Save', 'magnets'):
print('Saving selected magnets...')
pirate.torrent.save_magnets(choices, mags, config.get(
pirate.torrent.save_magnets(choices, results, config.get(
'Save', 'directory'))
save_to_file = True
if args.save_torrents or config.getboolean('Save', 'torrents'):
print('Saving selected torrents...')
pirate.torrent.save_torrents(choices, mags, config.get(
pirate.torrent.save_torrents(choices, results, config.get(
'Save', 'directory'))
save_to_file = True
@@ -321,7 +321,7 @@ def main():
return
for choice in choices:
url = mags[int(choice)][0]
url = results[int(choice)]['magnet']
if args.transmission or config.getboolean('Misc', 'transmission'):
subprocess.call(transmission_command + ['--add', url])

View File

@@ -31,7 +31,7 @@ def print(*args, **kwargs):
return builtins.print(*args, **kwargs)
def search_results(mags, sizes, uploaded, local=None):
def search_results(results, local=None):
columns = int(os.popen('stty size', 'r').read().split()[1])
cur_color = 'zebra_0'
@@ -45,21 +45,26 @@ def search_results(mags, sizes, uploaded, local=None):
'SIZE', 'UPLOAD', 'NAME', length=columns - 52),
color='header')
for m, magnet in enumerate(mags):
for n, result in enumerate(results):
# Alternate between colors
cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'
name = re.search(r'dn=([^\&]*)', magnet[0])
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
name = re.search(r'dn=([^\&]*)', result['magnet'])
torrent_name = parse.unquote_plus(name.group(1))
if local:
line = '{:5} {:{length}}'
content = [m, torrent_name[:columns]]
content = [n, torrent_name[:columns]]
else:
no_seeders, no_leechers = map(int, magnet[1:])
size, unit = (float(sizes[m][0]),
sizes[m][1]) if sizes else (0, '???')
date = uploaded[m]
no_seeders = int(result['seeds'])
no_leechers = int(result['leechers'])
if result['size'] != []:
size = float(result['size'][0])
unit = result['size'][1]
else:
size = 0
unit = '???'
date = result['uploaded']
# compute the S/L ratio (Higher is better)
try:
@@ -69,17 +74,17 @@ def search_results(mags, sizes, uploaded, local=None):
line = ('{:4} {:5} {:5} {:5.1f} {:5.1f}'
' {:3} {:<11} {:{length}}')
content = [m, no_seeders, no_leechers, ratio,
content = [n, no_seeders, no_leechers, ratio,
size, unit, date, torrent_name[:columns - 52]]
# enhanced print output with justified columns
print(line.format(*content, length=columns - 52), color=cur_color)
def descriptions(chosen_links, mags, site, identifiers):
def descriptions(chosen_links, results, site):
for link in chosen_links:
link = int(link)
path = '/torrent/%s/' % identifiers[link]
path = '/torrent/%s/' % results[link]['id']
req = request.Request(site + path, headers=pirate.data.default_headers)
req.add_header('Accept-encoding', 'gzip')
f = request.urlopen(req, timeout=pirate.data.default_timeout)
@@ -88,7 +93,7 @@ def descriptions(chosen_links, mags, site, identifiers):
f = gzip.GzipFile(fileobj=BytesIO(f.read()))
res = f.read().decode('utf-8')
name = re.search(r'dn=([^\&]*)', mags[link][0])
name = re.search(r'dn=([^\&]*)', results[link]['magnet'])
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
desc = re.search(r'<div class="nfo">\s*<pre>(.+?)(?=</pre>)',
res, re.DOTALL).group(1)
@@ -101,10 +106,11 @@ def descriptions(chosen_links, mags, site, identifiers):
print(desc, color='zebra_0')
def file_lists(chosen_links, mags, site, identifiers):
def file_lists(chosen_links, results, site):
for link in chosen_links:
link = int(link)
path = '/ajax_details_filelist.php'
query = '?id=' + identifiers[int(link)]
query = '?id=' + results[link]['id']
req = request.Request(site + path + query,
headers=pirate.data.default_headers)
req.add_header('Accept-encoding', 'gzip')
@@ -113,10 +119,14 @@ def file_lists(chosen_links, mags, site, identifiers):
if f.info().get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=BytesIO(f.read()))
# TODO: proper html decoding/parsing
res = f.read().decode('utf-8').replace('&nbsp;', ' ')
if 'File list not available.' in res:
print('File list not available.')
return
files = re.findall(r'<td align="left">\s*([^<]+?)\s*</td><td ali'
r'gn="right">\s*([^<]+?)\s*</tr>', res)
name = re.search(r'dn=([^\&]*)', mags[int(link)][0])
name = re.search(r'dn=([^\&]*)', results[link]['magnet'])
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
print('Files in "%s":' % torrent_name, color='zebra_1')

View File

@@ -75,6 +75,7 @@ def build_request_path(page, category, sort, mode, terms):
raise Exception('Unknown mode.')
# this returns a list of dictionaries
def parse_page(html):
d = pq(html)
@@ -97,7 +98,7 @@ def parse_page(html):
d('table#searchResult tr>td:nth-child(3)')))
leechers = list(map(lambda l: pq(l).text(),
d('table#searchResult tr>td:nth-child(4)')))
identifiers = list(map(lambda l: pq(l).attr('href').split('/')[2],
ids = list(map(lambda l: pq(l).attr('href').split('/')[2],
d('table#searchResult .detLink')))
sizes = []
@@ -108,14 +109,13 @@ def parse_page(html):
sizes.append(re.findall(r'(?<=Size )[0-9.]+\s[KMGT]*[i ]*B', text)[0].split())
uploaded.append(re.findall(r'(?<=Uploaded ).+(?=\, Size)', text)[0])
return list(zip(magnets,seeds,leechers)), sizes, uploaded, identifiers
titles = ('magnet', 'seeds', 'leechers', 'size', 'uploaded', 'id')
rows = list(zip(magnets, seeds, leechers, sizes, uploaded, ids))
return [dict(zip(titles,row)) for row in rows]
def remote(pages, category, sort, mode, terms, mirror):
res_l = []
sizes = []
uploaded = []
identifiers = []
if pages < 1:
raise ValueError('Please provide an integer greater than 0 '
@@ -134,18 +134,13 @@ def remote(pages, category, sort, mode, terms, mirror):
f = gzip.GzipFile(fileobj=BytesIO(f.read()))
res = f.read().decode('utf-8')
page_res_l, page_sizes, page_uploaded, page_identifiers = parse_page(res)
res_l += page_res_l
sizes += page_sizes
uploaded += page_uploaded
identifiers += page_identifiers
res_l += parse_page(res)
except KeyboardInterrupt:
print('\nCancelled.')
sys.exit(0)
# return the sizes in a separate list
return res_l, sizes, uploaded, identifiers
return res_l
def get_torrent(info_hash):
@@ -161,9 +156,10 @@ def get_torrent(info_hash):
return torrent.read()
def save_torrents(chosen_links, mags, folder):
def save_torrents(chosen_links, results, folder):
for link in chosen_links:
magnet = mags[int(link)][0]
link = int(link)
magnet = results[link]['magnet']
name = re.search(r'dn=([^\&]*)', magnet)
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
info_hash = int(re.search(r'btih:([a-f0-9]{40})', magnet).group(1), 16)
@@ -180,7 +176,8 @@ def save_torrents(chosen_links, mags, folder):
def save_magnets(chosen_links, mags, folder):
for link in chosen_links:
magnet = mags[int(link)][0]
link = int(link)
magnet = results[link]['magnet']
name = re.search(r'dn=([^\&]*)', magnet)
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
info_hash = int(re.search(r'btih:([a-f0-9]{40})', magnet).group(1), 16)

File diff suppressed because one or more lines are too long
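
For reference, the dictionary construction that parse_page now performs is plain zip composition: the scraped column lists are zipped into per-torrent rows, and each row is zipped against the field names. A small self-contained Python sketch with dummy column data (values are illustrative only, not taken from the commit):

# Dummy columns standing in for the lists scraped with pyquery.
titles = ('magnet', 'seeds', 'leechers', 'size', 'uploaded', 'id')
magnets = ['magnet:?xt=urn:btih:aaa', 'magnet:?xt=urn:btih:bbb']
seeds = ['10', '5']
leechers = ['2', '0']
sizes = [['700', 'MiB'], ['1.2', 'GiB']]
uploaded = ['03-15 2015', 'Y-day 12:30']
ids = ['111', '222']

# Zip the columns into rows, then label each row's fields by name.
rows = list(zip(magnets, seeds, leechers, sizes, uploaded, ids))
results = [dict(zip(titles, row)) for row in rows]

print(results[1]['size'])   # ['1.2', 'GiB']
print(results[0]['seeds'])  # 10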