#!/usr/bin/env python
#
# Copyright 2014, Viktor Stanchev and contributors
#
# This file is part of pirate-get.
#
# pirate-get is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pirate-get is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with pirate-get. If not, see <http://www.gnu.org/licenses/>.
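
# Example invocations (assuming the script is installed on PATH as "pirate-get"):
#
#   pirate-get "debian iso"          # search, then pick link numbers interactively
#   pirate-get -R -c Video           # most recent torrents in the Video category
#   pirate-get -0 -t "debian iso"    # send the top result to transmission-remote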

import os
import random
import re
import string
import gzip
import configparser
import argparse
import builtins

import webbrowser
import urllib.request as request
import urllib.parse as parse

from html.parser import HTMLParser
from pprint import pprint
from io import StringIO, BytesIO


class NoRedirection(request.HTTPErrorProcessor):
    def http_response(self, request, response):
        return response

    https_response = http_response


# create a subclass and override the handler methods
class MyHTMLParser(HTMLParser):
    title = ''
    q = ''
    state = 'looking'
    results = []

    def __init__(self, q):
        HTMLParser.__init__(self)
        self.q = q.lower()

    def handle_starttag(self, tag, attrs):
        if tag == 'title':
            self.state = 'title'
        if tag == 'magnet' and self.state == 'matched':
            self.state = 'magnet'

    def handle_data(self, data):
        if self.state == 'title':
            if data.lower().find(self.q) != -1:
                self.title = data
                self.state = 'matched'
            else:
                self.state = 'looking'
        if self.state == 'magnet':
            self.results.append([
                'magnet:?xt=urn:btih:' +
                parse.quote(data) +
                '&dn=' +
                parse.quote(self.title), '?', '?'])
            self.state = 'looking'


def main():
    # new ConfigParser
    config = configparser.ConfigParser()

    # default options so we don't die later
    config.add_section('SaveToFile')
    config.set('SaveToFile', 'enabled', 'false')
    config.set('SaveToFile', 'directory', '~/downloads/pirate-get/')

    # load user options, to override default ones
    def config_to_load():
        if os.path.isfile(os.path.expandvars('$XDG_CONFIG_HOME/pirate-get')):
            return os.path.expandvars('$XDG_CONFIG_HOME/pirate-get')
        else:
            return os.path.expanduser('~/.config/pirate-get')

    config.read([config_to_load()])

    parser = argparse.ArgumentParser(
        description='finds and downloads torrents from the Pirate Bay')
    parser.add_argument('-b', dest='browse',
                        action='store_true',
                        help="display in Browse mode")
    parser.add_argument('search', metavar='search',
                        nargs="*", help="term to search for")
    parser.add_argument('-c', dest='category', metavar='category',
                        help="specify a category to search", default="All")
    parser.add_argument('-s', dest='sort', metavar='sort',
                        help="specify a sort option", default="SeedersDsc")
    parser.add_argument('-R', dest='recent', action='store_true',
                        help="torrents uploaded in the last 48 hours. "
                             "*ignored in searches*")
    parser.add_argument('-l', dest='list_categories',
                        action='store_true',
                        help="list categories")
    parser.add_argument('--list_sorts', dest='list_sorts',
                        action='store_true',
                        help="list Sortable Types")
    parser.add_argument('-t', dest='transmission',
                        action='store_true',
                        help="call transmission-remote to start the download")
    parser.add_argument('--custom', dest='command',
                        help="call custom command, %%s will be replaced with "
                             "the url")
    parser.add_argument('--local', dest='database',
                        help="an xml file containing the Pirate Bay database")
    parser.add_argument('-p', dest='pages', default=1,
                        help="the number of pages to fetch (doesn't work with "
                             "--local)")
    parser.add_argument('-0', dest='first',
                        action='store_true',
                        help="choose the top result")
    parser.add_argument('-a', dest='download_all',
                        action='store_true',
                        help="download all results")
    parser.add_argument('--color', dest='color',
                        action='store_true',
                        help="use colored output")

    categories = {
        "All": "0",
        "Audio": "100",
        "Audio/Music": "101",
        "Audio/Audio books": "102",
        "Audio/Sound clips": "103",
        "Audio/FLAC": "104",
        "Audio/Other": "199",
        "Video": "200",
        "Video/Movies": "201",
        "Video/Movies DVDR": "202",
        "Video/Music videos": "203",
        "Video/Movie clips": "204",
        "Video/TV shows": "205",
        "Video/Handheld": "206",
        "Video/HD - Movies": "207",
        "Video/HD - TV shows": "208",
        "Video/3D": "209",
        "Video/Other": "299",
        "Applications": "300",
        "Applications/Windows": "301",
        "Applications/Mac": "302",
        "Applications/UNIX": "303",
        "Applications/Handheld": "304",
        "Applications/IOS (iPad/iPhone)": "305",
        "Applications/Android": "306",
        "Applications/Other OS": "399",
        "Games": "400",
        "Games/PC": "401",
        "Games/Mac": "402",
        "Games/PSx": "403",
        "Games/XBOX360": "404",
        "Games/Wii": "405",
        "Games/Handheld": "406",
        "Games/IOS (iPad/iPhone)": "407",
        "Games/Android": "408",
        "Games/Other": "499",
        "Porn": "500",
        "Porn/Movies": "501",
        "Porn/Movies DVDR": "502",
        "Porn/Pictures": "503",
        "Porn/Games": "504",
        "Porn/HD - Movies": "505",
        "Porn/Movie clips": "506",
        "Porn/Other": "599",
        "Other": "600",
        "Other/E-books": "601",
        "Other/Comics": "602",
        "Other/Pictures": "603",
        "Other/Covers": "604",
        "Other/Physibles": "605",
        "Other/Other": "699"}

    sorts = {
        "TitleDsc": "1", "TitleAsc": "2",
        "DateDsc": "3", "DateAsc": "4",
        "SizeDsc": "5", "SizeAsc": "6",
        "SeedersDsc": "7", "SeedersAsc": "8",
        "LeechersDsc": "9", "LeechersAsc": "10",
        "CategoryDsc": "13", "CategoryAsc": "14",
        "Default": "99"}

    # todo: redo this with html parser instead of regex
    def remote(args, mirror):
        res_l = []
        try:
            pages = int(args.pages)
            if pages < 1:
                raise Exception('')
        except Exception:
            raise Exception("Please provide an integer greater than 0 "
                            "for the number of pages to fetch.")

        if str(args.category) in categories.values():
            category = args.category
        elif args.category in categories.keys():
            category = categories[args.category]
        else:
            category = "0"
            print("Invalid category ignored", color="WARN")

        if str(args.sort) in sorts.values():
            sort = args.sort
        elif args.sort in sorts.keys():
            sort = sorts[args.sort]
        else:
            sort = "99"
            print("Invalid sort ignored", color="WARN")

        # Catch the Ctrl-C exception and exit cleanly
        try:
            sizes = []
            uploaded = []
            identifiers = []
            for page in range(pages):
                if args.browse:
                    path = "/browse/"
                    if category == "0":
                        category = '100'
                    path = '/browse/' + '/'.join(str(i) for i in (
                        category, page, sort))
                elif len(args.search) == 0:
                    path = "/top/48h" if args.recent else "/top/"
                    if category == "0":
                        path += 'all'
                    else:
                        path += category
                else:
                    path = '/search/' + '/'.join(str(i) for i in (
                        "+".join(args.search),
                        page, sort,
                        category))

                req = request.Request(mirror + path)
                req.add_header('Accept-encoding', 'gzip')
                f = request.urlopen(req)
                if f.info().get('Content-Encoding') == 'gzip':
                    f = gzip.GzipFile(fileobj=BytesIO(f.read()))
                res = f.read().decode('utf-8')
                found = re.findall('"(magnet\:\?xt=[^"]*)|<td align="right">'
                                   '([^<]+)</td>', res)

                # check for a blocked mirror
                no_results = re.search("\"No hits\.", res)
                if found == [] and no_results is None:
                    # Contradiction - we found no results,
                    # but the page didn't say there were no results.
                    # The page is probably not actually the pirate bay,
                    # so let's try another mirror
                    raise Exception("Blocked mirror detected.")

                # get sizes as well and substitute the &nbsp; character
                sizes.extend([match.replace("&nbsp;", " ")
                              for match in re.findall("(?<=Size )[0-9.]"
                                                      "+\&nbsp\;[KMGT]*[i ]*B",
                                                      res)])

                uploaded.extend([match.replace("&nbsp;", " ")
                                 for match in re.findall("(?<=Uploaded )"
                                                         ".+(?=\, Size)", res)])

                identifiers.extend([match.replace("&nbsp;", " ")
                                    for match in re.findall("(?<=/torrent/)"
                                                            "[0-9]+(?=/)", res)])

                # pair every magnet link with the seed and leech counts
                # that follow it on the page
                state = "seeds"
                curr = ['', 0, 0]  # magnet, seeds, leeches
                for f in found:
                    if f[1] == '':
                        curr[0] = f[0]
                    else:
                        if state == 'seeds':
                            curr[1] = f[1]
                            state = 'leeches'
                        else:
                            curr[2] = f[1]
                            state = 'seeds'
                            res_l.append(curr)
                            curr = ['', 0, 0]
        except KeyboardInterrupt:
            print("\nCancelled.")
            exit()

        # return the sizes in a separate list
        return res_l, sizes, uploaded, identifiers

    args = parser.parse_args()

    def make_print():
        if args.color:
            import colorama
            colorama.init()
            color_dict = {
                "default": "",
                "header": colorama.Back.WHITE + colorama.Fore.BLACK,
                "zebra_0": "",
                "zebra_1": colorama.Style.BRIGHT,
                "WARN": colorama.Fore.YELLOW,
                "ERROR": colorama.Fore.RED}

            def n_print(*args, **kwargs):
                """Print with colors"""
                try:
                    c = color_dict[kwargs.pop("color")]
                    args = (c + args[0],) + args[1:] + (colorama.Style.RESET_ALL,)
                except KeyError as e:
                    pass
                except IndexError as e:
                    pass
                return builtins.print(*args, **kwargs)
        else:
            def n_print(*args, **kwargs):
                if "color" in kwargs:
                    kwargs.pop('color')
                return builtins.print(*args, **kwargs)
        return n_print

    print = make_print()

    def local(args):
        xml_str = ''
        with open(args.database, 'r') as f:
            xml_str += f.read()
        # argparse stores the query under 'search' (there is no 'q' attribute),
        # so join the search terms back into a single query string
        htmlparser = MyHTMLParser(' '.join(args.search))
        htmlparser.feed(xml_str)
        return htmlparser.results

    if args.list_categories:
        cur_color = "zebra_0"
        for key, value in sorted(categories.items()):
            cur_color = "zebra_0" if (cur_color == "zebra_1") else "zebra_1"
            print(str(value) + "\t" + key, color=cur_color)
        return

    if args.list_sorts:
        cur_color = "zebra_0"
        for key, value in sorted(sorts.items()):
            cur_color = "zebra_0" if (cur_color == "zebra_1") else "zebra_1"
            print(str(value) + "\t" + key, color=cur_color)
        return

    if args.database:
        mags = local(args)
    else:
        mirrors = ["https://pirateproxy.sx"]
        try:
            opener = request.build_opener(NoRedirection)
            f = opener.open("https://proxybay.info/list.txt")
            if f.getcode() != 200:
                raise Exception("The pirate bay responded with an error.")
            res = f.read().decode('utf8')
            # skip the three header lines, then add each listed mirror
            mirrors.extend(res.split("\n")[3:])
        except:
            print("Could not fetch additional mirrors", color="WARN")
        for mirror in mirrors:
            try:
                print("Trying " + mirror)
                mags, sizes, uploaded, identifiers = remote(args, mirror)
                break
            except Exception as e:
                print(format(e))
                print("Could not contact", mirror, color="WARN")

    if not mags or len(mags) == 0:
        print("no results")
        return

    # enhanced print output with column titles
    def print_search_results():
        columns = int(os.popen('stty size', 'r').read().split()[1]) - 52
        cur_color = "zebra_0"

        print("%5s %6s %6s %-5s %-11s %-11s %-*s"
              % ("LINK", "SEED", "LEECH", "RATIO",
                 "SIZE", "UPLOAD", columns, "NAME"),
              color="header")

        for m in range(len(mags)):
            magnet = mags[m]
            no_seeders = int(magnet[1])
            no_leechers = int(magnet[2])
            name = re.search("dn=([^\&]*)", magnet[0])

            # compute the S/L ratio (higher is better)
            try:
                ratio = no_seeders / no_leechers
            except ZeroDivisionError:
                ratio = -1

            # Alternate between colors
            cur_color = "zebra_0" if (cur_color == "zebra_1") else "zebra_1"

            torrent_name = parse.unquote(name.group(1)).replace("+", " ")
            # enhanced print output with justified columns
            print("%5d %6d %6d %5.1f %-11s %-11s %s" % (
                m, no_seeders, no_leechers, ratio, sizes[m],
                uploaded[m], torrent_name[:columns]), color=cur_color)

    def print_descriptions(chosen_links):
        for link in chosen_links:
            link = int(link)
            path = '/torrent/%s/' % identifiers[link]
            req = request.Request(mirror + path)
            req.add_header('Accept-encoding', 'gzip')
            f = request.urlopen(req)

            if f.info().get('Content-Encoding') == 'gzip':
                f = gzip.GzipFile(fileobj=BytesIO(f.read()))

            res = f.read().decode('utf-8')
            name = re.search("dn=([^\&]*)", mags[link][0])
            torrent_name = parse.unquote(name.group(1)).replace("+", " ")
            desc = re.search(r"<div class=\"nfo\">\s*<pre>(.+?)(?=</pre>)",
                             res, re.DOTALL).group(1)

            # Replace HTML links with markdown style versions
            desc = re.sub(r"<a href=\"\s*([^\"]+?)\s*\"[^>]*>(\s*)([^<]+?)(\s*"
                          r")</a>", r"\2[\3](\1)\4", desc)

            print('Description for "' + torrent_name + '":', color="zebra_1")
            print(desc, color="zebra_0")

    def print_fileLists(chosen_links):
        for link in chosen_links:
            path = '/ajax_details_filelist.php'
            query = '?id=' + identifiers[int(link)]
            req = request.Request(mirror + path + query)
            req.add_header('Accept-encoding', 'gzip')
            f = request.urlopen(req)

            if f.info().get('Content-Encoding') == 'gzip':
                f = gzip.GzipFile(fileobj=BytesIO(f.read()))

            # decode before replacing, the response body is bytes
            res = f.read().decode('utf-8').replace("&nbsp;", " ")
            files = re.findall(r"<td align=\"left\">\s*([^<]+?)\s*</td><td ali"
                               r"gn=\"right\">\s*([^<]+?)\s*</tr>", res)
            name = re.search("dn=([^\&]*)", mags[int(link)][0])
            torrent_name = parse.unquote(name.group(1)).replace("+", " ")

            print('Files in "' + torrent_name + '":', color="zebra_1")
            cur_color = "zebra_0"

            for f in files:
                print("%-11s %s" % (f[1], f[0]), color=cur_color)
                cur_color = "zebra_0" if (cur_color == "zebra_1") else "zebra_1"

    print_search_results()

    if args.first:
        print("Choosing first result")
        choices = [0]
    elif args.download_all:
        print("Downloading all results")
        choices = range(len(mags))
    else:
        # New input loop to support different link options
        while True:
            try:
                l = input("Select link(s) (Type 'h' for more options "
                          "['q' to quit]): ")
            except KeyboardInterrupt:
                print("\nCancelled.")
                exit()

            try:
                # Very permissive handling
                # Check for any occurrence of h, d, f, p or q
                cmd_code_match = re.search(r'([hdfpq])', l, flags=re.IGNORECASE)
                if cmd_code_match:
                    code = cmd_code_match.group(0).lower()
                else:
                    code = None
                # Clean up command codes
                l = re.sub(r"^[hdfp, ]*|[hdfp, ]*$", "", l)
                # Substitute multiple consecutive spaces/commas for a single comma
                l = re.sub("[ ,]+", ",", l)
                # Remove anything that isn't an integer or comma.
                l = re.sub("[^0-9,]", "", l)
                # Turn into list
                choices = l.split(",")
                # Act on option, if supplied
                if code == 'h':
                    print("Options:",
                          "<links>: Download selected torrents",
                          "[d<links>]: Get descriptions",
                          "[f<links>]: Get files",
                          "[p] Print search results",
                          "[q] Quit", sep="\n")
                    continue
                elif code == 'q':
                    print("User Cancelled.")
                    exit()
                elif code == 'd':
                    print_descriptions(choices)
                    continue
                elif code == 'f':
                    print_fileLists(choices)
                    continue
                elif code == 'p':
                    print_search_results()
                    continue
                elif not l:
                    print('No links entered!')
                    continue
                else:
                    break
            except Exception as e:
                print('Exception:')
                print(str(e))
                choices = ()
                break

    if config.getboolean('SaveToFile', 'enabled'):
        # Save to file is enabled
        fileName = os.path.expanduser(
            config.get('SaveToFile', 'directory')
            ) + id_generator() + '.magnet'

        print("Saving to File: " + fileName)

        f = open(fileName, 'w')
        for choice in choices:
            choice = int(choice)
            url = mags[choice][0]
            f.write(url + '\n')
        f.close()
    else:
        # use transmission as default
        for choice in choices:
            choice = int(choice)
            url = mags[choice][0]
            print(url)
            if args.transmission:
                os.system('transmission-remote --add "%s" ' % (url))
                os.system('transmission-remote -l')
            elif args.command:
                os.system(args.command % (url))
            else:
                webbrowser.open(url)


def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))


if __name__ == "__main__":
    main()