import urllib.parse as parse
import base64
import csv


# this is used to remove null bytes from the input stream because
# apparently they exist
def replace_iter(iterable):
    for value in iterable:
        yield value.replace("\0", "")
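# A quick illustration (not part of the original script): NUL bytes are
# stripped, everything else passes through untouched.
#
#     >>> list(replace_iter(["foo\0bar\n", "baz\n"]))
#     ['foobar\n', 'baz\n']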


# https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size#1094933
def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f %s%s" % (num, 'Yi', suffix)


def search(db, terms):
    with open(db, 'r') as f:
        results = []
        reader = csv.reader(replace_iter(f), delimiter=';')
        for row in reader:
            # skip blank lines and comment lines
            if not row or row[0].startswith('#'):
                continue
            # 0 is date in rfc 3339 format
            # 1 is the magnet link hash (stored base64-encoded)
            # 2 is title
            # 3 is size in bytes
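            # A made-up example row, just to make the layout concrete
            # (these values are illustrative, not taken from a real dump):
            #     2017-05-01T10:20:30Z;<base64 hash>;Some.Title.2017;123456789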
            if ' '.join(terms).lower() in row[2].lower():
                result = {
                    'date': row[0],
                    'size': sizeof_fmt(int(row[3])),
                    # the hash is stored base64-encoded, but magnet links
                    # expect the infohash as hex, hence decode then re-encode
                    'magnet':
                        'magnet:?xt=urn:btih:' +
                        base64.b16encode(base64.b64decode(row[1])).decode('utf-8') +
                        '&dn=' +
                        parse.quote(row[2]),
                }
                results.append(result)
    # limit page size to not print walls of results
    # TODO: consider pagination
    results = results[:30]
    return results
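

# A minimal sketch of how search() could be driven from the command line.
# Illustrative only: the database file name ('db.csv') and the output format
# are assumptions, not part of the original script.
if __name__ == '__main__':
    import sys

    for hit in search('db.csv', sys.argv[1:]):
        print(hit['date'], hit['size'], hit['magnet'])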