diff --git a/.gitignore b/.gitignore
index b292a69..0808890 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,8 @@ src/magneticod/vendor
src/magneticod/Gopkg.lock
src/magneticow/vendor
src/magneticow/Gopkg.lock
+src/persistence/vendor
+src/persistence/Gopkg.lock
# Created by https://www.gitignore.io/api/linux,python,pycharm
diff --git a/.travis.yml b/.travis.yml
index ab96d01..fc167ea 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,15 +1,16 @@
sudo: enabled
dist: xenial
-language: python
-python:
- - "3.5"
-# versions 3.6.0 and 3.6.1 have bugs that affect magneticod
+language: go
+go:
+ - 1.8.3
before_install:
- "sudo apt-get update -qq"
- "sudo apt-get install python3-dev"
- "pip3 install mypy pylint"
+ - ""
+
install:
- "pip3 install ./magneticod"
- "pip3 install ./magneticow"
diff --git a/magneticow/magneticow/static/styles/homepage.css b/magneticow/magneticow/static/styles/homepage.css
deleted file mode 100644
index 384d922..0000000
--- a/magneticow/magneticow/static/styles/homepage.css
+++ /dev/null
@@ -1,27 +0,0 @@
-main {
- display: flex;
- align-items: center;
- align-content: center;
-
- height: calc(100vh - 2 * 3em);
- width: calc(100vw - 2 * 3em);
-}
-
-main form {
- max-width: 600px;
- width: 100%;
-
- margin-left: 0.5em;
-}
-
-main form input {
- width: 100%;
-}
-
-main > div {
- margin-right: 0.5em;
-}
-
-footer {
- margin-top: 0.833em;
-}
diff --git a/magneticow/magneticow/templates/homepage.html b/magneticow/magneticow/templates/homepage.html
deleted file mode 100644
index b6d19d8..0000000
--- a/magneticow/magneticow/templates/homepage.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
-
- magneticow
-
-
-
-
-
-
-
- magneticow(pre-alpha)
-
-
-
-
-
-
diff --git a/magneticow/magneticow/templates/torrents.html b/magneticow/magneticow/templates/torrents.html
deleted file mode 100644
index 8dc543a..0000000
--- a/magneticow/magneticow/templates/torrents.html
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
-
-
- {% if search %}"{{search}}"{% else %}Most recent torrents{% endif %} - magneticow
-
-
-
-
-
-
-
-
-
-
-
- |
-
- {% if sorted_by == "name ASC" %}
- Name ▲
- {% elif sorted_by == "name DESC" %}
- Name ▼
- {% else %}
- Name
- {% endif %}
- |
-
- {% if sorted_by == "total_size ASC" %}
- Size ▲
- {% elif sorted_by == "total_size DESC" %}
- Size ▼
- {% else %}
- Size
- {% endif %}
- |
-
- {% if sorted_by == "discovered_on ASC" %}
- Discovered on ▲
- {% elif sorted_by == "discovered_on DESC" %}
- Discovered on ▼
- {% else %}
- Discovered on
- {% endif %}
- |
-
-
-
- {% for torrent in torrents %}
-
-
- |
- {{ torrent.name }} |
- {{ torrent.size }} |
- {{ torrent.discovered_on }} |
-
- {% endfor %}
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/magneticod/bittorrent/operations.go b/src/magneticod/bittorrent/operations.go
index c9bcf78..845658c 100644
--- a/src/magneticod/bittorrent/operations.go
+++ b/src/magneticod/bittorrent/operations.go
@@ -8,6 +8,9 @@ import (
"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/metainfo"
"go.uber.org/zap"
+ "os"
+ "path"
+ "persistence"
)
@@ -40,48 +43,46 @@ func (ms *MetadataSink) awaitMetadata(infoHash metainfo.Hash, peer torrent.Peer)
return
}
- var files []metainfo.FileInfo
- if len(info.Files) == 0 {
- if strings.ContainsRune(info.Name, '/') {
- // A single file torrent cannot have any '/' characters in its name. We treat it as
- // illegal.
- zap.L().Sugar().Debugf("!!!! illegal character in name! \"%s\"", info.Name)
- return
- }
- files = []metainfo.FileInfo{{Length: info.Length, Path:[]string{info.Name}}}
- } else {
- // TODO: We have to make sure that anacrolix/torrent checks for '/' character in file paths
- // before concatenating them. This is currently assumed here. We should write a test for it.
- files = info.Files
+	var files []persistence.File
+	// info.UpvertedFiles() synthesizes a single-file entry for single-file torrents, so this
+	// loop populates `files` for both single- and multi-file torrents; ranging over
+	// info.Files alone would leave `files` empty for single-file torrents.
+	for _, file := range info.UpvertedFiles() {
+		files = append(files, persistence.File{
+			Size: file.Length,
+			Path: file.DisplayPath(info),
+		})
}
var totalSize uint64
for _, file := range files {
- if file.Length < 0 {
+ if file.Size < 0 {
// All files' sizes must be greater than or equal to zero, otherwise treat them as
// illegal and ignore.
- zap.L().Sugar().Debugf("!!!! file size zero or less! \"%s\" (%d)", file.Path, file.Length)
+ zap.L().Sugar().Debugf("!!!! file size zero or less! \"%s\" (%d)", file.Path, file.Size)
return
}
- totalSize += uint64(file.Length)
+ totalSize += uint64(file.Size)
}
ms.flush(Metadata{
- InfoHash: infoHash[:],
- Name: info.Name,
- TotalSize: totalSize,
+ InfoHash: infoHash[:],
+ Name: info.Name,
+ TotalSize: totalSize,
DiscoveredOn: time.Now().Unix(),
- Files: files,
+ Files: files,
+ Peers: nil,
})
}
-func (fs *FileSink) awaitFile(infoHash []byte, filePath string, peer *torrent.Peer) {
+func (fs *FileSink) awaitFile(req *FileRequest) {
+ // Remove the download directory of the torrent after the operation is completed.
+ // TODO: what if RemoveAll() returns error, do we care, and if we do, how to handle it?
+ defer os.RemoveAll(path.Join(fs.baseDownloadDir, string(req.InfoHash)))
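+	// NOTE: this assumes storage.NewFileByInfoHash() names each torrent's directory after
+	// the raw info-hash bytes; if it uses the hex-encoded form instead, this path will not
+	// match and the directory will be left behind.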
+
var infoHash_ [20]byte
- copy(infoHash_[:], infoHash)
+ copy(infoHash_[:], req.InfoHash)
t, isNew := fs.client.AddTorrentInfoHash(infoHash_)
- if peer != nil {
- t.AddPeers([]torrent.Peer{*peer})
+ if len(req.Peers) > 0 {
+ t.AddPeers(req.Peers)
}
if !isNew {
// Return immediately if we are trying to await on an ongoing file-downloading operation.
@@ -90,7 +91,7 @@ func (fs *FileSink) awaitFile(infoHash []byte, filePath string, peer *torrent.Pe
}
// Setup & start the timeout timer.
- timeout := time.After(fs.timeout)
+ timeout := time.After(fs.timeoutDuration)
// Once we return from this function, drop the torrent from the client.
// TODO: Check if dropping a torrent also cancels any outstanding read operations?
@@ -105,7 +106,7 @@ func (fs *FileSink) awaitFile(infoHash []byte, filePath string, peer *torrent.Pe
var match *torrent.File
for _, file := range t.Files() {
- if file.Path() == filePath {
+ if file.Path() == req.Path {
match = &file
} else {
file.Cancel()
@@ -117,13 +118,12 @@ func (fs *FileSink) awaitFile(infoHash []byte, filePath string, peer *torrent.Pe
zap.L().Warn(
"The leech (FileSink) has been requested to download a file which does not exist!",
- zap.ByteString("torrent", infoHash),
- zap.String("requestedFile", filePath),
+ zap.ByteString("torrent", req.InfoHash),
+ zap.String("requestedFile", req.Path),
zap.Strings("allFiles", filePaths),
)
}
-
reader := t.NewReader()
defer reader.Close()
@@ -133,18 +133,17 @@ func (fs *FileSink) awaitFile(infoHash []byte, filePath string, peer *torrent.Pe
select {
case fileData := <-fileDataChan:
if fileData != nil {
- fs.flush(File{
- torrentInfoHash: infoHash,
- path: match.Path(),
- data: fileData,
+ fs.flush(FileResult{
+ Request: req,
+ FileData: fileData,
})
}
case <- timeout:
zap.L().Debug(
"Timeout while downloading a file!",
- zap.ByteString("torrent", infoHash),
- zap.String("file", filePath),
+ zap.ByteString("torrent", req.InfoHash),
+ zap.String("file", req.Path),
)
}
}
@@ -156,9 +155,10 @@ func downloadFile(file torrent.File, reader *torrent.Reader, fileDataChan chan<-
fileData := make([]byte, file.Length())
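+	// NOTE: a single Read() call is not guaranteed to fill the buffer even when no error
+	// occurs; io.ReadFull() would be the safer choice here.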
n, err := readSeeker.Read(fileData)
if int64(n) != file.Length() {
+ infoHash := file.Torrent().InfoHash()
zap.L().Debug(
"Not all of a file could be read!",
- zap.ByteString("torrent", file.Torrent().InfoHash()[:]),
+ zap.ByteString("torrent", infoHash[:]),
zap.String("file", file.Path()),
zap.Int64("fileLength", file.Length()),
zap.Int("n", n),
@@ -167,10 +167,11 @@ func downloadFile(file torrent.File, reader *torrent.Reader, fileDataChan chan<-
return
}
if err != nil {
+ infoHash := file.Torrent().InfoHash()
zap.L().Debug(
"Error while downloading a file!",
zap.Error(err),
- zap.ByteString("torrent", file.Torrent().InfoHash()[:]),
+ zap.ByteString("torrent", infoHash[:]),
zap.String("file", file.Path()),
zap.Int64("fileLength", file.Length()),
zap.Int("n", n),
diff --git a/src/magneticod/bittorrent/sinkFile.go b/src/magneticod/bittorrent/sinkFile.go
index 2592fea..5d75c0c 100644
--- a/src/magneticod/bittorrent/sinkFile.go
+++ b/src/magneticod/bittorrent/sinkFile.go
@@ -12,28 +12,33 @@ import (
"go.uber.org/zap"
)
-
-type File struct{
- torrentInfoHash []byte
- path string
- data []byte
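+// FileRequest describes a single file to be downloaded from the swarm of a given torrent.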
+type FileRequest struct {
+ InfoHash []byte
+ Path string
+ Peers []torrent.Peer
}
+type FileResult struct {
+	// Request is the original FileRequest that this result corresponds to.
+ Request *FileRequest
+ FileData []byte
+}
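+
+// FileSink leeches the files requested through Sink() from their torrents' swarms and
+// yields the downloaded bytes as FileResults through Drain().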
type FileSink struct {
+ baseDownloadDir string
client *torrent.Client
- drain chan File
+ drain chan FileResult
terminated bool
termination chan interface{}
- timeout time.Duration
+ timeoutDuration time.Duration
}
// NewFileSink creates a new FileSink.
//
// cAddr : client address
// mlAddr: mainline DHT node address
-func NewFileSink(cAddr, mlAddr string, timeout time.Duration) *FileSink {
+func NewFileSink(cAddr, mlAddr string, timeoutDuration time.Duration) *FileSink {
fs := new(FileSink)
mlUDPAddr, err := net.ResolveUDPAddr("udp", mlAddr)
@@ -49,6 +54,11 @@ func NewFileSink(cAddr, mlAddr string, timeout time.Duration) *FileSink {
return nil
}
+ fs.baseDownloadDir = path.Join(
+ appdirs.UserCacheDir("magneticod", "", "", true),
+ "downloads",
+ )
+
fs.client, err = torrent.NewClient(&torrent.Config{
ListenAddr: cAddr,
DisableTrackers: true,
@@ -57,10 +67,7 @@ func NewFileSink(cAddr, mlAddr string, timeout time.Duration) *FileSink {
Passive: true,
NoSecurity: true,
},
- DefaultStorage: storage.NewFileByInfoHash(path.Join(
- appdirs.UserCacheDir("magneticod", "", "", true),
- "downloads",
- )),
+ DefaultStorage: storage.NewFileByInfoHash(fs.baseDownloadDir),
})
if err != nil {
zap.L().Fatal("Leech could NOT create a new torrent client!", zap.Error(err))
@@ -68,21 +75,26 @@ func NewFileSink(cAddr, mlAddr string, timeout time.Duration) *FileSink {
return nil
}
- fs.drain = make(chan File)
+ fs.drain = make(chan FileResult)
fs.termination = make(chan interface{})
- fs.timeout = timeout
+ fs.timeoutDuration = timeoutDuration
return fs
}
-
-// peer might be nil
-func (fs *FileSink) Sink(infoHash []byte, filePath string, peer *torrent.Peer) {
- go fs.awaitFile(infoHash, filePath, peer)
+// Sink queues the given file for downloading. peers is optional and might be nil.
+func (fs *FileSink) Sink(infoHash []byte, path string, peers []torrent.Peer) {
+ if fs.terminated {
+ zap.L().Panic("Trying to Sink() an already closed FileSink!")
+ }
+ go fs.awaitFile(&FileRequest{
+ InfoHash: infoHash,
+ Path: path,
+ Peers: peers,
+ })
}
-
-func (fs *FileSink) Drain() <-chan File {
+func (fs *FileSink) Drain() <-chan FileResult {
if fs.terminated {
zap.L().Panic("Trying to Drain() an already closed FileSink!")
}
@@ -98,7 +110,7 @@ func (fs *FileSink) Terminate() {
}
-func (fs *FileSink) flush(result File) {
+func (fs *FileSink) flush(result FileResult) {
if !fs.terminated {
fs.drain <- result
}
diff --git a/src/magneticod/bittorrent/sinkMetadata.go b/src/magneticod/bittorrent/sinkMetadata.go
index a01c9fe..a5761d9 100644
--- a/src/magneticod/bittorrent/sinkMetadata.go
+++ b/src/magneticod/bittorrent/sinkMetadata.go
@@ -1,13 +1,11 @@
package bittorrent
import (
- "net"
-
"go.uber.org/zap"
"github.com/anacrolix/torrent"
- "github.com/anacrolix/torrent/metainfo"
"magneticod/dht/mainline"
+ "persistence"
)
@@ -19,7 +17,13 @@ type Metadata struct {
TotalSize uint64
DiscoveredOn int64
// Files must be populated for both single-file and multi-file torrents!
- Files []metainfo.FileInfo
+ Files []persistence.File
+	// Peers is the list of the "active" peers at the time the metadata was fetched. It is
+	// currently always nil, as anacrolix/torrent does not expose the list of peers for a
+	// given torrent, but in the future this information can be used by the
+	// CompletingCoordinator to download the README file (if any is found).
+	Peers []torrent.Peer
}
@@ -31,11 +35,11 @@ type MetadataSink struct {
}
-func NewMetadataSink(laddr net.TCPAddr) *MetadataSink {
+func NewMetadataSink(laddr string) *MetadataSink {
ms := new(MetadataSink)
var err error
ms.client, err = torrent.NewClient(&torrent.Config{
- ListenAddr: laddr.String(),
+ ListenAddr: laddr,
DisableTrackers: true,
DisablePEX: true,
// TODO: Should we disable DHT to force the client to use the peers we supplied only, or not?
diff --git a/src/magneticod/coordinators.go b/src/magneticod/coordinators.go
new file mode 100644
index 0000000..e4e59a9
--- /dev/null
+++ b/src/magneticod/coordinators.go
@@ -0,0 +1,111 @@
+package main
+
+import (
+ "regexp"
+ "sync"
+ "time"
+
+ "github.com/anacrolix/torrent"
+
+ "persistence"
+
+ "magneticod/bittorrent"
+
+)
+
+type completionRequest struct {
+ infoHash []byte
+ path string
+ peers []torrent.Peer
+ time time.Time
+}
+
+type completionResult struct {
+ InfoHash []byte
+ Path string
+ Data []byte
+}
+
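+// CompletingCoordinator "completes" the torrents in the database by downloading their
+// README files (as matched by readmeRegex) from the swarm through a FileSink leech.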
+type CompletingCoordinator struct {
+ database persistence.Database
+ maxReadmeSize uint
+ sink *bittorrent.FileSink
+ queue chan completionRequest
+ queueMutex sync.Mutex
+ outputChan chan completionResult
+ readmeRegex *regexp.Regexp
+ terminated bool
+ termination chan interface{}
+}
+
+type CompletingCoordinatorOpFlags struct {
+ LeechClAddr string
+ LeechMlAddr string
+ LeechTimeout time.Duration
+ ReadmeMaxSize uint
+ ReadmeRegex *regexp.Regexp
+}
+
+func NewCompletingCoordinator(database persistence.Database, opFlags CompletingCoordinatorOpFlags) (cc *CompletingCoordinator) {
+ cc = new(CompletingCoordinator)
+ cc.database = database
+ cc.maxReadmeSize = opFlags.ReadmeMaxSize
+ cc.sink = bittorrent.NewFileSink(opFlags.LeechClAddr, opFlags.LeechMlAddr, opFlags.LeechTimeout)
+ cc.queue = make(chan completionRequest, 100)
+ cc.readmeRegex = opFlags.ReadmeRegex
+	cc.outputChan = make(chan completionResult)
+	cc.termination = make(chan interface{})
+ return
+}
+
+func (cc *CompletingCoordinator) Request(infoHash []byte, path string, peers []torrent.Peer) {
+ cc.queueMutex.Lock()
+ defer cc.queueMutex.Unlock()
+
+	// If the queue is full, discard the oldest request, as it is the most likely to be outdated.
+ if len(cc.queue) == cap(cc.queue) {
+ <- cc.queue
+ }
+
+	// Without the mutex, another goroutine could call Request() right after we dequeued the
+	// oldest entry above (to free a single slot) and fill that slot with its own entry,
+	// leaving the current goroutine blocked on cc.queue once again. cc.queueMutex prevents
+	// that interleaving.
+
+ cc.queue <- completionRequest{
+ infoHash: infoHash,
+ path: path,
+ peers: peers,
+ time: time.Now(),
+ }
+}
+
+func (cc *CompletingCoordinator) Start() {
+ go cc.complete()
+}
+
+func (cc *CompletingCoordinator) Output() <-chan completionResult {
+ return cc.outputChan
+}
+
+func (cc *CompletingCoordinator) complete() {
+ for {
+ select {
+ case request := <-cc.queue:
+ // Discard requests older than 2 minutes.
+ // TODO: Instead of settling on 2 minutes as an arbitrary value, do some research to
+ // learn average peer lifetime in the BitTorrent network.
+			if time.Since(request.time) > 2*time.Minute {
+ continue
+ }
+ cc.sink.Sink(request.infoHash, request.path, request.peers)
+
+		case <-cc.termination:
+			// return rather than break: a bare break would exit only the select, not the loop.
+			return
+
+ default:
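+			// TODO: this arm spins hot and discards both the result and the error; it is a
+			// placeholder until FindAnIncompleteTorrent() actually returns the torrent to
+			// be completed (see persistence/database.go).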
+ cc.database.FindAnIncompleteTorrent(cc.readmeRegex, cc.maxReadmeSize)
+ }
+ }
+}
diff --git a/src/magneticod/dht/mainline/bloomFilter.go b/src/magneticod/dht/mainline/bloomFilter.go
new file mode 100644
index 0000000..29fa90c
--- /dev/null
+++ b/src/magneticod/dht/mainline/bloomFilter.go
@@ -0,0 +1,59 @@
+package mainline
+
+import (
+	"crypto/sha1"
+	"math"
+	"math/bits"
+	"net"
+
+	"go.uber.org/zap"
+)
+
+const (
+ k uint32 = 2
+ m uint32 = 256 * 8
+)
+
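+// BloomFilter is the 2048-bit bloom filter of BEP 33 "DHT Scrapes", using k = 2 bit
+// positions derived from the SHA-1 digest of an IP address.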
+type BloomFilter struct {
+ filter [m/8]byte
+}
+
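+// InsertIP sets the two filter bits selected by the first four bytes of SHA1(ip):
+// index_i = (hash[2i] | hash[2i+1] << 8) mod m, as specified by BEP 33.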
+func (bf *BloomFilter) InsertIP(ip net.IP) {
+ if !(len(ip) == net.IPv4len || len(ip) == net.IPv6len) {
+ zap.S().Panicf("Attempted to insert an invalid IP to the bloom filter! %d", len(ip))
+ }
+
+ hash := sha1.Sum(ip)
+
+ var index1, index2 uint32
+ index1 = uint32(hash[0]) | uint32(hash[1]) << 8
+ index2 = uint32(hash[2]) | uint32(hash[3]) << 8
+
+ // truncate index to m (11 bits required)
+ index1 %= m
+ index2 %= m
+
+ // set bits at index1 and index2
+ bf.filter[index1 / 8] |= 0x01 << (index1 % 8)
+ bf.filter[index2 / 8] |= 0x01 << (index2 % 8)
+}
+
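+// Estimate returns the approximate number of distinct IPs inserted so far, using the
+// BEP 33 estimator: ln(c/m) / (k * ln(1 - 1/m)), where c = min(count-of-zero-bits, m-1).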
+func (bf *BloomFilter) Estimate() float64 {
+ // TODO: make it faster?
+ var nZeroes uint32 = 0
+ for _, b := range bf.filter {
+ nZeroes += 8 - uint32(bits.OnesCount8(uint8(b)))
+ }
+
+ var c uint32
+ if m - 1 < nZeroes {
+ c = m - 1
+ } else {
+ c = nZeroes
+ }
+ return math.Log(float64(c) / float64(m)) / (float64(k) * math.Log(1 - 1/float64(m)))
+}
+
+func (bf *BloomFilter) Filter() (filterCopy [m/8]byte) {
+ copy(filterCopy[:], bf.filter[:])
+ return filterCopy
+}
diff --git a/src/magneticod/dht/mainline/bloomFilter_test.go b/src/magneticod/dht/mainline/bloomFilter_test.go
new file mode 100644
index 0000000..6df9fd6
--- /dev/null
+++ b/src/magneticod/dht/mainline/bloomFilter_test.go
@@ -0,0 +1,64 @@
+package mainline
+
+import (
+ "bytes"
+ "testing"
+ "encoding/hex"
+ "strings"
+ "fmt"
+)
+
+func TestBEP33Filter(t *testing.T) {
+ bf := new(BloomFilter)
+ populateForBEP33(bf)
+
+ resultingFilter := bf.Filter()
+ var expectedFilter [256]byte
+ hex.Decode(expectedFilter[:], []byte(strings.Replace(
+ "F6C3F5EA A07FFD91 BDE89F77 7F26FB2B FF37BDB8 FB2BBAA2 FD3DDDE7 BACFFF75 EE7CCBAE" +
+ "FE5EEDB1 FBFAFF67 F6ABFF5E 43DDBCA3 FD9B9FFD F4FFD3E9 DFF12D1B DF59DB53 DBE9FA5B" +
+ "7FF3B8FD FCDE1AFB 8BEDD7BE 2F3EE71E BBBFE93B CDEEFE14 8246C2BC 5DBFF7E7 EFDCF24F" +
+ "D8DC7ADF FD8FFFDF DDFFF7A4 BBEEDF5C B95CE81F C7FCFF1F F4FFFFDF E5F7FDCB B7FD79B3" +
+ "FA1FC77B FE07FFF9 05B7B7FF C7FEFEFF E0B8370B B0CD3F5B 7F2BD93F EB4386CF DD6F7FD5" +
+ "BFAF2E9E BFFFFEEC D67ADBF7 C67F17EF D5D75EBA 6FFEBA7F FF47A91E B1BFBB53 E8ABFB57" +
+ "62ABE8FF 237279BF EFBFEEF5 FFC5FEBF DFE5ADFF ADFEE1FB 737FFFFB FD9F6AEF FEEE76B6" +
+ "FD8F72EF",
+ " ", "", -1)))
+ if !bytes.Equal(resultingFilter[:], expectedFilter[:]) {
+ t.Fail()
+ }
+}
+
+func TestBEP33Estimation(t *testing.T) {
+ bf := new(BloomFilter)
+ populateForBEP33(bf)
+
+ // Because Go lacks a truncate function for floats...
+ if fmt.Sprintf("%.5f", bf.Estimate())[:9] != "1224.9308" {
+ t.Errorf("Expected 1224.9308 got %f instead!", bf.Estimate())
+ }
+}
+
+func populateForBEP33(bf *BloomFilter) {
+	// 192.0.2.0 to 192.0.2.255 (both ends inclusive)
+ addr := []byte{192, 0, 2, 0}
+ for i := 0; i <= 255; i++ {
+ addr[3] = uint8(i)
+ bf.InsertIP(addr)
+ }
+
+	// 2001:DB8:: to 2001:DB8::3E7 (both ends inclusive)
+ addr = []byte{32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ for i := 0; i <= 2; i++ {
+ addr[14] = uint8(i)
+ for e := 0; e <= 255; e++ {
+ addr[15] = uint8(e)
+ bf.InsertIP(addr)
+ }
+ }
+ addr[14] = 3
+ for e := 0; e <= 231; e++ {
+ addr[15] = uint8(e)
+ bf.InsertIP(addr)
+ }
+}
diff --git a/src/magneticod/dht/mainline/codec.go b/src/magneticod/dht/mainline/codec.go
index ee66ed6..bd559c5 100644
--- a/src/magneticod/dht/mainline/codec.go
+++ b/src/magneticod/dht/mainline/codec.go
@@ -14,6 +14,7 @@ import (
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/missinggo/iter"
"regexp"
+ "github.com/willf/bloom"
)
@@ -46,6 +47,21 @@ type QueryArguments struct {
Port int `bencode:"port,omitempty"`
// Use senders apparent DHT port
ImpliedPort int `bencode:"implied_port,omitempty"`
+
+ // Indicates whether the querying node is seeding the torrent it announces.
+ // Defined in BEP 33 "DHT Scrapes" for `announce_peer` queries.
+ Seed int `bencode:"seed,omitempty"`
+
+ // If 1, then the responding node should try to fill the `values` list with non-seed items on a
+ // best-effort basis."
+ // Defined in BEP 33 "DHT Scrapes" for `get_peers` queries.
+ NoSeed int `bencode:"noseed,omitempty"`
+ // If 1, then the responding node should add two fields to the "r" dictionary in the response:
+ // - `BFsd`: Bloom Filter (256 bytes) representing all stored seeds for that infohash
+ // - `BFpe`: Bloom Filter (256 bytes) representing all stored peers (leeches) for that
+ // infohash
+ // Defined in BEP 33 "DHT Scrapes" for `get_peers` queries.
+	Scrape int `bencode:"scrape,omitempty"`
}
@@ -58,6 +74,15 @@ type ResponseValues struct {
Token []byte `bencode:"token,omitempty"`
// Torrent peers
Values []CompactPeer `bencode:"values,omitempty"`
+
+ // If `scrape` is set to 1 in the `get_peers` query then the responding node should add the
+ // below two fields to the "r" dictionary in the response:
+ // Defined in BEP 33 "DHT Scrapes" for responses to `get_peers` queries.
+ // Bloom Filter (256 bytes) representing all stored seeds for that infohash:
+ BFsd *bloom.BloomFilter `bencode:"BFsd,omitempty"`
+ // Bloom Filter (256 bytes) representing all stored peers (leeches) for that infohash:
+ BFpe *bloom.BloomFilter `bencode:"BFpe,omitempty"`
+ // TODO: write marshallers for those fields above ^^
}
@@ -204,7 +229,7 @@ func (cni *CompactNodeInfo) UnmarshalBinary(b []byte) error {
func (cnis CompactNodeInfos) MarshalBencode() ([]byte, error) {
- ret := make([]byte, 0) // TODO: this doesn't look idiomatic at all, is this the right way?
+ var ret []byte
for _, cni := range cnis {
ret = append(ret, cni.MarshalBinary()...)
diff --git a/src/magneticod/dht/mainline/protocol.go b/src/magneticod/dht/mainline/protocol.go
index 42249ca..c2dd74a 100644
--- a/src/magneticod/dht/mainline/protocol.go
+++ b/src/magneticod/dht/mainline/protocol.go
@@ -31,7 +31,7 @@ type ProtocolEventHandlers struct {
}
-func NewProtocol(laddr net.UDPAddr, eventHandlers ProtocolEventHandlers) (p *Protocol) {
+func NewProtocol(laddr string, eventHandlers ProtocolEventHandlers) (p *Protocol) {
p = new(Protocol)
p.transport = NewTransport(laddr, p.onMessage)
p.eventHandlers = eventHandlers
diff --git a/src/magneticod/dht/mainline/service.go b/src/magneticod/dht/mainline/service.go
index 305371b..c01e5c1 100644
--- a/src/magneticod/dht/mainline/service.go
+++ b/src/magneticod/dht/mainline/service.go
@@ -38,7 +38,7 @@ type TrawlingServiceEventHandlers struct {
}
-func NewTrawlingService(laddr net.UDPAddr, eventHandlers TrawlingServiceEventHandlers) *TrawlingService {
+func NewTrawlingService(laddr string, eventHandlers TrawlingServiceEventHandlers) *TrawlingService {
service := new(TrawlingService)
service.protocol = NewProtocol(
laddr,
diff --git a/src/magneticod/dht/mainline/transport.go b/src/magneticod/dht/mainline/transport.go
index 064f52b..eaa2f4b 100644
--- a/src/magneticod/dht/mainline/transport.go
+++ b/src/magneticod/dht/mainline/transport.go
@@ -10,8 +10,8 @@ import (
type Transport struct {
- conn *net.UDPConn
- laddr net.UDPAddr
+ conn *net.UDPConn
+ laddr *net.UDPAddr
started bool
// OnMessage is the function that will be called when Transport receives a packet that is
@@ -21,10 +21,13 @@ type Transport struct {
}
-func NewTransport(laddr net.UDPAddr, onMessage func(*Message, net.Addr)) (*Transport) {
+func NewTransport(laddr string, onMessage func(*Message, net.Addr)) (*Transport) {
transport := new(Transport)
transport.onMessage = onMessage
- transport.laddr = laddr
+	var err error
+	transport.laddr, err = net.ResolveUDPAddr("udp", laddr)
+ if err != nil {
+ zap.L().Panic("Could not resolve the UDP address for the trawler!", zap.Error(err))
+ }
return transport
}
@@ -45,7 +48,7 @@ func (t *Transport) Start() {
t.started = true
var err error
- t.conn, err = net.ListenUDP("udp", &t.laddr)
+ t.conn, err = net.ListenUDP("udp", t.laddr)
if err != nil {
zap.L().Fatal("Could NOT create a UDP socket!", zap.Error(err))
}
diff --git a/src/magneticod/dht/managers.go b/src/magneticod/dht/managers.go
index 4ffabf6..1dbbba6 100644
--- a/src/magneticod/dht/managers.go
+++ b/src/magneticod/dht/managers.go
@@ -1,10 +1,6 @@
package dht
-import (
- "magneticod/dht/mainline"
- "net"
- "github.com/bradfitz/iter"
-)
+import "magneticod/dht/mainline"
type TrawlingManager struct {
@@ -14,29 +10,20 @@ type TrawlingManager struct {
}
-func NewTrawlingManager(mlAddrs []net.UDPAddr) *TrawlingManager {
+func NewTrawlingManager(mlAddrs []string) *TrawlingManager {
manager := new(TrawlingManager)
manager.output = make(chan mainline.TrawlingResult)
- if mlAddrs != nil {
- for _, addr := range mlAddrs {
- manager.services = append(manager.services, mainline.NewTrawlingService(
- addr,
- mainline.TrawlingServiceEventHandlers{
- OnResult: manager.onResult,
- },
- ))
- }
- } else {
- addr := net.UDPAddr{IP: []byte("\x00\x00\x00\x00"), Port: 0}
- for range iter.N(1) {
- manager.services = append(manager.services, mainline.NewTrawlingService(
- addr,
- mainline.TrawlingServiceEventHandlers{
- OnResult: manager.onResult,
- },
- ))
- }
+ if mlAddrs == nil {
+ mlAddrs = []string{"0.0.0.0:0"}
+ }
+ for _, addr := range mlAddrs {
+ manager.services = append(manager.services, mainline.NewTrawlingService(
+ addr,
+ mainline.TrawlingServiceEventHandlers{
+ OnResult: manager.onResult,
+ },
+ ))
}
for _, service := range manager.services {
diff --git a/src/magneticod/main.go b/src/magneticod/main.go
index c7ab3f3..1d8885f 100644
--- a/src/magneticod/main.go
+++ b/src/magneticod/main.go
@@ -1,20 +1,23 @@
package main
import (
+ "fmt"
"net"
"os"
"os/signal"
"regexp"
+ "time"
"github.com/jessevdk/go-flags"
"github.com/pkg/profile"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
+ "persistence"
+
"magneticod/bittorrent"
"magneticod/dht"
- "fmt"
- "time"
+ "github.com/anacrolix/torrent/metainfo"
)
type cmdFlags struct {
@@ -35,14 +38,13 @@ type cmdFlags struct {
 	LeechMlAddr string `long:"leech-ml-addr" description:"Address to be used by the mainline DHT node for fetching README files." default:"0.0.0.0:0"`
LeechTimeout uint `long:"leech-timeout" description:"Number of integer seconds to pass before a leech timeouts." default:"300"`
 	ReadmeMaxSize uint `long:"readme-max-size" description:"Maximum size, in bytes, of a README file (must be greater than zero)." default:"20480"`
- ReadmeRegexes []string `long:"readme-regex" description:"Regular expression(s) which will be tested against the name of the README files, in the supplied order."`
+	ReadmeRegex string `long:"readme-regex" description:"Regular expression which will be tested against the paths of README files."`
Verbose []bool `short:"v" long:"verbose" description:"Increases verbosity."`
Profile string `long:"profile" description:"Enable profiling." default:""`
- // ==== Deprecated Flags ====
- // TODO: don't even support deprecated flags!
+ // ==== OLD Flags ====
// DatabaseFile is akin to Database flag, except that it was used when SQLite was the only
// persistence backend ever conceived, so it's the path* to the database file, which was -by
@@ -53,7 +55,6 @@ type cmdFlags struct {
// On BSDs? : TODO?
// On anywhere else: TODO?
// TODO: Is the path* absolute or can be relative as well?
- // DatabaseFile string
}
const (
@@ -70,19 +71,17 @@ type opFlags struct {
TrawlerMlAddrs []string
TrawlerMlInterval time.Duration
- // TODO: is this even supported by anacrolix/torrent?
FetcherAddr string
FetcherTimeout time.Duration
StatistMlAddrs []string
StatistMlTimeout time.Duration
- // TODO: is this even supported by anacrolix/torrent?
LeechClAddr string
LeechMlAddr string
LeechTimeout time.Duration
ReadmeMaxSize uint
- ReadmeRegexes []*regexp.Regexp
+ ReadmeRegex *regexp.Regexp
Verbosity int
@@ -90,12 +89,12 @@ type opFlags struct {
}
func main() {
- atom := zap.NewAtomicLevel()
+ loggerLevel := zap.NewAtomicLevel()
// Logging levels: ("debug", "info", "warn", "error", "dpanic", "panic", and "fatal").
logger := zap.New(zapcore.NewCore(
zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
zapcore.Lock(os.Stderr),
- atom,
+ loggerLevel,
))
defer logger.Sync()
zap.ReplaceGlobals(logger)
@@ -111,81 +110,70 @@ func main() {
switch opFlags.Verbosity {
case 0:
- atom.SetLevel(zap.WarnLevel)
+ loggerLevel.SetLevel(zap.WarnLevel)
case 1:
- atom.SetLevel(zap.InfoLevel)
- // Default: i.e. in case of 2 or more.
+ loggerLevel.SetLevel(zap.InfoLevel)
+ // Default: i.e. in case of 2 or more.
default:
- atom.SetLevel(zap.DebugLevel)
+ loggerLevel.SetLevel(zap.DebugLevel)
}
zap.ReplaceGlobals(logger)
- /*
- updating_manager := nil
- statistics_sink := nil
- completing_manager := nil
- file_sink := nil
- */
// Handle Ctrl-C gracefully.
- interrupt_chan := make(chan os.Signal)
- signal.Notify(interrupt_chan, os.Interrupt)
+	interruptChan := make(chan os.Signal, 1) // buffered, as signal.Notify must not block sending
+ signal.Notify(interruptChan, os.Interrupt)
- database, err := NewDatabase(opFlags.Database)
+ database, err := persistence.MakeDatabase(opFlags.DatabaseURL)
if err != nil {
- logger.Sugar().Fatalf("Could not open the database at `%s`: %s", opFlags.Database, err.Error())
+ logger.Sugar().Fatalf("Could not open the database at `%s`: %s", opFlags.DatabaseURL, err.Error())
}
- trawlingManager := dht.NewTrawlingManager(opFlags.MlTrawlerAddrs)
- metadataSink := bittorrent.NewMetadataSink(opFlags.FetcherAddr)
- fileSink := bittorrent.NewFileSink()
-
- go func() {
- for {
- select {
- case result := <-trawlingManager.Output():
- logger.Debug("result: ", zap.String("hash", result.InfoHash.String()))
- if !database.DoesExist(result.InfoHash[:]) {
- metadataSink.Sink(result)
- }
-
- case metadata := <-metadataSink.Drain():
- logger.Sugar().Infof("D I S C O V E R E D: `%s` %x",
- metadata.Name, metadata.InfoHash)
- if err := database.AddNewTorrent(metadata); err != nil {
- logger.Sugar().Fatalf("Could not add new torrent %x to the database: %s",
- metadata.InfoHash, err.Error())
- }
-
- case <-interrupt_chan:
- trawlingManager.Terminate()
- break
- }
- }
- }()
-
- go func() {
-
- }()
-
- go func() {
-
- }()
-
+ trawlingManager := dht.NewTrawlingManager(opFlags.TrawlerMlAddrs)
+ metadataSink := bittorrent.NewMetadataSink(opFlags.FetcherAddr)
+ completingCoordinator := NewCompletingCoordinator(database, CompletingCoordinatorOpFlags{
+ LeechClAddr: opFlags.LeechClAddr,
+ LeechMlAddr: opFlags.LeechMlAddr,
+ LeechTimeout: opFlags.LeechTimeout,
+ ReadmeMaxSize: opFlags.ReadmeMaxSize,
+ ReadmeRegex: opFlags.ReadmeRegex,
+ })
/*
- for {
- select {
+ refreshingCoordinator := NewRefreshingCoordinator(database, RefreshingCoordinatorOpFlags{
- case updating_manager.Output():
-
- case statistics_sink.Sink():
-
- case completing_manager.Output():
-
- case file_sink.Sink():
+ })
*/
- <-interrupt_chan
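+	// The main event loop: trawling results are checked against the database and fed to the
+	// metadata sink; fetched metadata is persisted and, when a README candidate is found,
+	// handed to the completing coordinator, whose results are persisted in turn.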
+ for {
+ select {
+ case result := <-trawlingManager.Output():
+ logger.Debug("result: ", zap.String("hash", result.InfoHash.String()))
+ exists, err := database.DoesTorrentExist(result.InfoHash[:])
+ if err != nil {
+ zap.L().Fatal("Could not check whether torrent exists!", zap.Error(err))
+ } else if !exists {
+ metadataSink.Sink(result)
+ }
+
+ case metadata := <-metadataSink.Drain():
+ if err := database.AddNewTorrent(metadata.InfoHash, metadata.Name, metadata.Files); err != nil {
+ logger.Sugar().Fatalf("Could not add new torrent %x to the database: %s",
+ metadata.InfoHash, err.Error())
+ }
+ logger.Sugar().Infof("D I S C O V E R E D: `%s` %x", metadata.Name, metadata.InfoHash)
+
+ if readmePath := findReadme(opFlags.ReadmeRegex, metadata.Files); readmePath != nil {
+ completingCoordinator.Request(metadata.InfoHash, *readmePath, metadata.Peers)
+ }
+
+		case result := <-completingCoordinator.Output():
+			if err := database.AddReadme(result.InfoHash, result.Path, result.Data); err != nil {
+				logger.Sugar().Fatalf("Could not add the readme of %x to the database: %s",
+					result.InfoHash, err.Error())
+			}
+
+		case <-interruptChan:
+			trawlingManager.Terminate()
+			// return rather than break: a bare break would exit only the select, not the loop.
+			return
+ }
+ }
}
func parseFlags() (opF opFlags) {
@@ -237,13 +225,13 @@ func parseFlags() (opF opFlags) {
}
if err = checkAddrs([]string{cmdF.LeechClAddr}); err != nil {
- zap.S().Fatal("Of argument `leech-cl-addr` %s", err.Error())
+ zap.S().Fatalf("Of argument `leech-cl-addr` %s", err.Error())
} else {
opF.LeechClAddr = cmdF.LeechClAddr
}
if err = checkAddrs([]string{cmdF.LeechMlAddr}); err != nil {
- zap.S().Fatal("Of argument `leech-ml-addr` %s", err.Error())
+ zap.S().Fatalf("Of argument `leech-ml-addr` %s", err.Error())
} else {
opF.LeechMlAddr = cmdF.LeechMlAddr
}
@@ -260,13 +248,9 @@ func parseFlags() (opF opFlags) {
opF.ReadmeMaxSize = cmdF.ReadmeMaxSize
}
- for i, s := range cmdF.ReadmeRegexes {
- regex, err := regexp.Compile(s)
- if err != nil {
- zap.S().Fatalf("Of argument `readme-regex` with %d(th) regex `%s`: %s", i + 1, s, err.Error())
- } else {
- opF.ReadmeRegexes = append(opF.ReadmeRegexes, regex)
- }
+ opF.ReadmeRegex, err = regexp.Compile(cmdF.ReadmeRegex)
+ if err != nil {
+ zap.S().Fatalf("Argument `readme-regex` is not a valid regex: %s", err.Error())
}
opF.Verbosity = len(cmdF.Verbose)
@@ -286,3 +270,15 @@ func checkAddrs(addrs []string) error {
}
return nil
}
+
+// findReadme looks for a possible Readme file whose path is matched by the pathRegex.
+// If there are multiple matches, the first one is returned.
+// If there are no matches, nil is returned.
+func findReadme(pathRegex *regexp.Regexp, files []persistence.File) *string {
+ for _, file := range files {
+ if pathRegex.MatchString(file.Path) {
+ return &file.Path
+ }
+ }
+ return nil
+}
diff --git a/src/magneticod/persistence.go b/src/magneticod/persistence/database.go
similarity index 69%
rename from src/magneticod/persistence.go
rename to src/magneticod/persistence/database.go
index 1a938fa..ff866b1 100644
--- a/src/magneticod/persistence.go
+++ b/src/magneticod/persistence/database.go
@@ -1,4 +1,4 @@
-package main
+package persistence
import (
"bytes"
@@ -13,6 +13,7 @@ import (
"go.uber.org/zap"
"magneticod/bittorrent"
+ "regexp"
)
type engineType uint8
@@ -78,7 +79,7 @@ func NewDatabase(rawurl string) (*Database, error) {
}
-func (db *Database) DoesExist(infoHash []byte) bool {
+func (db *Database) DoesTorrentExist(infoHash []byte) bool {
for _, torrent := range db.newTorrents {
if bytes.Equal(infoHash, torrent.InfoHash) {
return true;
@@ -96,6 +97,30 @@ func (db *Database) DoesExist(infoHash []byte) bool {
return rows.Next()
}
+func (db *Database) FindAnIncompleteTorrent(pathRegex *regexp.Regexp, maxSize uint) error {
+ switch db.engine {
+ case SQLITE:
+ return db.findAnIncompleteTorrent_SQLite(pathRegex, maxSize)
+
+ default:
+ zap.L().Fatal("Unknown database engine!", zap.Uint8("engine", uint8(db.engine)))
+ return nil
+ }
+}
+
+func (db *Database) findAnIncompleteTorrent_SQLite(pathRegex *regexp.Regexp, maxSize uint) error {
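+	// NOTE: SQLite has no built-in REGEXP function; the REGEXP operator below works only if
+	// a user-defined regexp() function has been registered on this connection beforehand.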
+	// TODO: Prefer torrents with most seeders & leeches (i.e. most popular)
+	_, err := db.database.Query(`
+		SELECT torrents.info_hash, files.path
+		FROM files
+		INNER JOIN torrents ON files.torrent_id = torrents.id
+		WHERE files.path REGEXP ?
+		LIMIT 1;
+	`, pathRegex.String())
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
// AddNewTorrent adds a new torrent to the *queue* to be flushed to the persistent database.
func (db *Database) AddNewTorrent(torrent bittorrent.Metadata) error {
@@ -107,7 +132,7 @@ func (db *Database) AddNewTorrent(torrent bittorrent.Metadata) error {
// that it doesn't exists there, add it to the sink.
// Hence check for the last time whether the torrent exists in the database, and only if not,
// add it.
- if db.DoesExist(torrent.InfoHash) {
+ if db.DoesTorrentExist(torrent.InfoHash) {
return nil;
}
@@ -125,8 +150,14 @@ func (db *Database) AddNewTorrent(torrent bittorrent.Metadata) error {
}
+func (db *Database) AddReadme(infoHash []byte, path string, data []byte) error {
+ // TODO
+ return nil
+}
+
func (db *Database) commitNewTorrents() error {
- tx, err := db.database.Begin()
+ tx, err := db.database.Begin()
if err != nil {
return fmt.Errorf("sql.DB.Begin()! %s", err.Error())
}
@@ -216,11 +247,11 @@ func setupSqliteDatabase(database *sql.DB) error {
//
// Enable foreign key constraints in SQLite which are crucial to prevent programmer errors on
// our side.
- _, err := database.Exec(
- `PRAGMA journal_mode=WAL;
+ _, err := database.Exec(`
+ PRAGMA journal_mode=WAL;
PRAGMA temp_store=1;
- PRAGMA foreign_keys=ON;`,
- )
+ PRAGMA foreign_keys=ON;
+ `)
if err != nil {
return err
}
@@ -231,33 +262,28 @@ func setupSqliteDatabase(database *sql.DB) error {
}
// Essential, and valid for all user_version`s:
- _, err = tx.Exec(
- `CREATE TABLE IF NOT EXISTS torrents (
+ // TODO: "torrent_id" column of the "files" table can be NULL, how can we fix this in a new schema?
+ _, err = tx.Exec(`
+ CREATE TABLE IF NOT EXISTS torrents (
id INTEGER PRIMARY KEY,
info_hash BLOB NOT NULL UNIQUE,
name TEXT NOT NULL,
total_size INTEGER NOT NULL CHECK(total_size > 0),
discovered_on INTEGER NOT NULL CHECK(discovered_on > 0)
);
-
- CREATE INDEX IF NOT EXISTS info_hash_index ON torrents (info_hash);
-
CREATE TABLE IF NOT EXISTS files (
id INTEGER PRIMARY KEY,
torrent_id INTEGER REFERENCES torrents ON DELETE CASCADE ON UPDATE RESTRICT,
size INTEGER NOT NULL,
path TEXT NOT NULL
);
- `,
- )
+ `)
if err != nil {
return err
}
// Get the user_version:
- res, err := tx.Query(
- `PRAGMA user_version;`,
- )
+ res, err := tx.Query(`PRAGMA user_version;`)
if err != nil {
return err
}
@@ -265,18 +291,57 @@ func setupSqliteDatabase(database *sql.DB) error {
res.Next()
res.Scan(&userVersion)
- // Upgrade to the latest schema:
switch userVersion {
// Upgrade from user_version 0 to 1
+ // The Change:
+ // * `info_hash_index` is recreated as UNIQUE.
case 0:
- _, err = tx.Exec(
- `ALTER TABLE torrents ADD COLUMN readme TEXT;
- PRAGMA user_version = 1;`,
- )
+ zap.S().Warnf("Updating database schema from 0 to 1... (this might take a while)")
+ _, err = tx.Exec(`
+ DROP INDEX info_hash_index;
+ CREATE UNIQUE INDEX info_hash_index ON torrents (info_hash);
+ PRAGMA user_version = 1;
+ `)
+ if err != nil {
+ return err
+ }
+ fallthrough
+ // Upgrade from user_version 1 to 2
+ // The Change:
+ // * Added `is_readme` and `content` columns to the `files` table, and the constraints & the
+ // the indices they entail.
+ // * Added unique index `readme_index` on `files` table.
+ case 1:
+ zap.S().Warnf("Updating database schema from 1 to 2... (this might take a while)")
+ // We introduce two new columns here: content BLOB, and is_readme INTEGER which we treat as
+ // a bool (hence the CHECK).
+ // The reason for the change is that as we introduce the new "readme" feature which
+ // downloads a readme file as a torrent descriptor, we needed to store it somewhere in the
+ // database with the following conditions:
+ //
+ // 1. There can be one and only one readme (content) for a given torrent; hence the
+	//    UNIQUE INDEX on (torrent_id, is_readme) (remember that SQLite treats each NULL
+ // value as distinct [UNIQUE], see https://sqlite.org/nulls.html).
+ // 2. We would like to keep the readme (content) associated with the file it came from;
+ // hence we modify the files table instead of the torrents table.
+ //
+ // Regarding the implementation details, following constraints arise:
+ //
+ // 1. The column is_readme is either NULL or 1, and if it is 1, then content column cannot
+	//      be NULL (but might be an empty BLOB). Vice versa, if the content column of a row
+	//      is NULL, then is_readme must be NULL.
+ //
+ // This is to prevent unused content fields filling up the database, and to catch
+ // programmers' errors.
+ _, err = tx.Exec(`
+ ALTER TABLE files ADD COLUMN is_readme INTEGER CHECK (is_readme IS NULL OR is_readme=1);
+ ALTER TABLE files ADD COLUMN content BLOB CHECK((content IS NULL AND is_readme IS NULL) OR (content IS NOT NULL AND is_readme=1));
+ CREATE UNIQUE INDEX readme_index ON files (torrent_id, is_readme);
+ PRAGMA user_version = 2;
+ `)
if err != nil {
return err
}
- // Add `fallthrough`s as needed to keep upgrading...
}
if err = tx.Commit(); err != nil {
diff --git a/src/magneticod/persistence_test.go b/src/magneticod/persistence/database_test.go
similarity index 97%
rename from src/magneticod/persistence_test.go
rename to src/magneticod/persistence/database_test.go
index 716f216..98ffafe 100644
--- a/src/magneticod/persistence_test.go
+++ b/src/magneticod/persistence/database_test.go
@@ -1,4 +1,4 @@
-package main
+package persistence
import (
"path"
diff --git a/src/magneticow/bindata.go b/src/magneticow/bindata.go
new file mode 100644
index 0000000..24f10a3
--- /dev/null
+++ b/src/magneticow/bindata.go
@@ -0,0 +1,262 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+)
+
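+// NOTE: this file appears to be go-bindata's "debug mode" output, which reads each asset
+// from an absolute path on disk at runtime; a release build would embed the file contents
+// into the binary instead.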
+// bindata_read reads the given file from disk. It returns
+// an error on failure.
+func bindata_read(path, name string) ([]byte, error) {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ err = fmt.Errorf("Error reading asset %s at %s: %v", name, path, err)
+ }
+ return buf, err
+}
+
+
+// templates_torrent_html reads file data from disk.
+// It panics if something went wrong in the process.
+func templates_torrent_html() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/templates/torrent.html",
+ "templates/torrent.html",
+ )
+}
+
+// templates_feed_xml reads file data from disk.
+// It panics if something went wrong in the process.
+func templates_feed_xml() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/templates/feed.xml",
+ "templates/feed.xml",
+ )
+}
+
+// templates_homepage_html reads file data from disk.
+// It panics if something went wrong in the process.
+func templates_homepage_html() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/templates/homepage.html",
+ "templates/homepage.html",
+ )
+}
+
+// templates_statistics_html reads file data from disk.
+// It panics if something went wrong in the process.
+func templates_statistics_html() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/templates/statistics.html",
+ "templates/statistics.html",
+ )
+}
+
+// templates_torrents_html reads file data from disk.
+// It panics if something went wrong in the process.
+func templates_torrents_html() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/templates/torrents.html",
+ "templates/torrents.html",
+ )
+}
+
+// static_scripts_plotly_v1_26_1_min_js reads file data from disk.
+// It panics if something went wrong in the process.
+func static_scripts_plotly_v1_26_1_min_js() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/scripts/plotly-v1.26.1.min.js",
+ "static/scripts/plotly-v1.26.1.min.js",
+ )
+}
+
+// static_scripts_statistics_js reads file data from disk.
+// It panics if something went wrong in the process.
+func static_scripts_statistics_js() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/scripts/statistics.js",
+ "static/scripts/statistics.js",
+ )
+}
+
+// static_scripts_torrent_js reads file data from disk.
+// It panics if something went wrong in the process.
+func static_scripts_torrent_js() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/scripts/torrent.js",
+ "static/scripts/torrent.js",
+ )
+}
+
+// static_styles_reset_css reads file data from disk.
+// It panics if something went wrong in the process.
+func static_styles_reset_css() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/styles/reset.css",
+ "static/styles/reset.css",
+ )
+}
+
+// static_styles_statistics_css reads file data from disk.
+// It panics if something went wrong in the process.
+func static_styles_statistics_css() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/styles/statistics.css",
+ "static/styles/statistics.css",
+ )
+}
+
+// static_styles_torrent_css reads file data from disk.
+// It panics if something went wrong in the process.
+func static_styles_torrent_css() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/styles/torrent.css",
+ "static/styles/torrent.css",
+ )
+}
+
+// static_styles_torrents_css reads file data from disk.
+// It panics if something went wrong in the process.
+func static_styles_torrents_css() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/styles/torrents.css",
+ "static/styles/torrents.css",
+ )
+}
+
+// static_styles_homepage_css reads file data from disk.
+// It panics if something went wrong in the process.
+func static_styles_homepage_css() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/styles/homepage.css",
+ "static/styles/homepage.css",
+ )
+}
+
+// static_styles_essential_css reads file data from disk.
+// It panics if something went wrong in the process.
+func static_styles_essential_css() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/styles/essential.css",
+ "static/styles/essential.css",
+ )
+}
+
+// static_assets_magnet_gif reads file data from disk.
+// It panics if something went wrong in the process.
+func static_assets_magnet_gif() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/assets/magnet.gif",
+ "static/assets/magnet.gif",
+ )
+}
+
+// static_assets_feed_png reads file data from disk.
+// It panics if something went wrong in the process.
+func static_assets_feed_png() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/assets/feed.png",
+ "static/assets/feed.png",
+ )
+}
+
+// static_fonts_notomono_license_ofl_txt reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notomono_license_ofl_txt() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoMono/LICENSE_OFL.txt",
+ "static/fonts/NotoMono/LICENSE_OFL.txt",
+ )
+}
+
+// static_fonts_notomono_regular_ttf reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notomono_regular_ttf() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoMono/Regular.ttf",
+ "static/fonts/NotoMono/Regular.ttf",
+ )
+}
+
+// static_fonts_notosansui_license_ofl_txt reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notosansui_license_ofl_txt() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/LICENSE_OFL.txt",
+ "static/fonts/NotoSansUI/LICENSE_OFL.txt",
+ )
+}
+
+// static_fonts_notosansui_bold_ttf reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notosansui_bold_ttf() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/Bold.ttf",
+ "static/fonts/NotoSansUI/Bold.ttf",
+ )
+}
+
+// static_fonts_notosansui_bolditalic_ttf reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notosansui_bolditalic_ttf() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/BoldItalic.ttf",
+ "static/fonts/NotoSansUI/BoldItalic.ttf",
+ )
+}
+
+// static_fonts_notosansui_italic_ttf reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notosansui_italic_ttf() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/Italic.ttf",
+ "static/fonts/NotoSansUI/Italic.ttf",
+ )
+}
+
+// static_fonts_notosansui_regular_ttf reads file data from disk.
+// It panics if something went wrong in the process.
+func static_fonts_notosansui_regular_ttf() ([]byte, error) {
+ return bindata_read(
+ "/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/Regular.ttf",
+ "static/fonts/NotoSansUI/Regular.ttf",
+ )
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+ if f, ok := _bindata[name]; ok {
+ return f()
+ }
+ return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string] func() ([]byte, error) {
+ "templates/torrent.html": templates_torrent_html,
+ "templates/feed.xml": templates_feed_xml,
+ "templates/homepage.html": templates_homepage_html,
+ "templates/statistics.html": templates_statistics_html,
+ "templates/torrents.html": templates_torrents_html,
+ "static/scripts/plotly-v1.26.1.min.js": static_scripts_plotly_v1_26_1_min_js,
+ "static/scripts/statistics.js": static_scripts_statistics_js,
+ "static/scripts/torrent.js": static_scripts_torrent_js,
+ "static/styles/reset.css": static_styles_reset_css,
+ "static/styles/statistics.css": static_styles_statistics_css,
+ "static/styles/torrent.css": static_styles_torrent_css,
+ "static/styles/torrents.css": static_styles_torrents_css,
+ "static/styles/homepage.css": static_styles_homepage_css,
+ "static/styles/essential.css": static_styles_essential_css,
+ "static/assets/magnet.gif": static_assets_magnet_gif,
+ "static/assets/feed.png": static_assets_feed_png,
+ "static/fonts/NotoMono/LICENSE_OFL.txt": static_fonts_notomono_license_ofl_txt,
+ "static/fonts/NotoMono/Regular.ttf": static_fonts_notomono_regular_ttf,
+ "static/fonts/NotoSansUI/LICENSE_OFL.txt": static_fonts_notosansui_license_ofl_txt,
+ "static/fonts/NotoSansUI/Bold.ttf": static_fonts_notosansui_bold_ttf,
+ "static/fonts/NotoSansUI/BoldItalic.ttf": static_fonts_notosansui_bolditalic_ttf,
+ "static/fonts/NotoSansUI/Italic.ttf": static_fonts_notosansui_italic_ttf,
+ "static/fonts/NotoSansUI/Regular.ttf": static_fonts_notosansui_regular_ttf,
+
+}
diff --git a/magneticow/magneticow/static/assets/feed.png b/src/magneticow/data/static/assets/feed.png
similarity index 100%
rename from magneticow/magneticow/static/assets/feed.png
rename to src/magneticow/data/static/assets/feed.png
diff --git a/magneticow/magneticow/static/assets/magnet.gif b/src/magneticow/data/static/assets/magnet.gif
similarity index 100%
rename from magneticow/magneticow/static/assets/magnet.gif
rename to src/magneticow/data/static/assets/magnet.gif
diff --git a/magneticow/magneticow/static/fonts/NotoMono/LICENSE_OFL.txt b/src/magneticow/data/static/fonts/NotoMono/LICENSE_OFL.txt
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoMono/LICENSE_OFL.txt
rename to src/magneticow/data/static/fonts/NotoMono/LICENSE_OFL.txt
diff --git a/magneticow/magneticow/static/fonts/NotoMono/Regular.ttf b/src/magneticow/data/static/fonts/NotoMono/Regular.ttf
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoMono/Regular.ttf
rename to src/magneticow/data/static/fonts/NotoMono/Regular.ttf
diff --git a/magneticow/magneticow/static/fonts/NotoSansUI/Bold.ttf b/src/magneticow/data/static/fonts/NotoSansUI/Bold.ttf
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoSansUI/Bold.ttf
rename to src/magneticow/data/static/fonts/NotoSansUI/Bold.ttf
diff --git a/magneticow/magneticow/static/fonts/NotoSansUI/BoldItalic.ttf b/src/magneticow/data/static/fonts/NotoSansUI/BoldItalic.ttf
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoSansUI/BoldItalic.ttf
rename to src/magneticow/data/static/fonts/NotoSansUI/BoldItalic.ttf
diff --git a/magneticow/magneticow/static/fonts/NotoSansUI/Italic.ttf b/src/magneticow/data/static/fonts/NotoSansUI/Italic.ttf
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoSansUI/Italic.ttf
rename to src/magneticow/data/static/fonts/NotoSansUI/Italic.ttf
diff --git a/magneticow/magneticow/static/fonts/NotoSansUI/LICENSE_OFL.txt b/src/magneticow/data/static/fonts/NotoSansUI/LICENSE_OFL.txt
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoSansUI/LICENSE_OFL.txt
rename to src/magneticow/data/static/fonts/NotoSansUI/LICENSE_OFL.txt
diff --git a/magneticow/magneticow/static/fonts/NotoSansUI/Regular.ttf b/src/magneticow/data/static/fonts/NotoSansUI/Regular.ttf
similarity index 100%
rename from magneticow/magneticow/static/fonts/NotoSansUI/Regular.ttf
rename to src/magneticow/data/static/fonts/NotoSansUI/Regular.ttf
diff --git a/magneticow/magneticow/static/scripts/plotly-v1.26.1.min.js b/src/magneticow/data/static/scripts/plotly-v1.26.1.min.js
similarity index 100%
rename from magneticow/magneticow/static/scripts/plotly-v1.26.1.min.js
rename to src/magneticow/data/static/scripts/plotly-v1.26.1.min.js
diff --git a/magneticow/magneticow/static/scripts/statistics.js b/src/magneticow/data/static/scripts/statistics.js
similarity index 100%
rename from magneticow/magneticow/static/scripts/statistics.js
rename to src/magneticow/data/static/scripts/statistics.js
diff --git a/magneticow/magneticow/static/scripts/torrent.js b/src/magneticow/data/static/scripts/torrent.js
similarity index 100%
rename from magneticow/magneticow/static/scripts/torrent.js
rename to src/magneticow/data/static/scripts/torrent.js
diff --git a/magneticow/magneticow/static/styles/essential.css b/src/magneticow/data/static/styles/essential.css
similarity index 94%
rename from magneticow/magneticow/static/styles/essential.css
rename to src/magneticow/data/static/styles/essential.css
index 6c940cb..6a92c4e 100644
--- a/magneticow/magneticow/static/styles/essential.css
+++ b/src/magneticow/data/static/styles/essential.css
@@ -39,7 +39,7 @@ html {
}
pre {
- font-family: 'Noto Mono';
+ font-family: 'Noto Mono', monospace;
line-height: 1.2em;
}
@@ -49,6 +49,12 @@ body {
line-height: 1.45;
}
+@media (max-width: 616px) {
+ body {
+ padding: 1em 8px 1em 8px;
+ }
+}
+
b {
font-weight: bold;
}
diff --git a/src/magneticow/data/static/styles/homepage.css b/src/magneticow/data/static/styles/homepage.css
new file mode 100644
index 0000000..dcca09c
--- /dev/null
+++ b/src/magneticow/data/static/styles/homepage.css
@@ -0,0 +1,38 @@
+main {
+ display: flex;
+ align-items: center;
+ align-content: center;
+
+ height: calc(100vh - 2*16px - 0.833em - 23px); /* 100vh - body's padding(s) - footer margin - footer height */
+ width: 100%;
+}
+
+@media (max-width: 616px) {
+ main {
+ flex-direction: column;
+ justify-content: center;
+ align-items: flex-start;
+ }
+}
+
+main div#magneticow {
+ white-space: nowrap;
+ margin: 0 0.5em 0.5em 0;
+}
+
+main form {
+ max-width: 600px;
+ width: 100%;
+}
+
+main form input {
+ width: 100%;
+}
+
+main > div {
+ margin-right: 0.5em;
+}
+
+footer {
+ margin-top: 0.833em;
+}
diff --git a/magneticow/magneticow/static/styles/reset.css b/src/magneticow/data/static/styles/reset.css
similarity index 100%
rename from magneticow/magneticow/static/styles/reset.css
rename to src/magneticow/data/static/styles/reset.css
diff --git a/magneticow/magneticow/static/styles/statistics.css b/src/magneticow/data/static/styles/statistics.css
similarity index 100%
rename from magneticow/magneticow/static/styles/statistics.css
rename to src/magneticow/data/static/styles/statistics.css
diff --git a/magneticow/magneticow/static/styles/torrent.css b/src/magneticow/data/static/styles/torrent.css
similarity index 100%
rename from magneticow/magneticow/static/styles/torrent.css
rename to src/magneticow/data/static/styles/torrent.css
diff --git a/magneticow/magneticow/static/styles/torrents.css b/src/magneticow/data/static/styles/torrents.css
similarity index 100%
rename from magneticow/magneticow/static/styles/torrents.css
rename to src/magneticow/data/static/styles/torrents.css
diff --git a/magneticow/magneticow/templates/feed.xml b/src/magneticow/data/templates/feed.xml
similarity index 66%
rename from magneticow/magneticow/templates/feed.xml
rename to src/magneticow/data/templates/feed.xml
index 997210c..6a3c230 100644
--- a/magneticow/magneticow/templates/feed.xml
+++ b/src/magneticow/data/templates/feed.xml
@@ -1,12 +1,15 @@
- {{ title }}
+ {{ .Title }}
+
{% for item in items %}
-
{{ item.title }}
+ {{ item.DiscoveredOn }}
{{ item.info_hash }}
+
{% endfor %}
diff --git a/src/magneticow/data/templates/homepage.html b/src/magneticow/data/templates/homepage.html
new file mode 100644
index 0000000..cbe3872
--- /dev/null
+++ b/src/magneticow/data/templates/homepage.html
@@ -0,0 +1,24 @@
+
+
+
+
+
+ magneticow
+
+
+
+
+
+
+
+ magneticow(pre-alpha)
+
+
+
+
+
+
diff --git a/magneticow/magneticow/templates/statistics.html b/src/magneticow/data/templates/statistics.html
similarity index 100%
rename from magneticow/magneticow/templates/statistics.html
rename to src/magneticow/data/templates/statistics.html
diff --git a/magneticow/magneticow/templates/torrent.html b/src/magneticow/data/templates/torrent.html
similarity index 100%
rename from magneticow/magneticow/templates/torrent.html
rename to src/magneticow/data/templates/torrent.html
diff --git a/src/magneticow/data/templates/torrents.html b/src/magneticow/data/templates/torrents.html
new file mode 100644
index 0000000..4caa44d
--- /dev/null
+++ b/src/magneticow/data/templates/torrents.html
@@ -0,0 +1,66 @@
+
+
+
+
+ {% if .search %}"{{.search}}"{% else %}Most recent torrents{% endif %} - magneticow
+
+
+
+
+
+
+
+
+
+
+
+ |
+ Name |
+ Size |
+ Discovered on |
+
+
+
+ {% for torrent in torrents %}
+
+
+ |
+ {{ torrent.name }} |
+ {{ torrent.size }} |
+ {{ torrent.discovered_on }} |
+
+ {% endfor %}
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/magneticow/main.go b/src/magneticow/main.go
index 046b183..2bc09d8 100644
--- a/src/magneticow/main.go
+++ b/src/magneticow/main.go
@@ -1,25 +1,20 @@
-// magneticow - Lightweight web interface for magnetico.
-// Copyright (C) 2017 Mert Bora ALPER
-// Dedicated to Cemile Binay, in whose hands I thrived.
-//
-// This program is free software: you can redistribute it and/or modify it under the terms of the
-// GNU General Public License as published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
-// even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along with this program. If
-// not, see .
package main
import (
+ "html/template"
+ "log"
"net/http"
+ "strings"
"github.com/gorilla/mux"
+
+ "persistence"
)
+const N_TORRENTS = 20
+
+var templates map[string]*template.Template
+var database persistence.Database
func main() {
router := mux.NewRouter()
@@ -28,23 +23,48 @@ func main() {
router.HandleFunc("/torrents/{infohash}", torrentsInfohashHandler)
router.HandleFunc("/torrents/{infohash}/{name}", torrentsInfohashNameHandler)
router.HandleFunc("/statistics", statisticsHandler)
+ router.PathPrefix("/static").HandlerFunc(staticHandler)
+
router.HandleFunc("/feed", feedHandler)
+
+ templates = make(map[string]*template.Template)
+ templates["feed"] = template.Must(template.New("feed").Parse(string(mustAsset("templates/feed.xml"))))
+ templates["homepage"] = template.Must(template.New("homepage").Parse(string(mustAsset("templates/homepage.html"))))
+ templates["statistics"] = template.Must(template.New("statistics").Parse(string(mustAsset("templates/statistics.html"))))
+ templates["torrent"] = template.Must(template.New("torrent").Parse(string(mustAsset("templates/torrent.html"))))
+ templates["torrents"] = template.Must(template.New("torrents").Parse(string(mustAsset("templates/torrents.html"))))
+
+ var err error
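+ // NOTE: the database URL below is a hard-coded development path; it should eventually come from configuration.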
+ database, err = persistence.MakeDatabase("sqlite3:///home/bora/.local/share/magneticod/database.sqlite3")
+ if err != nil {
+ panic(err.Error())
+ }
+
http.ListenAndServe(":8080", router)
}
func rootHandler(w http.ResponseWriter, r *http.Request) {
-
+ count, err := database.GetNumberOfTorrents()
+ if err != nil {
+ panic(err.Error())
+ }
+ templates["homepage"].Execute(w, count)
}
func torrentsHandler(w http.ResponseWriter, r *http.Request) {
-
+ /*
+ newestTorrents, err := database.NewestTorrents(N_TORRENTS)
+ if err != nil {
+ panic(err.Error())
+ }
+ templates["torrents"].Execute(w, nil)
+ */
}
-
func torrentsInfohashHandler(w http.ResponseWriter, r *http.Request) {
-
+ // redirect to torrents/{infohash}/name
}
@@ -61,3 +81,28 @@ func statisticsHandler(w http.ResponseWriter, r *http.Request) {
func feedHandler(w http.ResponseWriter, r *http.Request) {
}
+
+func staticHandler(w http.ResponseWriter, r *http.Request) {
+ data, err := Asset(r.URL.Path[1:])
+ if err != nil {
+ http.NotFound(w, r)
+ return
+ }
+
+ var contentType string
+ if strings.HasSuffix(r.URL.Path, ".css") {
+ contentType = "text/css; charset=utf-8"
+ } else { // fall back to content sniffing; DetectContentType cannot recognize CSS
+ contentType = http.DetectContentType(data)
+ }
+ w.Header().Set("Content-Type", contentType)
+ w.Write(data)
+}
+
+func mustAsset(name string) []byte {
+ data, err := Asset(name)
+ if err != nil {
+ log.Panicf("Could NOT access the requested resource `%s`: %s", name, err.Error())
+ }
+ return data
+}
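
staticHandler and mustAsset above both lean on Asset, the lookup function that go-bindata generates for the files embedded under data/. Below is a minimal, self-contained sketch of the same pattern; the map-backed Asset stub stands in for the generated function, and the file name and contents are illustrative only. The .css special case exists because http.DetectContentType implements WHATWG content sniffing, which never yields text/css:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    // assets stands in for the go-bindata-generated bundle.
    var assets = map[string][]byte{
        "static/styles/homepage.css": []byte("body { margin: 0; }"),
    }

    // Asset mimics the signature of the generated go-bindata function.
    func Asset(name string) ([]byte, error) {
        if data, ok := assets[name]; ok {
            return data, nil
        }
        return nil, fmt.Errorf("asset `%s` not found", name)
    }

    func staticHandler(w http.ResponseWriter, r *http.Request) {
        data, err := Asset(r.URL.Path[1:]) // strip the leading '/'
        if err != nil {
            http.NotFound(w, r)
            return
        }
        if strings.HasSuffix(r.URL.Path, ".css") {
            w.Header().Set("Content-Type", "text/css; charset=utf-8")
        } else { // fall back to content sniffing
            w.Header().Set("Content-Type", http.DetectContentType(data))
        }
        w.Write(data)
    }

    func main() {
        http.HandleFunc("/", staticHandler)
        http.ListenAndServe(":8080", nil)
    }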
diff --git a/src/persistence/Gopkg.toml b/src/persistence/Gopkg.toml
new file mode 100644
index 0000000..403c7cd
--- /dev/null
+++ b/src/persistence/Gopkg.toml
@@ -0,0 +1,26 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "go.uber.org/zap"
+ version = "1.6.0"
diff --git a/src/persistence/interface.go b/src/persistence/interface.go
new file mode 100644
index 0000000..5dc3eae
--- /dev/null
+++ b/src/persistence/interface.go
@@ -0,0 +1,116 @@
+package persistence
+
+import (
+ "fmt"
+ "regexp"
+ "net/url"
+)
+
+type Database interface {
+ Engine() databaseEngine
+ DoesTorrentExist(infoHash []byte) (bool, error)
+ // GiveAnIncompleteTorrent returns (*gives*) an incomplete torrent -i.e. one whose readme
+ // hasn't been downloaded yet- from the database.
+ // GiveAnIncompleteTorrent might return a nil slice for infoHash, an empty string for path,
+ // and a nil err, meaning that no incomplete torrent could be found in the database (congrats!).
+ GiveAnIncompleteTorrent(pathRegex *regexp.Regexp, maxSize uint) (infoHash []byte, path string, err error)
+ GiveAStaleTorrent() (infoHash []byte, err error)
+ AddNewTorrent(infoHash []byte, name string, files []File) error
+ AddReadme(infoHash []byte, path string, content string) error
+ Close() error
+
+ // GetNumberOfTorrents returns the number of torrents saved in the database. Might be an
+ // approximation.
+ GetNumberOfTorrents() (uint, error)
+ NewestTorrents(n uint) ([]TorrentMetadata, error)
+ SearchTorrents(query string, orderBy orderingCriteria, descending bool, mustHaveReadme bool) ([]TorrentMetadata, error)
+ // GetTorrent returns the TorrentMetadata for the torrent with the given infoHash. Might return
+ // nil, nil if the torrent does not exist in the database.
+ GetTorrent(infoHash []byte) (*TorrentMetadata, error)
+ GetFiles(infoHash []byte) ([]File, error)
+ GetReadme(infoHash []byte) (string, error)
+ GetStatistics(from ISO8601, period uint) (*Statistics, error)
+}
+
+type orderingCriteria uint8
+
+const (
+ BY_NAME orderingCriteria = iota + 1
+ BY_SIZE
+ BY_DISCOVERED_ON
+ BY_N_FILES
+ BY_N_SEEDERS
+ BY_N_LEECHERS
+ BY_UPDATED_ON
+ BY_N_SEEDERS_TO_N_LEECHERS_RATIO
+ BY_N_SEEDERS_PLUS_N_LEECHERS
+)
+
+
+type statisticsGranularity uint8
+type ISO8601 string
+
+const (
+ MINUTELY_STATISTICS statisticsGranularity = iota + 1
+ HOURLY_STATISTICS
+ DAILY_STATISTICS
+ WEEKLY_STATISTICS
+ MONTHLY_STATISTICS
+ YEARLY_STATISTICS
+)
+
+type databaseEngine uint8
+
+const (
+ SQLITE3_ENGINE databaseEngine = 1
+)
+
+type Statistics struct {
+ Granularity statisticsGranularity
+ From ISO8601
+ Period uint
+
+ // Each of the slices below has a length equal to Period.
+ NTorrentsDiscovered []uint
+ NFilesDiscovered []uint
+ NReadmesDownloaded []uint
+ NTorrentsUpdated []uint
+}
+
+type File struct {
+ Size int64
+ Path string
+}
+
+type TorrentMetadata struct {
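+ // N.B. html/template cannot access unexported struct fields, so these will likely need to
+ // be exported (or exposed through methods) before magneticow's templates can render them.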
+ infoHash []byte
+ name string
+ size uint64
+ discoveredOn int64
+ hasReadme bool
+ nFiles uint
+ // values below 0 indicate that no data is available:
+ nSeeders int
+ nLeechers int
+ updatedOn int
+}
+
+func MakeDatabase(rawURL string) (Database, error) {
+ url_, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch url_.Scheme {
+ case "sqlite3":
+ return makeSqlite3Database(url_)
+
+ case "postgresql":
+ return nil, fmt.Errorf("postgresql is not yet supported!")
+
+ case "mysql":
+ return nil, fmt.Errorf("mysql is not yet supported!")
+ }
+
+ return nil, fmt.Errorf("unknown URI scheme (database engine)!")
+}
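
For reference, a consumer of this interface might look like the sketch below; the database path, the zeroed info-hash, and the file list are dummy values for illustration only:

    package main

    import (
        "fmt"

        "persistence"
    )

    func main() {
        db, err := persistence.MakeDatabase("sqlite3:///var/lib/magneticod/database.sqlite3")
        if err != nil {
            panic(err)
        }
        defer db.Close()

        // A zeroed 20-byte info-hash and a single dummy file, purely for illustration:
        files := []persistence.File{{Size: 1024, Path: "examples/README.md"}}
        if err := db.AddNewTorrent(make([]byte, 20), "example torrent", files); err != nil {
            panic(err)
        }

        count, err := db.GetNumberOfTorrents()
        if err != nil {
            panic(err)
        }
        fmt.Printf("%d torrent(s) in the database\n", count)
    }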
diff --git a/src/persistence/mysql.go b/src/persistence/mysql.go
new file mode 100644
index 0000000..dc7cf83
--- /dev/null
+++ b/src/persistence/mysql.go
@@ -0,0 +1 @@
+package persistence
diff --git a/src/persistence/postgresql.go b/src/persistence/postgresql.go
new file mode 100644
index 0000000..dc7cf83
--- /dev/null
+++ b/src/persistence/postgresql.go
@@ -0,0 +1 @@
+package persistence
diff --git a/src/persistence/sqlite3.go b/src/persistence/sqlite3.go
new file mode 100644
index 0000000..6fed455
--- /dev/null
+++ b/src/persistence/sqlite3.go
@@ -0,0 +1,427 @@
+package persistence
+
+import (
+ "database/sql"
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "regexp"
+ "time"
+
+ _ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver used by sql.Open below
+ "go.uber.org/zap"
+)
+
+type sqlite3Database struct {
+ conn *sql.DB
+}
+
+func (db *sqlite3Database) Engine() databaseEngine {
+ return SQLITE3_ENGINE
+}
+
+func makeSqlite3Database(url_ *url.URL) (Database, error) {
+ db := new(sqlite3Database)
+
+ dbDir, _ := path.Split(url_.Path)
+ if err := os.MkdirAll(dbDir, 0755); err != nil {
+ return nil, fmt.Errorf("for directory `%s`: %s", dbDir, err.Error())
+ }
+
+ var err error
+ db.conn, err = sql.Open("sqlite3", url_.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ // > Open may just validate its arguments without creating a connection to the database. To
+ // > verify that the data source name is valid, call Ping.
+ // https://golang.org/pkg/database/sql/#Open
+ if err = db.conn.Ping(); err != nil {
+ return nil, err
+ }
+
+ if err := db.setupDatabase(); err != nil {
+ return nil, err
+ }
+
+ return db, nil
+}
+
+func (db *sqlite3Database) DoesTorrentExist(infoHash []byte) (bool, error) {
+ rows, err := db.conn.Query("SELECT 1 FROM torrents WHERE info_hash = ?;", infoHash)
+ if err != nil {
+ return false, err
+ }
+
+ // If rows.Next() returns true, meaning that the torrent is in the database, return true; else
+ // return false.
+ exists := rows.Next()
+
+ if err = rows.Close(); err != nil {
+ return false, err
+ }
+
+ return exists, nil
+}
+
+func (db *sqlite3Database) GiveAnIncompleteTorrent(pathRegex *regexp.Regexp, maxSize uint) (infoHash []byte, path string, err error) {
+ rows, err := db.conn.Query("SELECT info_hash FROM torrents WHERE has_readme = 0;")
+ if err != nil {
+ return nil, "", err
+ }
+
+ if !rows.Next() {
+ return nil, "", nil
+ }
+
+ if err = rows.Scan(&infoHash); err != nil {
+ return nil, "", err
+ }
+
+ if err = rows.Close(); err != nil {
+ return nil, "", err
+ }
+
+ // TODO
+ return infoHash, "", nil
+}
+
+func (db *sqlite3Database) GiveAStaleTorrent() (infoHash []byte, err error) {
+ // TODO
+ return nil, nil
+}
+
+func (db *sqlite3Database) AddNewTorrent(infoHash []byte, name string, files []File) error {
+ // Although we check whether the torrent exists in the database before asking MetadataSink to
+ // fetch its metadata, the torrent can also exist in the Sink before that. If a torrent in
+ // the sink is still being fetched, that is not a problem: we just add the new peer for the
+ // torrent and exit. But if the torrent's metadata is complete and it is waiting in the
+ // channel to be received, a race condition arises: we query the database, see that the
+ // torrent doesn't exist there, and add it to the sink a second time.
+ // Hence, check one last time whether the torrent exists in the database, and add it only if
+ // it does not.
+ exists, err := db.DoesTorrentExist(infoHash)
+ if err != nil {
+ return err
+ } else if exists {
+ return nil
+ }
+
+ tx, err := db.conn.Begin()
+ if err != nil {
+ return err
+ }
+ // If everything goes as planned and no error occurs, we commit the transaction before
+ // returning, so the deferred tx.Rollback() becomes a harmless no-op on an already-committed
+ // transaction. But if an error occurs, the deferred call rolls the transaction back for us.
+ defer tx.Rollback()
+
+ var totalSize int64
+ for _, file := range files {
+ totalSize += file.Size
+ }
+
+ res, err := tx.Exec(`
+ INSERT INTO torrents (
+ info_hash,
+ name,
+ total_size,
+ discovered_on,
+ n_files
+ ) VALUES (?, ?, ?, ?, ?);
+ `, infoHash, name, totalSize, time.Now().Unix(), len(files))
+ if err != nil {
+ return err
+ }
+
+ var lastInsertId int64
+ if lastInsertId, err = res.LastInsertId(); err != nil {
+ return fmt.Errorf("sql.Result.LastInsertId()! %s", err.Error())
+ }
+
+ for _, file := range files {
+ _, err = tx.Exec("INSERT INTO files (torrent_id, size, path) VALUES (?, ?, ?);",
+ lastInsertId, file.Size, file.Path,
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (db *sqlite3Database) AddReadme(infoHash []byte, path string, content string) error {
+ _, err := db.conn.Exec(
+ `UPDATE files SET is_readme = 1, content = ?
+ WHERE path = ? AND (SELECT id FROM torrents WHERE info_hash = ?) = torrent_id;`,
+ content, path, infoHash,
+ )
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (db *sqlite3Database) Close() error {
+ return db.conn.Close()
+}
+
+func (db *sqlite3Database) GetNumberOfTorrents() (uint, error) {
+ // COUNT(ROWID) scans the whole table, which is much slower, so use MAX(ROWID) instead; it is
+ // exact as long as rows are never deleted (hence "might be an approximation" in the interface).
+ rows, err := db.conn.Query("SELECT MAX(ROWID) FROM torrents;")
+ if err != nil {
+ return 0, err
+ }
+
+ if !rows.Next() {
+ return 0, fmt.Errorf("no rows returned from `SELECT MAX(ROWID)`")
+ }
+
+ var n uint
+ if err = rows.Scan(&n); err != nil {
+ return 0, err
+ }
+
+ if err = rows.Close(); err != nil {
+ return 0, err
+ }
+
+ return n, nil
+}
+
+func (db *sqlite3Database) NewestTorrents(n uint) ([]TorrentMetadata, error) {
+ rows, err := db.conn.Query(`
+ SELECT
+ info_hash,
+ name,
+ total_size,
+ discovered_on,
+ has_readme,
+ n_files,
+ n_seeders,
+ n_leechers,
+ updated_on
+ FROM torrents
+ ORDER BY discovered_on DESC LIMIT ?;
+ `, n,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ var torrents []TorrentMetadata
+ for rows.Next() {
+ tm := new(TorrentMetadata)
+ if err = rows.Scan(
+ &tm.infoHash, &tm.name, &tm.size, &tm.discoveredOn, &tm.hasReadme, &tm.nFiles,
+ &tm.nSeeders, &tm.nLeechers, &tm.updatedOn,
+ ); err != nil {
+ return nil, err
+ }
+ torrents = append(torrents, *tm)
+ }
+
+ if err = rows.Close(); err != nil {
+ return nil, err
+ }
+
+ return torrents, nil
+}
+
+func (db *sqlite3Database) SearchTorrents(query string, orderBy orderingCriteria, descending bool, mustHaveReadme bool) ([]TorrentMetadata, error) { // TODO
+ // TODO:
+ return nil, nil
+}
+
+func (db *sqlite3Database) GetTorrent(infoHash []byte) (*TorrentMetadata, error) {
+ rows, err := db.conn.Query(
+ `SELECT
+ info_hash,
+ name,
+ total_size,
+ discovered_on,
+ has_readme,
+ n_files,
+ n_seeders,
+ n_leechers,
+ updated_on
+ FROM torrents
+ WHERE info_hash = ?`,
+ infoHash,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if !rows.Next() {
+ return nil, nil
+ }
+
+ tm := new(TorrentMetadata)
+ if err = rows.Scan(
+ &tm.infoHash, &tm.name, &tm.size, &tm.discoveredOn, &tm.hasReadme, &tm.nFiles,
+ &tm.nSeeders, &tm.nLeechers, &tm.updatedOn,
+ ); err != nil {
+ return nil, err
+ }
+
+ if err = rows.Close(); err != nil {
+ return nil, err
+ }
+
+ return tm, nil
+}
+
+func (db *sqlite3Database) GetFiles(infoHash []byte) ([]File, error) {
+ // TODO
+ return nil, nil
+}
+
+func (db *sqlite3Database) GetReadme(infoHash []byte) (string, error) {
+ // TODO
+ return "", nil
+}
+
+
+func (db *sqlite3Database) GetStatistics(from ISO8601, period uint) (*Statistics, error) {
+ // TODO
+ return nil, nil
+}
+
+func (db *sqlite3Database) commitQueuedTorrents() error {
+ return nil
+}
+
+func (db *sqlite3Database) setupDatabase() error {
+ // Enable Write-Ahead Logging for SQLite as "WAL provides more concurrency as readers do not
+ // block writers and a writer does not block readers. Reading and writing can proceed
+ // concurrently."
+ // Caveats:
+ // * Might be unsupported by OSes other than Windows and UNIXes.
+ // * Does not work over a network filesystem.
+ // * Transactions that involve changes against multiple ATTACHed databases are not atomic
+ // across all databases as a set.
+ // See: https://www.sqlite.org/wal.html
+ //
+ // Force SQLite to use disk, instead of memory, for all temporary files to reduce the memory
+ // footprint.
+ //
+ // Enable foreign key constraints in SQLite which are crucial to prevent programmer errors on
+ // our side.
+ _, err := db.conn.Exec(`
+ PRAGMA journal_mode=WAL;
+ PRAGMA temp_store=1;
+ PRAGMA foreign_keys=ON;
+ `)
+ if err != nil {
+ return err
+ }
+
+ tx, err := db.conn.Begin()
+ if err != nil {
+ return err
+ }
+ // If everything goes as planned and no error occurs, we commit the transaction before
+ // returning, so the deferred tx.Rollback() becomes a harmless no-op on an already-committed
+ // transaction. But if an error occurs, the deferred call rolls the transaction back for us.
+ defer tx.Rollback()
+
+ // Essential, and valid for all user_version values:
+ // TODO: "torrent_id" column of the "files" table can be NULL, how can we fix this in a new schema?
+ _, err = tx.Exec(`
+ CREATE TABLE IF NOT EXISTS torrents (
+ id INTEGER PRIMARY KEY,
+ info_hash BLOB NOT NULL UNIQUE,
+ name TEXT NOT NULL,
+ total_size INTEGER NOT NULL CHECK(total_size > 0),
+ discovered_on INTEGER NOT NULL CHECK(discovered_on > 0)
+ );
+ CREATE TABLE IF NOT EXISTS files (
+ id INTEGER PRIMARY KEY,
+ torrent_id INTEGER REFERENCES torrents ON DELETE CASCADE ON UPDATE RESTRICT,
+ size INTEGER NOT NULL,
+ path TEXT NOT NULL
+ );
+ `)
+ if err != nil {
+ return err
+ }
+
+ // Get the user_version:
+ res, err := tx.Query("PRAGMA user_version;")
+ if err != nil {
+ return err
+ }
+ var userVersion int
+ if !res.Next() {
+ return fmt.Errorf("PRAGMA user_version returned no rows")
+ }
+ if err = res.Scan(&userVersion); err != nil {
+ return err
+ }
+ // Close the result set before executing further statements on the same transaction.
+ if err = res.Close(); err != nil {
+ return err
+ }
+
+ switch userVersion {
+ // Upgrade from user_version 0 to 1
+ // The Change:
+ // * `info_hash_index` is recreated as UNIQUE.
+ case 0:
+ zap.S().Warnf("Updating database schema from 0 to 1... (this might take a while)")
+ _, err = tx.Exec(`
+ DROP INDEX info_hash_index;
+ CREATE UNIQUE INDEX info_hash_index ON torrents (info_hash);
+ PRAGMA user_version = 1;
+ `)
+ if err != nil {
+ return err
+ }
+ fallthrough
+ // Upgrade from user_version 1 to 2
+ // The Change:
+ // * Added `is_readme` and `content` columns to the `files` table, and the constraints &
+ // indices they entail.
+ // * Added unique index `readme_index` on `files` table.
+ case 1:
+ zap.S().Warnf("Updating database schema from 1 to 2... (this might take a while)")
+ // We introduce two new columns here: content BLOB, and is_readme INTEGER which we treat as
+ // a bool (hence the CHECK).
+ // The reason for the change is that as we introduce the new "readme" feature which
+ // downloads a readme file as a torrent descriptor, we needed to store it somewhere in the
+ // database with the following conditions:
+ //
+ // 1. There can be one and only one readme (content) for a given torrent; hence the
+ // UNIQUE INDEX on (torrent_id, is_readme) (remember that SQLite treats each NULL
+ // value as distinct [UNIQUE], see https://sqlite.org/nulls.html).
+ // 2. We would like to keep the readme (content) associated with the file it came from;
+ // hence we modify the files table instead of the torrents table.
+ //
+ // Regarding the implementation details, the following constraints arise:
+ //
+ // 1. The is_readme column is either NULL or 1, and if it is 1, then the content column
+ // cannot be NULL (but might be an empty BLOB). Conversely, if the content column of a
+ // row is NULL, then is_readme must be NULL.
+ //
+ // This is to prevent unused content fields filling up the database, and to catch
+ // programmers' errors.
+ _, err = tx.Exec(`
+ ALTER TABLE files ADD COLUMN is_readme INTEGER CHECK (is_readme IS NULL OR is_readme=1) DEFAULT NULL;
+ ALTER TABLE files ADD COLUMN content BLOB CHECK((content IS NULL AND is_readme IS NULL) OR (content IS NOT NULL AND is_readme=1)) DEFAULT NULL;
+ CREATE UNIQUE INDEX readme_index ON files (torrent_id, is_readme);
+ PRAGMA user_version = 2;
+ `)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
\ No newline at end of file
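
The switch/fallthrough idiom in setupDatabase is the core of the migration scheme: read PRAGMA user_version, enter the switch at the database's current version, and fall through so that every later migration also runs. A minimal, self-contained sketch of the same pattern against an in-memory SQLite database follows (table and column names are illustrative, not magnetico's schema):

    package main

    import (
        "database/sql"

        _ "github.com/mattn/go-sqlite3"
    )

    // migrate upgrades the schema from whatever user_version the database reports;
    // each case falls through so all subsequent migrations run as well.
    func migrate(db *sql.DB) error {
        var userVersion int
        if err := db.QueryRow("PRAGMA user_version;").Scan(&userVersion); err != nil {
            return err
        }

        switch userVersion {
        case 0: // 0 -> 1: initial schema
            if _, err := db.Exec(`
                CREATE TABLE example (id INTEGER PRIMARY KEY, info_hash BLOB NOT NULL UNIQUE);
                PRAGMA user_version = 1;
            `); err != nil {
                return err
            }
            fallthrough
        case 1: // 1 -> 2: add a nullable column
            if _, err := db.Exec(`
                ALTER TABLE example ADD COLUMN note TEXT DEFAULT NULL;
                PRAGMA user_version = 2;
            `); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        db, err := sql.Open("sqlite3", ":memory:")
        if err != nil {
            panic(err)
        }
        defer db.Close()

        if err := migrate(db); err != nil {
            panic(err)
        }
    }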