2018-07-24 14:41:13 +02:00
|
|
|
package metadata
|
2017-11-03 00:15:13 +01:00
|
|
|
|
|
|
|
import (
|
2018-12-25 16:38:13 +01:00
|
|
|
"math/rand"
|
2019-05-19 01:07:37 +02:00
|
|
|
"net"
|
2018-08-07 09:32:12 +02:00
|
|
|
"sync"
|
2017-11-03 00:15:13 +01:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2019-01-05 19:35:13 +01:00
|
|
|
"github.com/boramalper/magnetico/cmd/magneticod/dht"
|
2018-04-16 17:40:54 +02:00
|
|
|
"github.com/boramalper/magnetico/pkg/persistence"
|
2018-12-30 06:24:14 +01:00
|
|
|
"github.com/boramalper/magnetico/pkg/util"
|
2017-11-03 00:15:13 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// Metadata is the torrent metadata fetched by a successful leech, delivered
// to consumers through the channel returned by Sink.Drain().
type Metadata struct {
	// InfoHash is the torrent's 20-byte info-hash (stored as a slice; Sink
	// copies it into a [20]byte key when bookkeeping).
	InfoHash []byte
	// Name should be thought of "Title" of the torrent. For single-file torrents, it is the name
	// of the file, and for multi-file torrents, it is the name of the root directory.
	Name string
	// TotalSize is the total size of the torrent's payload in bytes
	// (presumably the sum of all file sizes — confirm at the producer).
	TotalSize uint64
	// DiscoveredOn records when the torrent was discovered; presumably a UNIX
	// timestamp in seconds — confirm against persistence layer.
	DiscoveredOn int64
	// Files must be populated for both single-file and multi-file torrents!
	Files []persistence.File
}
|
|
|
|
|
2018-07-24 14:41:13 +02:00
|
|
|
// Sink leeches torrent metadata for the infohashes fed to it via Sink(),
// running up to maxNLeeches leeches concurrently, and delivers the results
// through the channel returned by Drain().
type Sink struct {
	// PeerID is the BitTorrent peer ID this Sink presents to remote peers.
	PeerID []byte
	// deadline bounds how long each individual leech may run, and is also
	// used as the period of the status-logging goroutine.
	deadline time.Duration
	// maxNLeeches caps the number of concurrently leeched infohashes.
	maxNLeeches int
	// drain carries successfully fetched metadata to the consumer; closed by
	// Terminate().
	drain chan Metadata

	// incomingInfoHashes maps each infohash currently being leeched to the
	// peer addresses not yet tried for it (fallbacks used on leech error).
	incomingInfoHashes map[[20]byte][]net.TCPAddr
	// incomingInfoHashesMx guards incomingInfoHashes. deleted is also mutated
	// under it, though NewSink's logger goroutine reads it without the lock —
	// NOTE(review): potential data race, confirm.
	incomingInfoHashesMx sync.Mutex

	// terminated is set by Terminate(); read (unsynchronized) by Sink(),
	// Drain() and flush() — NOTE(review): plain bool, not atomic.
	terminated bool
	// termination is closed by Terminate() to broadcast shutdown.
	termination chan interface{}

	// deleted counts infohashes given up on since the last periodic status
	// log line (reset by that logger).
	deleted int
}
|
|
|
|
|
2018-12-25 16:38:13 +01:00
|
|
|
// randomID generates a new Azureus-style BitTorrent peer ID: the "-MC0008-"
// client/version prefix followed by random ASCII digits, exactly 20 bytes in
// total.
func randomID() []byte {
	/* > The peer_id is exactly 20 bytes (characters) long.
	 * >
	 * > There are mainly two conventions how to encode client and client version information into the peer_id,
	 * > Azureus-style and Shadow's-style.
	 * >
	 * > Azureus-style uses the following encoding: '-', two characters for client id, four ascii digits for version
	 * > number, '-', followed by random numbers.
	 * >
	 * > For example: '-AZ2060-'...
	 *
	 * https://wiki.theory.org/index.php/BitTorrentSpecification
	 *
	 * We encode the version number as:
	 *  - First two digits for the major version number
	 *  - Last two digits for the minor version number
	 *  - Patch version number is not encoded.
	 */
	prefix := []byte("-MC0008-")

	// Append exactly 20-len(prefix) random digits. The previous loop condition
	// (i >= 0) ran one extra iteration and produced a 21-byte peer ID, in
	// violation of the 20-byte requirement quoted above.
	rando := make([]byte, 0, 20-len(prefix))
	for i := 20 - len(prefix); i > 0; i-- {
		rando = append(rando, randomDigit())
	}

	return append(prefix, rando...)
}

// randomDigit returns a random ASCII digit in the inclusive range '0'..'9'.
func randomDigit() byte {
	const min, max = '0', '9'
	// rand.Intn excludes its upper bound, hence the +1 so that '9' is
	// reachable (the previous max-min argument could never yield '9').
	return byte(rand.Intn(max-min+1) + min)
}
|
|
|
|
|
2018-12-30 06:24:14 +01:00
|
|
|
func NewSink(deadline time.Duration, maxNLeeches int) *Sink {
|
2018-07-24 14:41:13 +02:00
|
|
|
ms := new(Sink)
|
2017-11-03 00:15:13 +01:00
|
|
|
|
2018-12-25 16:38:13 +01:00
|
|
|
ms.PeerID = randomID()
|
2017-11-03 00:15:13 +01:00
|
|
|
ms.deadline = deadline
|
2018-12-30 06:24:14 +01:00
|
|
|
ms.maxNLeeches = maxNLeeches
|
2019-05-19 01:07:37 +02:00
|
|
|
ms.drain = make(chan Metadata, 10)
|
|
|
|
ms.incomingInfoHashes = make(map[[20]byte][]net.TCPAddr)
|
2017-11-03 00:15:13 +01:00
|
|
|
ms.termination = make(chan interface{})
|
2018-12-25 16:38:13 +01:00
|
|
|
|
2019-05-19 01:07:37 +02:00
|
|
|
go func() {
|
|
|
|
for range time.Tick(deadline) {
|
|
|
|
ms.incomingInfoHashesMx.Lock()
|
|
|
|
l := len(ms.incomingInfoHashes)
|
|
|
|
ms.incomingInfoHashesMx.Unlock()
|
|
|
|
zap.L().Info("Sink status",
|
|
|
|
zap.Int("activeLeeches", l),
|
|
|
|
zap.Int("nDeleted", ms.deleted),
|
|
|
|
zap.Int("drainQueue", len(ms.drain)),
|
|
|
|
)
|
|
|
|
ms.deleted = 0
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2017-11-03 00:15:13 +01:00
|
|
|
return ms
|
|
|
|
}
|
|
|
|
|
2019-01-05 19:35:13 +01:00
|
|
|
func (ms *Sink) Sink(res dht.Result) {
|
2017-11-03 00:15:13 +01:00
|
|
|
if ms.terminated {
|
2018-07-24 14:41:13 +02:00
|
|
|
zap.L().Panic("Trying to Sink() an already closed Sink!")
|
2017-11-03 00:15:13 +01:00
|
|
|
}
|
2018-08-07 09:32:12 +02:00
|
|
|
ms.incomingInfoHashesMx.Lock()
|
|
|
|
defer ms.incomingInfoHashesMx.Unlock()
|
2017-11-03 00:15:13 +01:00
|
|
|
|
2018-12-30 06:24:14 +01:00
|
|
|
// cap the max # of leeches
|
|
|
|
if len(ms.incomingInfoHashes) >= ms.maxNLeeches {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-05-19 01:07:37 +02:00
|
|
|
infoHash := res.InfoHash()
|
|
|
|
peerAddrs := res.PeerAddrs()
|
|
|
|
|
2019-01-05 19:35:13 +01:00
|
|
|
if _, exists := ms.incomingInfoHashes[infoHash]; exists {
|
2017-11-05 02:50:20 +01:00
|
|
|
return
|
2019-05-19 01:07:37 +02:00
|
|
|
} else if len(peerAddrs) > 0 {
|
|
|
|
peer := peerAddrs[0]
|
|
|
|
ms.incomingInfoHashes[infoHash] = peerAddrs[1:]
|
|
|
|
|
|
|
|
go NewLeech(infoHash, &peer, ms.PeerID, LeechEventHandlers{
|
|
|
|
OnSuccess: ms.flush,
|
|
|
|
OnError: ms.onLeechError,
|
|
|
|
}).Do(time.Now().Add(ms.deadline))
|
2017-11-05 02:50:20 +01:00
|
|
|
}
|
|
|
|
|
2019-05-15 14:18:42 +02:00
|
|
|
zap.L().Debug("Sunk!", zap.Int("leeches", len(ms.incomingInfoHashes)), util.HexField("infoHash", infoHash[:]))
|
2017-11-03 00:15:13 +01:00
|
|
|
}
|
|
|
|
|
2018-07-24 14:41:13 +02:00
|
|
|
// Drain returns the receive-only channel on which fetched torrent metadata is
// delivered. It panics if the Sink has already been Terminate()d.
//
// NOTE(review): ms.terminated is read here without synchronization; a
// concurrent Terminate() would be a data race — confirm callers serialize.
func (ms *Sink) Drain() <-chan Metadata {
	if ms.terminated {
		zap.L().Panic("Trying to Drain() an already closed Sink!")
	}
	return ms.drain
}
|
|
|
|
|
2018-07-24 14:41:13 +02:00
|
|
|
// Terminate shuts the Sink down: it marks it terminated, broadcasts shutdown
// by closing the termination channel, and closes the drain channel so that
// consumers of Drain() observe the end of the stream.
//
// NOTE(review): terminated is a plain bool written here while other methods
// read it concurrently without synchronization — confirm external ordering.
func (ms *Sink) Terminate() {
	ms.terminated = true
	close(ms.termination)
	close(ms.drain)
}
|
|
|
|
|
2018-07-24 14:41:13 +02:00
|
|
|
func (ms *Sink) flush(result Metadata) {
|
2018-12-25 16:38:13 +01:00
|
|
|
if ms.terminated {
|
|
|
|
return
|
2017-11-03 00:15:13 +01:00
|
|
|
}
|
2018-12-25 16:38:13 +01:00
|
|
|
|
|
|
|
ms.drain <- result
|
|
|
|
// Delete the infoHash from ms.incomingInfoHashes ONLY AFTER once we've flushed the
|
|
|
|
// metadata!
|
2019-05-19 01:07:37 +02:00
|
|
|
ms.incomingInfoHashesMx.Lock()
|
|
|
|
defer ms.incomingInfoHashesMx.Unlock()
|
|
|
|
|
2018-12-25 16:38:13 +01:00
|
|
|
var infoHash [20]byte
|
|
|
|
copy(infoHash[:], result.InfoHash)
|
|
|
|
delete(ms.incomingInfoHashes, infoHash)
|
2017-11-03 00:15:13 +01:00
|
|
|
}
|
2018-07-24 14:41:13 +02:00
|
|
|
|
|
|
|
func (ms *Sink) onLeechError(infoHash [20]byte, err error) {
|
2018-08-03 10:28:50 +02:00
|
|
|
zap.L().Debug("leech error", util.HexField("infoHash", infoHash[:]), zap.Error(err))
|
2018-12-30 06:24:14 +01:00
|
|
|
|
2018-08-07 09:32:12 +02:00
|
|
|
ms.incomingInfoHashesMx.Lock()
|
2019-05-19 01:07:37 +02:00
|
|
|
defer ms.incomingInfoHashesMx.Unlock()
|
|
|
|
|
|
|
|
if len(ms.incomingInfoHashes[infoHash]) > 0 {
|
|
|
|
peer := ms.incomingInfoHashes[infoHash][0]
|
|
|
|
ms.incomingInfoHashes[infoHash] = ms.incomingInfoHashes[infoHash][1:]
|
|
|
|
go NewLeech(infoHash, &peer, ms.PeerID, LeechEventHandlers{
|
|
|
|
OnSuccess: ms.flush,
|
|
|
|
OnError: ms.onLeechError,
|
|
|
|
}).Do(time.Now().Add(ms.deadline))
|
|
|
|
} else {
|
|
|
|
ms.deleted++
|
|
|
|
delete(ms.incomingInfoHashes, infoHash)
|
|
|
|
}
|
2018-07-24 14:41:13 +02:00
|
|
|
}
|