From 85fb2f5ea9c18498592254564634a2ea9798d08a Mon Sep 17 00:00:00 2001 From: Bora Alper Date: Fri, 3 Aug 2018 16:16:33 +0300 Subject: [PATCH] resolved reqq question reqq question ============= reqq: An integer, the number of outstanding request messages this client supports without dropping any. The default in in libtorrent is 250. "handshake message" @ "Extension Protocol" @ http://www.bittorrent.org/beps/bep_0010.html TODO: maybe by requesting all pieces at once we are exceeding this limit? maybe we should request as we receive pieces? answer ====== almost every single peer I encountered (for a brief 10 minutes... which I think is enough) had 255 as reqq value and the number of metadata pieces we requested very rarely exceeded 20... I think it's fair to assume that exceeding "that limit" will never be a question, and requesting the next piece as we receive the previous one might increase the latency, unnecessarily. --- cmd/magneticod/bittorrent/metadata/leech.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/cmd/magneticod/bittorrent/metadata/leech.go b/cmd/magneticod/bittorrent/metadata/leech.go index 7ed0832..22b5729 100644 --- a/cmd/magneticod/bittorrent/metadata/leech.go +++ b/cmd/magneticod/bittorrent/metadata/leech.go @@ -150,15 +150,6 @@ func (l *Leech) doExHandshake() error { } func (l *Leech) requestAllPieces() error { - // reqq // An integer, the number of outstanding request messages this client supports without - // dropping any. The default in in libtorrent is 250. - // - // "handshake message" @ "Extension Protocol" @ http://www.bittorrent.org/beps/bep_0010.html - // - // TODO: maybe by requesting all pieces at once we are exceeding this limit? maybe we should - // request as we receive pieces? 
- // Request all the pieces of metadata nPieces := int(math.Ceil(float64(l.metadataSize) / math.Pow(2, 14))) for piece := 0; piece < nPieces; piece++ { @@ -318,7 +309,7 @@ func (l *Leech) Do(deadline time.Time) { l.OnError(errors.Wrap(err, "doExHandshake")) return } - + err = l.requestAllPieces() if err != nil { l.OnError(errors.Wrap(err, "requestAllPieces"))