fixed some go vet warnings, fixed formatting

This commit is contained in:
Bora Alper 2018-08-03 15:40:04 +03:00
parent e4bb7b5b35
commit 4b9b354171
16 changed files with 173 additions and 138 deletions

View File

@ -7,3 +7,16 @@ magneticow:
# TODO: minify files!
go-bindata -o="magneticow/bindata.go" -prefix="magneticow/data/" magneticow/data/...
go install magnetico/magneticow
test:
go test github.com/boramalper/magnetico/cmd/magneticod/...
@echo
go test github.com/boramalper/magnetico/cmd/magneticow/...
@echo
go test github.com/boramalper/magnetico/pkg/persistence/...
format:
gofmt -w cmd/magneticod
gofmt -w cmd/magneticow
gofmt -w pkg/persistence

View File

@ -37,12 +37,12 @@ type extDict struct {
}
type Leech struct {
infoHash [20]byte
peerAddr *net.TCPAddr
ev LeechEventHandlers
infoHash [20]byte
peerAddr *net.TCPAddr
ev LeechEventHandlers
conn *net.TCPConn
clientID [20]byte
conn *net.TCPConn
clientID [20]byte
ut_metadata uint8
metadataReceived, metadataSize uint
@ -50,8 +50,8 @@ type Leech struct {
}
type LeechEventHandlers struct {
OnSuccess func(Metadata) // must be supplied. args: metadata
OnError func([20]byte, error) // must be supplied. args: infohash, error
OnSuccess func(Metadata) // must be supplied. args: metadata
OnError func([20]byte, error) // must be supplied. args: infohash, error
}
func NewLeech(infoHash [20]byte, peerAddr *net.TCPAddr, ev LeechEventHandlers) *Leech {
@ -86,7 +86,9 @@ func (l *Leech) doBtHandshake() error {
))
// ASSERTION
if len(lHandshake) != 68 { panic(fmt.Sprintf("len(lHandshake) == %d", len(lHandshake))) }
if len(lHandshake) != 68 {
panic(fmt.Sprintf("len(lHandshake) == %d", len(lHandshake)))
}
err := l.writeAll(lHandshake)
if err != nil {
@ -148,15 +150,15 @@ func (l *Leech) doExHandshake() error {
}
func (l *Leech) requestAllPieces() error {
/*
* reqq
* An integer, the number of outstanding request messages this client supports without
* dropping any. The default in libtorrent is 250.
* "handshake message" @ "Extension Protocol" @ http://www.bittorrent.org/beps/bep_0010.html
*
* TODO: maybe by requesting all pieces at once we are exceeding this limit? maybe we should
* request as we receive pieces?
*/
// reqq
// An integer, the number of outstanding request messages this client supports without
// dropping any. The default in libtorrent is 250.
//
// "handshake message" @ "Extension Protocol" @ http://www.bittorrent.org/beps/bep_0010.html
//
// TODO: maybe by requesting all pieces at once we are exceeding this limit? maybe we should
// request as we receive pieces?
// Request all the pieces of metadata
nPieces := int(math.Ceil(float64(l.metadataSize) / math.Pow(2, 14)))
for piece := 0; piece < nPieces; piece++ {
@ -166,13 +168,13 @@ func (l *Leech) requestAllPieces() error {
MsgType: 0,
Piece: piece,
})
if err != nil { // ASSERT
if err != nil { // ASSERT
panic(errors.Wrap(err, "marshal extDict"))
}
err = l.writeAll([]byte(fmt.Sprintf(
"%s\x14%s%s",
toBigEndian(uint(2 + len(extDictDump)), 4),
toBigEndian(uint(2+len(extDictDump)), 4),
toBigEndian(uint(l.ut_metadata), 1),
extDictDump,
)))
@ -213,7 +215,7 @@ func (l *Leech) readExMessage() ([]byte, error) {
// Every extension message has at least 2 bytes.
if len(rMessage) < 2 {
continue;
continue
}
// We are interested only in extension messages, whose first byte is always 20
@ -483,4 +485,3 @@ func toBigEndian(i uint, n int) []byte {
return b
}

View File

@ -7,23 +7,23 @@ import (
"github.com/anacrolix/torrent/bencode"
)
var operationsTest_instances = []struct{
var operationsTest_instances = []struct {
dump []byte
surplus []byte
}{
// No Surplus
{
dump: []byte("d1:md11:ut_metadatai1ee13:metadata_sizei22528ee"),
dump: []byte("d1:md11:ut_metadatai1ee13:metadata_sizei22528ee"),
surplus: []byte(""),
},
// Surplus is an ASCII string
{
dump: []byte("d1:md11:ut_metadatai1ee13:metadata_sizei22528eeDENEME"),
dump: []byte("d1:md11:ut_metadatai1ee13:metadata_sizei22528eeDENEME"),
surplus: []byte("DENEME"),
},
// Surplus is a bencoded dictionary
{
dump: []byte("d1:md11:ut_metadatai1ee13:metadata_sizei22528eed3:inti1337ee"),
dump: []byte("d1:md11:ut_metadatai1ee13:metadata_sizei22528eed3:inti1337ee"),
surplus: []byte("d3:inti1337ee"),
},
}
@ -31,14 +31,14 @@ var operationsTest_instances = []struct{
func TestDecoder(t *testing.T) {
for i, instance := range operationsTest_instances {
buf := bytes.NewBuffer(instance.dump)
err := bencode.NewDecoder(buf).Decode(&struct {}{})
err := bencode.NewDecoder(buf).Decode(&struct{}{})
if err != nil {
t.Errorf("Couldn't decode the dump #%d! %s", i + 1, err.Error())
t.Errorf("Couldn't decode the dump #%d! %s", i+1, err.Error())
}
bufSurplus := buf.Bytes()
if !bytes.Equal(bufSurplus, instance.surplus) {
t.Errorf("Surplus #%d is not equal to what we expected! `%s`", i + 1, bufSurplus)
t.Errorf("Surplus #%d is not equal to what we expected! `%s`", i+1, bufSurplus)
}
}
}

View File

@ -64,7 +64,7 @@ func (ms *Sink) Sink(res mainline.TrawlingResult) {
leech := NewLeech(res.InfoHash, res.PeerAddr, LeechEventHandlers{
OnSuccess: ms.flush,
OnError: ms.onLeechError,
OnError: ms.onLeechError,
})
go leech.Do(time.Now().Add(ms.deadline))

View File

@ -258,7 +258,6 @@ func (e *Error) UnmarshalBencode(b []byte) (err error) {
}
if len(matches[2]) != msgLen {
return
return fmt.Errorf("error message have different lengths (%d vs %d) \"%s\"!", len(matches[2]), msgLen, matches[2])
}

View File

@ -221,9 +221,8 @@ func (p *Protocol) CalculateToken(address net.IP) []byte {
func (p *Protocol) VerifyToken(address net.IP, token []byte) bool {
p.tokenLock.Lock()
defer p.tokenLock.Unlock()
// TODO: implement VerifyToken()
panic("VerifyToken() not implemented yet!")
// TODO
return false
}
func (p *Protocol) updateTokenSecret() {

View File

@ -113,7 +113,7 @@ func (s *TrawlingService) bootstrap() {
if err != nil {
zap.L().Error("Could NOT resolve (UDP) address of the bootstrapping node!",
zap.String("node", node))
continue;
continue
}
s.protocol.SendMessage(NewFindNodeQuery(s.trueNodeID, target), addr)
@ -208,11 +208,11 @@ func (s *TrawlingService) onCongestion() {
*
* In case of congestion, decrease the maximum number of nodes to the 90% of the current value.
*/
if s.maxNeighbors < 200 {
zap.L().Warn("Max. number of neighbours are < 200 and there is still congestion!" +
"(check your network connection if this message recurs)")
return
}
if s.maxNeighbors < 200 {
zap.L().Warn("Max. number of neighbours are < 200 and there is still congestion!" +
"(check your network connection if this message recurs)")
return
}
s.maxNeighbors = uint(float32(s.maxNeighbors) * 0.9)
s.maxNeighbors = uint(float32(s.maxNeighbors) * 0.9)
}

View File

@ -36,8 +36,8 @@ func NewTransport(laddr string, onMessage func(*Message, *net.UDPAddr), onConges
*
* https://en.wikipedia.org/wiki/User_Datagram_Protocol
*/
t.buffer = make([]byte, 65507)
t.onMessage = onMessage
t.buffer = make([]byte, 65507)
t.onMessage = onMessage
t.onCongestion = onCongestion
var err error
@ -83,7 +83,7 @@ func (t *Transport) Start() {
}
func (t *Transport) Terminate() {
unix.Close(t.fd);
unix.Close(t.fd)
}
// readMessages is a goroutine!
@ -131,7 +131,7 @@ func (t *Transport) WriteMessages(msg *Message, addr *net.UDPAddr) {
if addrSA == nil {
zap.L().Debug("Wrong net address for the remote peer!",
zap.String("addr", addr.String()))
return;
return
}
err = unix.Sendto(t.fd, data, 0, addrSA)

View File

@ -37,7 +37,7 @@ type opFlags struct {
TrawlerMlInterval time.Duration
Verbosity int
Profile string
Profile string
}
func main() {
@ -76,7 +76,7 @@ func main() {
switch opFlags.Profile {
case "cpu":
file, err := os.OpenFile("magneticod_cpu.prof", os.O_CREATE | os.O_WRONLY, 0755)
file, err := os.OpenFile("magneticod_cpu.prof", os.O_CREATE|os.O_WRONLY, 0755)
if err != nil {
zap.L().Panic("Could not open the cpu profile file!", zap.Error(err))
}
@ -145,9 +145,9 @@ func parseFlags() (*opFlags, error) {
if cmdF.DatabaseURL == "" {
opF.DatabaseURL =
"sqlite3://" +
appdirs.UserDataDir("magneticod", "", "", false) +
"/database.sqlite3" +
"?_journal_mode=WAL" // https://github.com/mattn/go-sqlite3#connection-string
appdirs.UserDataDir("magneticod", "", "", false) +
"/database.sqlite3" +
"?_journal_mode=WAL" // https://github.com/mattn/go-sqlite3#connection-string
} else {
opF.DatabaseURL = cmdF.DatabaseURL
}

View File

@ -16,9 +16,8 @@ import (
func apiTorrentsHandler(w http.ResponseWriter, r *http.Request) {
// @lastOrderedValue AND @lastID are either both supplied or neither of them should be supplied
// at all; and if that is NOT the case, then return an error.
if q := r.URL.Query(); !(
(q.Get("lastOrderedValue") != "" && q.Get("lastID") != "") ||
(q.Get("lastOrderedValue") == "" && q.Get("lastID") == "")) {
if q := r.URL.Query(); !((q.Get("lastOrderedValue") != "" && q.Get("lastID") != "") ||
(q.Get("lastOrderedValue") == "" && q.Get("lastID") == "")) {
respondError(w, 400, "`lastOrderedValue`, `lastID` must be supplied altogether, if supplied.")
return
}
@ -43,7 +42,7 @@ func apiTorrentsHandler(w http.ResponseWriter, r *http.Request) {
if tq.Epoch == nil {
tq.Epoch = new(int64)
*tq.Epoch = time.Now().Unix() // epoch, if not supplied, is NOW.
*tq.Epoch = time.Now().Unix() // epoch, if not supplied, is NOW.
} else if *tq.Epoch <= 0 {
respondError(w, 400, "epoch must be greater than 0")
return

View File

@ -10,7 +10,6 @@ import (
"github.com/gorilla/mux"
)
// DONE
func rootHandler(w http.ResponseWriter, r *http.Request) {
nTorrents, err := database.GetNumberOfTorrents()
@ -128,7 +127,7 @@ func feedHandler(w http.ResponseWriter, r *http.Request) {
Title string
Torrents []persistence.TorrentMetadata
}{
Title: title,
Title: title,
Torrents: torrents,
})
if err != nil {

View File

@ -38,10 +38,10 @@ var decoder = schema.NewDecoder()
var templates map[string]*template.Template
var database persistence.Database
var opts struct{
var opts struct {
Addr string
Database string
Credentials map[string][]byte // TODO: encapsulate credentials and mutex for safety
Credentials map[string][]byte // TODO: encapsulate credentials and mutex for safety
CredentialsRWMutex sync.RWMutex
CredentialsPath string
}
@ -77,9 +77,9 @@ func main() {
continue
}
opts.Credentials = make(map[string][]byte) // Clear opts.Credentials
opts.Credentials = make(map[string][]byte) // Clear opts.Credentials
opts.CredentialsRWMutex.Unlock()
if err := loadCred(opts.CredentialsPath); err != nil { // Reload credentials
if err := loadCred(opts.CredentialsPath); err != nil { // Reload credentials
zap.L().Warn("couldn't load credentials", zap.Error(err))
}
}
@ -175,7 +175,6 @@ func respondError(w http.ResponseWriter, statusCode int, format string, a ...int
w.Write([]byte(fmt.Sprintf(format, a...)))
}
func mustAsset(name string) []byte {
data, err := Asset(name)
if err != nil {
@ -186,10 +185,10 @@ func mustAsset(name string) []byte {
func parseFlags() error {
var cmdFlags struct {
Addr string `short:"a" long:"addr" description:"Address (host:port) to serve on" default:":8080"`
Database string `short:"d" long:"database" description:"URL of the (magneticod) database"`
Cred string `short:"c" long:"credentials" description:"Path to the credentials file"`
NoAuth bool ` long:"no-auth" description:"Disables authorisation"`
Addr string `short:"a" long:"addr" description:"Address (host:port) to serve on" default:":8080"`
Database string `short:"d" long:"database" description:"URL of the (magneticod) database"`
Cred string `short:"c" long:"credentials" description:"Path to the credentials file"`
NoAuth bool ` long:"no-auth" description:"Disables authorisation"`
}
if _, err := flags.Parse(&cmdFlags); err != nil {
@ -205,9 +204,9 @@ func parseFlags() error {
if cmdFlags.Database == "" {
opts.Database =
"sqlite3://" +
appdirs.UserDataDir("magneticod", "", "", false) +
"/database.sqlite3" +
"?_journal_mode=WAL" // https://github.com/mattn/go-sqlite3#connection-string
appdirs.UserDataDir("magneticod", "", "", false) +
"/database.sqlite3" +
"?_journal_mode=WAL" // https://github.com/mattn/go-sqlite3#connection-string
} else {
opts.Database = cmdFlags.Database
}
@ -249,12 +248,12 @@ func loadCred(cred string) error {
line, err := reader.ReadBytes('\n')
if err != nil {
if err == io.EOF {
break;
break
}
return fmt.Errorf("error while reading line %d: %s", lineno, err.Error())
}
line = line[:len(line) - 1] // strip '\n'
line = line[:len(line)-1] // strip '\n'
/* The following regex checks if the line satisfies the following conditions:
*
@ -292,7 +291,7 @@ func loadCred(cred string) error {
func BasicAuth(handler http.HandlerFunc, realm string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok { // No credentials provided
if !ok { // No credentials provided
authenticate(w, realm)
return
}
@ -300,12 +299,12 @@ func BasicAuth(handler http.HandlerFunc, realm string) http.HandlerFunc {
opts.CredentialsRWMutex.RLock()
hashedPassword, ok := opts.Credentials[username]
opts.CredentialsRWMutex.RUnlock()
if !ok { // User not found
if !ok { // User not found
authenticate(w, realm)
return
}
if err := bcrypt.CompareHashAndPassword(hashedPassword, []byte(password)); err != nil { // Wrong password
if err := bcrypt.CompareHashAndPassword(hashedPassword, []byte(password)); err != nil { // Wrong password
authenticate(w, realm)
return
}

View File

@ -16,7 +16,7 @@ type schemaStruct struct {
}
type schemaRStruct struct {
Uint64 uint64 `schema:"ruint64,required"` // https://github.com/gorilla/schema/pull/68
Uint64 uint64 `schema:"ruint64,required"` // https://github.com/gorilla/schema/pull/68
}
// TestSchemaUnsuppliedNil tests that unsupplied values yield nil.
@ -26,9 +26,15 @@ func TestSchemaUnsuppliedNil(t *testing.T) {
t.Error("decoding error", err.Error())
}
if ss.PString != nil { t.Error("PString is not nil") }
if ss.PUint64 != nil { t.Error("PUint64 is not nil") }
if ss.PBool != nil { t.Error("PBool is not nil") }
if ss.PString != nil {
t.Error("PString is not nil")
}
if ss.PUint64 != nil {
t.Error("PUint64 is not nil")
}
if ss.PBool != nil {
t.Error("PBool is not nil")
}
}
// TestSchemaInvalidUint64 tests that an invalid uint64 value yields nil.
@ -38,7 +44,9 @@ func TestSchemaInvalidUint64(t *testing.T) {
ss := new(schemaStruct)
err := decoder.Decode(ss, dict)
if err == nil { t.Error("err is nil") }
if err == nil {
t.Error("err is nil")
}
}
// TestSchemaInvalidBool tests that an invalid bool value yields nil.
@ -48,18 +56,22 @@ func TestSchemaInvalidBool(t *testing.T) {
ss := new(schemaStruct)
err := decoder.Decode(ss, dict)
if err == nil { t.Error("err is nil") }
if err == nil {
t.Error("err is nil")
}
}
// TestSchemaOverflow tests that integer values greater than the maximum value a field can store
// leads to decoding errors, rather than silent overflows.
func TestSchemaOverflow(t *testing.T) {
dict := make(map[string][]string)
dict["puint64"] = []string{"18446744073709551616"} // 18,446,744,073,709,551,615 + 1
dict["puint64"] = []string{"18446744073709551616"} // 18,446,744,073,709,551,615 + 1
ss := new(schemaStruct)
err := decoder.Decode(ss, dict)
if err == nil { t.Error("err is nil") }
if err == nil {
t.Error("err is nil")
}
}
// TestSchemaEmptyString tests that empty string yields nil.
@ -72,7 +84,9 @@ func TestSchemaEmptyString(t *testing.T) {
t.Error("decoding error", err.Error())
}
if ss.PString != nil { t.Error("PString is not nil") }
if ss.PString != nil {
t.Error("PString is not nil")
}
}
// TestSchemaDefault tests if unsupplied values defaults to "zero" and doesn't err
@ -82,14 +96,22 @@ func TestSchemaDefault(t *testing.T) {
t.Error("decoding error", err.Error())
}
if ss.String != "" { t.Error("String is not empty") }
if ss.Uint64 != 0 { t.Error("Uint64 is not 0") }
if ss.Bool != false { t.Error("Bool is not false") }
if ss.String != "" {
t.Error("String is not empty")
}
if ss.Uint64 != 0 {
t.Error("Uint64 is not 0")
}
if ss.Bool != false {
t.Error("Bool is not false")
}
}
func TestSchemaRequired(t *testing.T) {
rs := new(schemaRStruct)
err := decoder.Decode(rs, make(map[string][]string))
if err == nil { t.Error("err is nil") }
if err == nil {
t.Error("err is nil")
}
fmt.Printf(err.Error())
}

View File

@ -42,6 +42,7 @@ type Database interface {
}
type OrderingCriteria uint8
const (
ByRelevance OrderingCriteria = iota
ByTotalSize
@ -51,17 +52,19 @@ const (
ByNLeechers
ByUpdatedOn
)
// TODO: search `switch (orderBy)` and see if all cases are covered all the time
type databaseEngine uint8
const (
Sqlite3 databaseEngine = 1
)
type Statistics struct {
NDiscovered map[string]uint64 `json:"nDiscovered"`
NFiles map[string]uint64 `json:"nFiles"`
TotalSize map[string]uint64 `json:"totalSize"`
NDiscovered map[string]uint64 `json:"nDiscovered"`
NFiles map[string]uint64 `json:"nFiles"`
TotalSize map[string]uint64 `json:"totalSize"`
// All these slices below have the exact length equal to the Period.
//NDiscovered []uint64 `json:"nDiscovered"`
@ -69,18 +72,18 @@ type Statistics struct {
}
type File struct {
Size int64 `json:"size"`
Path string `json:"path"`
Size int64 `json:"size"`
Path string `json:"path"`
}
type TorrentMetadata struct {
ID uint64 `json:"id"`
InfoHash []byte `json:"infoHash"` // marshalled differently
Name string `json:"name"`
Size uint64 `json:"size"`
DiscoveredOn int64 `json:"discoveredOn"`
NFiles uint `json:"nFiles"`
Relevance float64 `json:"relevance"`
ID uint64 `json:"id"`
InfoHash []byte `json:"infoHash"` // marshalled differently
Name string `json:"name"`
Size uint64 `json:"size"`
DiscoveredOn int64 `json:"discoveredOn"`
NFiles uint `json:"nFiles"`
Relevance float64 `json:"relevance"`
}
func (tm *TorrentMetadata) MarshalJSON() ([]byte, error) {
@ -122,7 +125,7 @@ func MakeDatabase(rawURL string, logger *zap.Logger) (Database, error) {
func NewStatistics() (s *Statistics) {
s = new(Statistics)
s.NDiscovered = make(map[string]uint64)
s.NFiles = make(map[string]uint64)
s.TotalSize = make(map[string]uint64)
s.NFiles = make(map[string]uint64)
s.TotalSize = make(map[string]uint64)
return
}

View File

@ -7,13 +7,14 @@ import (
"time"
)
var yearRE = regexp.MustCompile(`^(\d{4})$`)
var yearRE = regexp.MustCompile(`^(\d{4})$`)
var monthRE = regexp.MustCompile(`^(\d{4})-(\d{2})$`)
var weekRE = regexp.MustCompile(`^(\d{4})-W(\d{2})$`)
var dayRE = regexp.MustCompile(`^(\d{4})-(\d{2})-(\d{2})$`)
var hourRE = regexp.MustCompile(`^(\d{4})-(\d{2})-(\d{2})T(\d{2})$`)
var weekRE = regexp.MustCompile(`^(\d{4})-W(\d{2})$`)
var dayRE = regexp.MustCompile(`^(\d{4})-(\d{2})-(\d{2})$`)
var hourRE = regexp.MustCompile(`^(\d{4})-(\d{2})-(\d{2})T(\d{2})$`)
type Granularity int
const (
Year Granularity = iota
Month
@ -50,7 +51,7 @@ func ParseISO8601(s string) (*time.Time, Granularity, error) {
if err != nil {
return nil, -1, err
}
t := time.Date(year, time.January, week * 7, 23, 59, 59, 0, time.UTC)
t := time.Date(year, time.January, week*7, 23, 59, 59, 0, time.UTC)
return &t, Week, nil
}
@ -122,15 +123,15 @@ func daysOfMonth(month time.Month, year int) int {
}
func isLeap(year int) bool {
if year % 4 != 0 {
return false
} else if year % 100 != 0 {
return true
} else if year % 400 != 0 {
return false
} else {
return true
}
if year%4 != 0 {
return false
} else if year%100 != 0 {
return true
} else if year%400 != 0 {
return false
} else {
return true
}
}
func atoi(s string) int {
@ -153,7 +154,7 @@ func parseYear(s string) (int, error) {
func parseMonth(s string) (time.Month, error) {
month := atoi(s)
if month <= 0 || month >= 13 {
if month <= 0 || month >= 13 {
return time.Month(-1), fmt.Errorf("month is not in range [01, 12]")
}
return time.Month(month), nil

View File

@ -147,7 +147,7 @@ func (db *sqlite3Database) GetNumberOfTorrents() (uint, error) {
defer rows.Close()
if rows.Next() != true {
fmt.Errorf("No rows returned from `SELECT MAX(ROWID)`")
return 0, fmt.Errorf("No rows returned from `SELECT MAX(ROWID)`")
}
var n uint
@ -174,7 +174,7 @@ func (db *sqlite3Database) QueryTorrents(
return nil, fmt.Errorf("lastOrderedValue and lastID should be supplied together, if supplied")
}
doJoin := query != ""
doJoin := query != ""
firstPage := lastID == nil
// executeTemplate is used to prepare the SQL query, WITH PLACEHOLDERS FOR USER INPUT.
@ -206,14 +206,14 @@ func (db *sqlite3Database) QueryTorrents(
ORDER BY {{.OrderOn}} {{AscOrDesc .Ascending}}, id {{AscOrDesc .Ascending}}
LIMIT ?;
`, struct {
DoJoin bool
DoJoin bool
FirstPage bool
OrderOn string
OrderOn string
Ascending bool
}{
DoJoin: doJoin,
DoJoin: doJoin,
FirstPage: firstPage,
OrderOn: orderOn(orderBy),
OrderOn: orderOn(orderBy),
Ascending: ascending,
}, template.FuncMap{
"GTEorLTE": func(ascending bool) string {
@ -347,24 +347,24 @@ func (db *sqlite3Database) GetStatistics(from string, n uint) (*Statistics, erro
}
var toTime time.Time
var timef string // time format: https://www.sqlite.org/lang_datefunc.html
var timef string // time format: https://www.sqlite.org/lang_datefunc.html
switch gran {
case Year:
toTime = fromTime.AddDate(int(n), 0, 0)
timef = "%Y"
timef = "%Y"
case Month:
toTime = fromTime.AddDate(0, int(n), 0)
timef = "%Y-%m"
timef = "%Y-%m"
case Week:
toTime = fromTime.AddDate(0, 0, int(n) * 7)
timef = "%Y-%W"
toTime = fromTime.AddDate(0, 0, int(n)*7)
timef = "%Y-%W"
case Day:
toTime = fromTime.AddDate(0, 0, int(n))
timef = "%Y-%m-%d"
timef = "%Y-%m-%d"
case Hour:
toTime = fromTime.Add(time.Duration(n) * time.Hour)
timef = "%Y-%m-%dT%H"
timef = "%Y-%m-%dT%H"
}
// TODO: make it faster!
@ -378,7 +378,7 @@ func (db *sqlite3Database) GetStatistics(from string, n uint) (*Statistics, erro
AND discovered_on >= ?
AND discovered_on <= ?
GROUP BY dt;`,
timef),
timef),
fromTime.Unix(), toTime.Unix())
defer rows.Close()
if err != nil {
@ -478,7 +478,7 @@ func (db *sqlite3Database) setupDatabase() error {
}
switch userVersion {
case 0: // FROZEN.
case 0: // FROZEN.
// Upgrade from user_version 0 to 1
// Changes:
// * `info_hash_index` is recreated as UNIQUE.
@ -493,7 +493,7 @@ func (db *sqlite3Database) setupDatabase() error {
}
fallthrough
case 1: // FROZEN.
case 1: // FROZEN.
// Upgrade from user_version 1 to 2
// Changes:
// * Added `n_seeders`, `n_leechers`, and `updated_on` columns to the `torrents` table, and
@ -538,7 +538,7 @@ func (db *sqlite3Database) setupDatabase() error {
}
fallthrough
case 2: // NOT FROZEN! (subject to change or complete removal)
case 2: // NOT FROZEN! (subject to change or complete removal)
// Upgrade from user_version 2 to 3
// Changes:
// * Created `torrents_idx` FTS5 virtual table.