Skip to content
This repository has been archived by the owner on Nov 2, 2018. It is now read-only.

Use BlockHeight to calculate uptime/downtime instead of timestamps. #2908

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 3 additions & 2 deletions modules/renter.go
Expand Up @@ -119,8 +119,9 @@ type HostDBEntry struct {

// HostDBScan represents a single scan event.
type HostDBScan struct {
Timestamp time.Time `json:"timestamp"`
Success bool `json:"success"`
Timestamp time.Time `json:"timestamp"`
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@DavidVorick is there still a need to keep the Timestamp?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm going to go ahead and say yes, we should keep the timestamp in the scan, just in case we find some use for it later. We may (or may not) phase it out eventually, but I can think of a few possible reasons we may want it at some point.

BlockHeight types.BlockHeight `json:"blockHeight"`
Success bool `json:"success"`
}

// HostScoreBreakdown provides a piece-by-piece explanation of why a host has
Expand Down
3 changes: 3 additions & 0 deletions modules/renter/hostdb/consts.go
Expand Up @@ -34,6 +34,9 @@ const (
// scans start getting compressed.
minScans = 12

// uptimeHalflife is the half-life used to decay the host's historic uptime and downtime.
uptimeHalflife = 30 * 24 * time.Hour

// recentInteractionWeightLimit caps the number of recent interactions as a
// percentage of the historic interactions, to be certain that a large
// amount of activity in a short period of time does not overwhelm the
Expand Down
24 changes: 13 additions & 11 deletions modules/renter/hostdb/hostweight.go
@@ -1,12 +1,12 @@
package hostdb

import (
"math"
"math/big"

"github.com/NebulousLabs/Sia/build"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
"math"
"math/big"
"time"
)

var (
Expand Down Expand Up @@ -310,10 +310,9 @@ func (hdb *HostDB) uptimeAdjustments(entry modules.HostDBEntry) float64 {
// host.
downtime := entry.HistoricDowntime
uptime := entry.HistoricUptime
recentTime := entry.ScanHistory[0].Timestamp
recentSuccess := entry.ScanHistory[0].Success
recentScan := entry.ScanHistory[0]
for _, scan := range entry.ScanHistory[1:] {
if recentTime.After(scan.Timestamp) {
if recentScan.Timestamp.After(scan.Timestamp) {
if build.DEBUG {
hdb.log.Critical("Host entry scan history not sorted.")
} else {
Expand All @@ -322,13 +321,16 @@ func (hdb *HostDB) uptimeAdjustments(entry modules.HostDBEntry) float64 {
// Ignore the unsorted scan entry.
continue
}
if recentSuccess {
uptime += scan.Timestamp.Sub(recentTime)

blocksPassed := scan.BlockHeight - recentScan.BlockHeight
timePassed := time.Duration(blocksPassed) * 10 * time.Minute

if recentScan.Success {
uptime += timePassed
} else {
downtime += scan.Timestamp.Sub(recentTime)
downtime += timePassed
}
recentTime = scan.Timestamp
recentSuccess = scan.Success
recentScan = scan
}
// Sanity check against 0 total time.
if uptime == 0 && downtime == 0 {
Expand Down
29 changes: 21 additions & 8 deletions modules/renter/hostdb/scan.go
Expand Up @@ -13,8 +13,25 @@ import (
"github.com/NebulousLabs/Sia/encoding"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/fastrand"
"math"
)

// updateUptime decays a host's historic uptime and historic downtime by the
// time elapsed between recentScan and scan (estimated from block heights at
// 10 minutes per block), then credits the elapsed time to historic uptime or
// historic downtime depending on whether recentScan succeeded.
func updateUptime(entry *modules.HostDBEntry, scan modules.HostDBScan, recentScan modules.HostDBScan) {
	blocksPassed := scan.BlockHeight - recentScan.BlockHeight
	// Estimate elapsed wall-clock time from the block delta; Sia targets
	// one block every 10 minutes.
	timePassed := time.Duration(blocksPassed) * 10 * time.Minute

	// The decay factor must stay a float64. Converting math.Pow's result
	// (a value in (0, 1]) to time.Duration truncates anything below 1 to
	// zero, which would erase the entire history instead of decaying it.
	decay := math.Pow(0.5, float64(timePassed)/float64(uptimeHalflife))
	entry.HistoricUptime = time.Duration(float64(entry.HistoricUptime) * decay)
	entry.HistoricDowntime = time.Duration(float64(entry.HistoricDowntime) * decay)

	// Credit the newly elapsed interval at full weight; only the
	// pre-existing history is decayed.
	if recentScan.Success {
		entry.HistoricUptime += timePassed
	} else {
		entry.HistoricDowntime += timePassed
	}
}

// queueScan will add a host to the queue to be scanned.
func (hdb *HostDB) queueScan(entry modules.HostDBEntry) {
// If this entry is already in the scan pool, can return immediately.
Expand Down Expand Up @@ -177,7 +194,7 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) {
// Before appending, make sure that the scan we just performed is
// timestamped after the previous scan performed. It may not be if the
// system clock has changed.
newEntry.ScanHistory = append(newEntry.ScanHistory, modules.HostDBScan{Timestamp: newTimestamp, Success: netErr == nil})
newEntry.ScanHistory = append(newEntry.ScanHistory, modules.HostDBScan{Timestamp: newTimestamp, BlockHeight: hdb.blockHeight, Success: netErr == nil})
}

// Check whether any of the recent scans demonstrate uptime. The pruning and
Expand All @@ -193,7 +210,8 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) {
// If the host has been offline for too long, delete the host from the
// hostdb. Only delete if there have been enough scans over a long enough
// period to be confident that the host really is offline for good.
if time.Now().Sub(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime && !recentUptime && len(newEntry.ScanHistory) >= minScans {

if newEntry.HistoricUptime+newEntry.HistoricDowntime > maxHostDowntime && !recentUptime && len(newEntry.ScanHistory) >= minScans {
err := hdb.hostTree.Remove(newEntry.PublicKey)
if err != nil {
hdb.log.Println("ERROR: unable to remove host newEntry which has had a ton of downtime:", err)
Expand All @@ -206,12 +224,7 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) {

// Compress any old scans into the historic values.
for len(newEntry.ScanHistory) > minScans && time.Now().Sub(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime {
timePassed := newEntry.ScanHistory[1].Timestamp.Sub(newEntry.ScanHistory[0].Timestamp)
if newEntry.ScanHistory[0].Success {
newEntry.HistoricUptime += timePassed
} else {
newEntry.HistoricDowntime += timePassed
}
updateUptime(&newEntry, newEntry.ScanHistory[1], newEntry.ScanHistory[0])
newEntry.ScanHistory = newEntry.ScanHistory[1:]
}

Expand Down
4 changes: 2 additions & 2 deletions modules/renter/hostdb/scan_test.go
Expand Up @@ -136,7 +136,7 @@ func TestUpdateEntry(t *testing.T) {
if !exists {
t.Fatal("Entry did not get inserted into the host tree")
}
updatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1)}}, updatedEntry.ScanHistory...)
updatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1), BlockHeight: hdbt.hdb.blockHeight}}, updatedEntry.ScanHistory...)
err = hdbt.hdb.hostTree.Modify(updatedEntry)
if err != nil {
t.Fatal(err)
Expand Down Expand Up @@ -164,7 +164,7 @@ func TestUpdateEntry(t *testing.T) {
if !exists {
t.Fatal("Entry did not get inserted into the host tree")
}
updatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, Timestamp: time.Now().Add(time.Hour * 24 * 11 * -1)}}, updatedEntry.ScanHistory...)
updatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, BlockHeight: hdbt.hdb.blockHeight, Timestamp: time.Now().Add(time.Hour * 24 * 11 * -1)}}, updatedEntry.ScanHistory...)
err = hdbt.hdb.hostTree.Modify(updatedEntry)
if err != nil {
t.Fatal(err)
Expand Down