
Commit 755650e

move erasure code to chunks, get rid of tracking map and move available out of metadata.go

ChrisSchinnerl committed Jun 26, 2018
1 parent aad1f92
Showing 13 changed files with 264 additions and 215 deletions.
30 changes: 20 additions & 10 deletions modules/renter/download.go
@@ -377,8 +377,18 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
}

// Determine which chunks to download.
minChunk := params.offset / params.file.ChunkSize()
maxChunk := (params.offset + params.length - 1) / params.file.ChunkSize()
minChunk, minChunkOffset := params.file.ChunkIndexByOffset(params.offset)
maxChunk, maxChunkOffset := params.file.ChunkIndexByOffset(params.offset + params.length)
if minChunk == params.file.NumChunks() || maxChunk == params.file.NumChunks() {
return nil, errors.New("download is requesting a chunk that is past the boundary of the file")
}
// If the maxChunkOffset is exactly 0 we need to subtract 1 chunk. e.g. if
// the chunkSize is 100 bytes and we want to download 100 bytes from offset
// 0, maxChunk would be 1 and maxChunkOffset would be 0. We want maxChunk
// to be 0 though since we don't actually need any data from chunk 1.
if maxChunk > 0 && maxChunkOffset == 0 {
maxChunk--
}

// For each chunk, assemble a mapping from the contract id to the index of
// the piece within the chunk that the contract is responsible for.
@@ -413,13 +423,13 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
for i := minChunk; i <= maxChunk; i++ {
udc := &unfinishedDownloadChunk{
destination: params.destination,
erasureCode: params.file.ErasureCode(),
erasureCode: params.file.ErasureCode(i),
masterKey: params.file.MasterKey(),

staticChunkIndex: i,
staticCacheID: fmt.Sprintf("%v:%v", d.staticSiaPath, i),
staticChunkMap: chunkMaps[i-minChunk],
staticChunkSize: params.file.ChunkSize(),
staticChunkSize: params.file.ChunkSize(i),
staticPieceSize: params.file.PieceSize(),

// TODO: 25ms is just a guess for a good default. Really, we want to
@@ -435,8 +445,8 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
staticNeedsMemory: params.needsMemory,
staticPriority: params.priority,

physicalChunkData: make([][]byte, params.file.ErasureCode().NumPieces()),
pieceUsage: make([]bool, params.file.ErasureCode().NumPieces()),
physicalChunkData: make([][]byte, params.file.ErasureCode(i).NumPieces()),
pieceUsage: make([]bool, params.file.ErasureCode(i).NumPieces()),

download: d,
staticStreamCache: r.staticStreamCache,
@@ -445,16 +455,16 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
// Set the fetchOffset - the offset within the chunk that we start
// downloading from.
if i == minChunk {
udc.staticFetchOffset = params.offset % params.file.ChunkSize()
udc.staticFetchOffset = minChunkOffset
} else {
udc.staticFetchOffset = 0
}
// Set the fetchLength - the number of bytes to fetch within the chunk
// that we start downloading from.
if i == maxChunk && (params.length+params.offset)%params.file.ChunkSize() != 0 {
udc.staticFetchLength = ((params.length + params.offset) % params.file.ChunkSize()) - udc.staticFetchOffset
if i == maxChunk && maxChunkOffset != 0 {
udc.staticFetchLength = maxChunkOffset - udc.staticFetchOffset
} else {
udc.staticFetchLength = params.file.ChunkSize() - udc.staticFetchOffset
udc.staticFetchLength = params.file.ChunkSize(i) - udc.staticFetchOffset
}
// Set the writeOffset within the destination for where the data should
// be written.
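
The download path above now asks the file which chunk an offset falls in (ChunkIndexByOffset) instead of dividing by one global chunk size, since chunks can now carry their own erasure code and size. A minimal sketch of the range calculation, assuming a single fixed chunk size purely for illustration (the real code uses params.file.ChunkIndexByOffset and per-chunk ChunkSize(i)):

package main

import "fmt"

// chunkRange computes the first and last chunk indices touched by a download
// of `length` bytes starting at `offset`, plus the offsets within those
// chunks. A fixed chunkSize is assumed here for illustration only.
func chunkRange(offset, length, chunkSize uint64) (minChunk, maxChunk, minChunkOffset, maxChunkOffset uint64) {
	minChunk, minChunkOffset = offset/chunkSize, offset%chunkSize
	maxChunk, maxChunkOffset = (offset+length)/chunkSize, (offset+length)%chunkSize
	// If the range ends exactly on a chunk boundary, the last chunk is not
	// actually needed: downloading 100 bytes from offset 0 with a 100-byte
	// chunk size touches only chunk 0, even though (0+100)/100 == 1.
	if maxChunk > 0 && maxChunkOffset == 0 {
		maxChunk--
	}
	return
}

func main() {
	minC, maxC, _, _ := chunkRange(0, 100, 100)
	fmt.Println(minC, maxC) // prints: 0 0
}
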
8 changes: 6 additions & 2 deletions modules/renter/downloadstreamer.go
@@ -64,10 +64,14 @@ func (s *streamer) Read(p []byte) (n int, err error) {
}

// Calculate how much we can download. We never download more than a single chunk.
chunkSize := s.file.ChunkSize()
chunkIndex, chunkOffset := s.file.ChunkIndexByOffset(uint64(s.offset))
if chunkIndex == s.file.NumChunks() {
return 0, io.EOF
}
chunkSize := s.file.ChunkSize(chunkIndex)
remainingData := uint64(fileSize - s.offset)
requestedData := uint64(len(p))
remainingChunk := chunkSize - uint64(s.offset)%chunkSize
remainingChunk := chunkSize - chunkOffset
length := min(remainingData, requestedData, remainingChunk)

// Download data
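
The streamer likewise resolves the chunk via ChunkIndexByOffset and returns io.EOF once the offset is past the last chunk; the amount read is still capped three ways. A small sketch of that length calculation (the helper name and layout are illustrative, not the renter's actual code):

package main

import "fmt"

// readLength caps a streaming read, mirroring streamer.Read: never read past
// the end of the file, past the caller's buffer, or across the current chunk
// boundary. chunkOffset is the position within the chunk that
// ChunkIndexByOffset reports for the stream offset.
func readLength(remainingData, requestedData, chunkSize, chunkOffset uint64) uint64 {
	remainingChunk := chunkSize - chunkOffset
	length := remainingData
	if requestedData < length {
		length = requestedData
	}
	if remainingChunk < length {
		length = remainingChunk
	}
	return length
}

func main() {
	// 40 bytes left in the file, 64 bytes requested, 10 bytes left in the chunk.
	fmt.Println(readLength(40, 64, 100, 90)) // prints: 10
}
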
4 changes: 2 additions & 2 deletions modules/renter/files.go
@@ -144,7 +144,7 @@ func (r *Renter) FileList() []modules.FileInfo {
tf, exists := r.persist.Tracking[siaPath]
r.mu.RUnlock(lockID)
if exists {
localPath = tf.RepairPath
localPath = tf.LocalPath()
}

fileList = append(fileList, modules.FileInfo{
@@ -196,7 +196,7 @@ func (r *Renter) File(siaPath string) (modules.FileInfo, error) {
var localPath string
tf, exists := r.persist.Tracking[file.SiaPath()]
if exists {
localPath = tf.RepairPath
localPath = tf.LocalPath()
}
fileInfo = modules.FileInfo{
SiaPath: file.SiaPath(),
31 changes: 15 additions & 16 deletions modules/renter/files_test.go
@@ -7,7 +7,6 @@ import (

"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/modules/renter/siafile"
"github.com/NebulousLabs/Sia/types"
"github.com/NebulousLabs/errors"
)
@@ -32,7 +31,7 @@ func TestFileNumChunks(t *testing.T) {

for _, test := range tests {
rsc, _ := NewRSCode(test.piecesPerChunk, 1) // can't use 0
f := siafile.New(t.Name(), rsc, test.pieceSize, test.size)
f := newFile(t.Name(), rsc, test.pieceSize, test.size, 0777, "")
if f.NumChunks() != test.expNumChunks {
t.Errorf("Test %v: expected %v, got %v", test, test.expNumChunks, f.NumChunks())
}
@@ -42,7 +41,7 @@ func TestFileNumChunks(t *testing.T) {
// TestFileAvailable probes the available method of the file type.
func TestFileAvailable(t *testing.T) {
rsc, _ := NewRSCode(1, 1) // can't use 0
f := siafile.New(t.Name(), rsc, pieceSize, 100)
f := newFile(t.Name(), rsc, pieceSize, 100, 0777, "")
neverOffline := make(map[string]bool)

if f.Available(neverOffline) {
@@ -69,7 +68,7 @@ func TestFileAvailable(t *testing.T) {
func TestFileUploadedBytes(t *testing.T) {
// ensure that a piece fits within a sector
rsc, _ := NewRSCode(1, 3)
f := siafile.New(t.Name(), rsc, modules.SectorSize/2, 1000)
f := newFile(t.Name(), rsc, modules.SectorSize/2, 1000, 0777, "")
for i := uint64(0); i < 4; i++ {
err := f.AddPiece(types.SiaPublicKey{}, uint64(0), i, crypto.Hash{})
if err != nil {
@@ -85,7 +84,7 @@ func TestFileUploadedBytes(t *testing.T) {
// 100%, even if more pieces have been uploaded,
func TestFileUploadProgressPinning(t *testing.T) {
rsc, _ := NewRSCode(1, 1)
f := siafile.New(t.Name(), rsc, 2, 4)
f := newFile(t.Name(), rsc, 2, 4, 0777, "")
for i := uint64(0); i < 2; i++ {
err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, uint64(0), i, crypto.Hash{})
err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, uint64(0), i, crypto.Hash{})
@@ -111,7 +110,7 @@ func TestFileUploadProgressPinning(t *testing.T) {

for _, nData := range nDatas {
rsc, _ := NewRSCode(nData, 10)
f := siafile.New(t.Name(), rsc, 100, 1000)
f := newFile(t.Name(), rsc, 100, 1000, 0777, "")
// Test that an empty file has 0 redundancy.
if r := f.Redundancy(neverOffline, goodForRenew); r != 0 {
t.Error("expected 0 redundancy, got", r)
@@ -145,33 +144,33 @@ func TestFileRedundancy(t *testing.T) {
t.Fatal(err)
}
// 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece.
expectedR := 1.0 / float64(f.ErasureCode().MinPieces())
expectedR := 1.0 / float64(f.ErasureCode(0).MinPieces())
if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR {
t.Errorf("expected %f redundancy, got %f", expectedR, r)
}
// Test that adding a file contract that has erasureCode.MinPieces() pieces
// per chunk for all chunks results in a file with redundancy > 1.
for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
for iPiece := uint64(1); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
for iPiece := uint64(1); iPiece < uint64(f.ErasureCode(0).MinPieces()); iPiece++ {
err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(3)}}, iChunk, iPiece, crypto.Hash{})
if err != nil {
t.Fatal(err)
}
}
err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode().MinPieces()), crypto.Hash{})
err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode(0).MinPieces()), crypto.Hash{})
if err != nil {
t.Fatal(err)
}
}
// 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces.
expectedR = float64(1+f.ErasureCode().MinPieces()) / float64(f.ErasureCode().MinPieces())
expectedR = float64(1+f.ErasureCode(0).MinPieces()) / float64(f.ErasureCode(0).MinPieces())
if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR {
t.Errorf("expected %f redundancy, got %f", expectedR, r)
}

// verify offline file contracts are not counted in the redundancy
for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
for iPiece := uint64(0); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
for iPiece := uint64(0); iPiece < uint64(f.ErasureCode(0).MinPieces()); iPiece++ {
err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(5)}}, iChunk, iPiece, crypto.Hash{})
if err != nil {
t.Fatal(err)
@@ -191,7 +190,8 @@ func TestFileRedundancy(t *testing.T) {

// TestFileExpiration probes the expiration method of the file type.
func TestFileExpiration(t *testing.T) {
f := newTestingFile()
rsc, _ := NewRSCode(1, 2)
f := newFile(t.Name(), rsc, pieceSize, 1000, 0777, "")
contracts := make(map[string]modules.RenterContract)
if f.Expiration(contracts) != 0 {
t.Error("file with no pieces should report as having no time remaining")
@@ -245,10 +245,9 @@ func TestRenterFileListLocalPath(t *testing.T) {
defer rt.Close()
id := rt.renter.mu.Lock()
f := newTestingFile()
f.SetLocalPath("TestPath")
rt.renter.files[f.SiaPath()] = f
rt.renter.persist.Tracking[f.SiaPath()] = trackedFile{
RepairPath: "TestPath",
}
rt.renter.persist.Tracking[f.SiaPath()] = f
rt.renter.mu.Unlock(id)
files := rt.renter.FileList()
if len(files) != 1 {
@@ -414,7 +413,7 @@ func TestRenterRenameFile(t *testing.T) {
}

// Renaming should also update the tracking set
rt.renter.persist.Tracking["1"] = trackedFile{"foo"}
rt.renter.persist.Tracking["1"] = f2
err = rt.renter.RenameFile("1", "1b")
if err != nil {
t.Fatal(err)
6 changes: 3 additions & 3 deletions modules/renter/persist.go
@@ -55,7 +55,7 @@ type (
MaxDownloadSpeed int64
MaxUploadSpeed int64
StreamCacheSize uint64
Tracking map[string]trackedFile
Tracking map[string]*siafile.SiaFile
}
)

@@ -247,7 +247,7 @@ func (r *Renter) loadSiaFiles() error {
// load fetches the saved renter data from disk.
func (r *Renter) loadSettings() error {
r.persist = persistence{
Tracking: make(map[string]trackedFile),
Tracking: make(map[string]*siafile.SiaFile),
}
err := persist.LoadJSON(settingsMetadata, &r.persist, filepath.Join(r.persistDir, PersistFilename))
if os.IsNotExist(err) {
@@ -489,7 +489,7 @@ func convertPersistVersionFrom040To133(path string) error {
Version: persistVersion040,
}
p := persistence{
Tracking: make(map[string]trackedFile),
Tracking: make(map[string]*siafile.SiaFile),
}

err := persist.LoadJSON(metadata, &p, path)
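
With trackedFile gone, the persisted Tracking map points straight at the SiaFile, and the repair source lives on the file itself (LocalPath/SetLocalPath) rather than in a separate RepairPath field. A rough sketch of the new shape, using a stub type since the full siafile.SiaFile API is not shown in this diff:

package main

import "fmt"

// siaFileStub stands in for siafile.SiaFile; only the two methods this commit
// relies on are sketched here, which is an assumption, not the full API.
type siaFileStub struct {
	localPath string
}

func (sf *siaFileStub) LocalPath() string     { return sf.localPath }
func (sf *siaFileStub) SetLocalPath(p string) { sf.localPath = p }

func main() {
	// Before: Tracking map[string]trackedFile, with the source kept in
	// trackedFile.RepairPath. After: the map keys sia paths to the files
	// themselves and the source path lives on the file.
	tracking := make(map[string]*siaFileStub)

	f := &siaFileStub{}
	f.SetLocalPath("/home/user/backup.dat") // hypothetical local path
	tracking["backups/backup.dat"] = f      // hypothetical sia path

	fmt.Println(tracking["backups/backup.dat"].LocalPath())
}
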
2 changes: 1 addition & 1 deletion modules/renter/persist_test.go
@@ -23,7 +23,7 @@ func newTestingFile() *siafile.SiaFile {

name := "testfile-" + strconv.Itoa(int(data[0]))

return siafile.New(name, rsc, pieceSize, 1000)
return newFile(name, rsc, pieceSize, 1000, 0777, "")
}

// equalFiles is a helper function that compares two files for equality.
9 changes: 0 additions & 9 deletions modules/renter/renter.go
@@ -152,15 +152,6 @@ type hostContractor interface {
SetRateLimits(int64, int64, uint64)
}

// A trackedFile contains metadata about files being tracked by the Renter.
// Tracked files are actively repaired by the Renter. By default, files
// uploaded by the user are tracked, and files that are added (via loading a
// .sia file) are not.
type trackedFile struct {
// location of original file on disk
RepairPath string
}

// A Renter is responsible for tracking all of the files that a user has
// uploaded to Sia, as well as the locations and health of these files.
//
21 changes: 10 additions & 11 deletions modules/renter/siafile/compat.go
@@ -40,18 +40,17 @@ func NewFromFileData(fd FileData) *SiaFile {
pieceSize: fd.PieceSize,
siaPath: fd.Name,
},
deleted: fd.Deleted,
erasureCode: fd.ErasureCode,
uid: fd.UID,
deleted: fd.Deleted,
uid: fd.UID,
}
chunks := make([]Chunk, file.NumChunks())
for i := range chunks {
chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1}
binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[0:4], uint32(file.erasureCode.MinPieces()))
binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[4:8], uint32(file.erasureCode.NumPieces()-file.erasureCode.MinPieces()))
chunks[i].pieces = make([][]Piece, file.erasureCode.NumPieces())
file.chunks = make([]Chunk, len(fd.Chunks))
for i := range file.chunks {
file.chunks[i].erasureCode = fd.ErasureCode
file.chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1}
binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[0:4], uint32(file.chunks[i].erasureCode.MinPieces()))
binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[4:8], uint32(file.chunks[i].erasureCode.NumPieces()-file.chunks[i].erasureCode.MinPieces()))
file.chunks[i].pieces = make([][]Piece, file.chunks[i].erasureCode.NumPieces())
}
file.chunks = chunks

// Populate the pubKeyTable of the file and add the pieces.
pubKeyMap := make(map[string]int)
@@ -83,7 +82,7 @@ func (sf *SiaFile) ExportFileData() FileData {
Name: sf.metadata.siaPath,
FileSize: uint64(sf.metadata.fileSize),
MasterKey: sf.metadata.masterKey,
ErasureCode: sf.erasureCode,
ErasureCode: sf.chunks[0].erasureCode,
PieceSize: sf.metadata.pieceSize,
Mode: sf.metadata.mode,
Deleted: sf.deleted,
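
NewFromFileData now records the erasure coder on every chunk rather than once on the file, together with a 4-byte type tag and an 8-byte parameter blob (data pieces, then parity pieces) for serialization. A small sketch of just that encoding, with a simplified stand-in for siafile.Chunk:

package main

import (
	"encoding/binary"
	"fmt"
)

// chunkStub is a simplified stand-in for siafile.Chunk, keeping only the
// erasure-code metadata fields that appear in this diff.
type chunkStub struct {
	erasureCodeType   [4]byte
	erasureCodeParams [8]byte
}

// encodeErasureCode packs the coder's parameters the way NewFromFileData does:
// a {0, 0, 0, 1} type tag, then minPieces and (numPieces - minPieces) as
// little-endian uint32 values.
func encodeErasureCode(minPieces, numPieces int) chunkStub {
	var c chunkStub
	c.erasureCodeType = [4]byte{0, 0, 0, 1}
	binary.LittleEndian.PutUint32(c.erasureCodeParams[0:4], uint32(minPieces))
	binary.LittleEndian.PutUint32(c.erasureCodeParams[4:8], uint32(numPieces-minPieces))
	return c
}

func main() {
	c := encodeErasureCode(10, 30) // 10 data pieces, 20 parity pieces
	fmt.Println(c.erasureCodeType, c.erasureCodeParams)
}
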
