github.com/avahowell/sia@v0.5.1-beta.0.20160524050156-83dcc3d37c94/modules/renter/repair_test.go

package renter

import (
	"bytes"
	"crypto/rand"
	"errors"
	"reflect"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/modules/renter/contractor"
	"github.com/NebulousLabs/Sia/types"
)

// testHost simulates a host. It implements the contractor.Editor interface.
type testHost struct {
	ip      modules.NetAddress
	sectors map[crypto.Hash][]byte

	// used to simulate real-world conditions
	delay    time.Duration // transfers will take this long
	failRate int           // transfers will randomly fail with probability 1/failRate

	sync.Mutex
}

// stub implementations of the contractor.Editor methods
func (h *testHost) Address() modules.NetAddress                           { return h.ip }
func (h *testHost) Delete(crypto.Hash) error                              { return nil }
func (h *testHost) Modify(crypto.Hash, crypto.Hash, uint64, []byte) error { return nil }
func (h *testHost) EndHeight() types.BlockHeight                          { return 0 }
func (h *testHost) Close() error                                          { return nil }

// ContractID returns a fake (but unique) file contract ID.
func (h *testHost) ContractID() types.FileContractID {
	var fcid types.FileContractID
	copy(fcid[:], h.ip)
	return fcid
}

// Upload adds a piece to the testHost. It randomly fails according to the
// testHost's parameters.
func (h *testHost) Upload(data []byte) (crypto.Hash, error) {
	// simulate I/O delay
	time.Sleep(h.delay)

	h.Lock()
	defer h.Unlock()

	// randomly fail
	if n, _ := crypto.RandIntn(h.failRate); n == 0 {
		return crypto.Hash{}, errors.New("no data")
	}

	root := crypto.MerkleRoot(data)
	h.sectors[root] = data
	return root, nil
}

// TestRepair tests the repair method of the file type.
func TestRepair(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// generate data
	const dataSize = 777
	data := make([]byte, dataSize)
	rand.Read(data)
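	// dataSize does not divide evenly into whole chunks, which is why the
	// recovered output below is truncated back to dataSize before it is
	// compared to the original.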

	// create Reed-Solomon encoder
	rsc, err := NewRSCode(2, 10)
	if err != nil {
		t.Fatal(err)
	}

	// create hosts
	const pieceSize = 10
	hosts := make([]contractor.Editor, rsc.NumPieces())
	for i := range hosts {
		hosts[i] = &testHost{
			sectors:  make(map[crypto.Hash][]byte),
			ip:       modules.NetAddress(strconv.Itoa(i)),
			delay:    time.Duration(i) * time.Millisecond,
			failRate: 5, // 20% failure rate
		}
	}
	// make one host really slow
	hosts[0].(*testHost).delay = 100 * time.Millisecond
	// make one host always fail
	hosts[1].(*testHost).failRate = 1
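	// The artificially slow host and the always-failing host exercise the
	// repair logic's handling of stragglers and upload failures; per-host
	// failures are expected to surface as hostErrs, which the upload loop
	// below treats as non-fatal.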

	// upload data to hosts
	f := newFile("foo", rsc, pieceSize, dataSize)
	r := bytes.NewReader(data)
	for chunk, pieces := range f.incompleteChunks() {
		err = f.repair(chunk, pieces, r, hosts)
		// hostErrs are non-fatal
		if _, ok := err.(hostErrs); ok {
			continue
		} else if err != nil {
			t.Fatal(err)
		}
	}

	// download data
	chunks := make([][][]byte, f.numChunks())
	for i := uint64(0); i < f.numChunks(); i++ {
		chunks[i] = make([][]byte, rsc.NumPieces())
	}
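	// Reassemble the encoded pieces directly from the hosts' sector maps:
	// each piece recorded in the file's contracts is looked up by its Merkle
	// root and decrypted with the key derived from the file's master key and
	// the piece's chunk and piece indices.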
	for _, h := range hosts {
		contract, exists := f.contracts[h.ContractID()]
		if !exists {
			continue
		}
		for _, p := range contract.Pieces {
			encPiece := h.(*testHost).sectors[p.MerkleRoot]
			piece, err := deriveKey(f.masterKey, p.Chunk, p.Piece).DecryptBytes(encPiece)
			if err != nil {
				t.Fatal(err)
			}
			chunks[p.Chunk][p.Piece] = piece
		}
	}
	buf := new(bytes.Buffer)
	for _, chunk := range chunks {
		err = rsc.Recover(chunk, f.chunkSize(), buf)
		if err != nil {
			t.Fatal(err)
		}
	}
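	// Recover writes a full chunk's worth of data for every chunk, so the
	// reconstructed stream can be longer than the original; trim it back to
	// dataSize before comparing.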
	buf.Truncate(dataSize)

	if !bytes.Equal(buf.Bytes(), data) {
		t.Fatal("recovered data does not match original")
	}

	/*
		// These metrics can be used to assess the efficiency of the repair
		// algorithm. (testHost stores sectors rather than a pieceMap, so the
		// piece and chunk counts are derived from the file's contract
		// metadata.)

		for i, h := range hosts {
			host := h.(*testHost)
			contract := f.contracts[host.ContractID()]
			chunkSet := make(map[uint64]struct{})
			for _, p := range contract.Pieces {
				chunkSet[p.Chunk] = struct{}{}
			}
			t.Logf("Host #: %d\tDelay: %v\t# Pieces: %v\t# Chunks: %d", i, host.delay, len(contract.Pieces), len(chunkSet))
		}
	*/
}

// offlineHostDB is a mocked hostDB, used for testing the offlineChunks method
// of the file type. It is implemented as a map from NetAddresses to booleans,
// where the bool indicates whether the host is active.
type offlineHostDB struct {
	stubHostDB
	hosts map[modules.NetAddress]bool
}

// IsOffline is a stub implementation of the IsOffline method. It reports the
// inverse of the host's active flag; hosts not present in the map are treated
// as offline.
func (hdb *offlineHostDB) IsOffline(addr modules.NetAddress) bool {
	return !hdb.hosts[addr]
}

// TestOfflineChunks tests the offlineChunks method of the file type.
func TestOfflineChunks(t *testing.T) {
	// Create a mock hostdb.
	hdb := &offlineHostDB{
		hosts: map[modules.NetAddress]bool{
			"foo": false,
			"bar": false,
			"baz": true,
		},
	}
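	// "foo" and "bar" are inactive and will be reported as offline by
	// IsOffline; only "baz" is online.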
	rsc, _ := NewRSCode(1, 1)
	f := &file{
		erasureCode: rsc,
		contracts: map[types.FileContractID]fileContract{
			{0}: {IP: "foo", Pieces: []pieceData{{0, 0, crypto.Hash{}}, {1, 0, crypto.Hash{}}}},
			{1}: {IP: "bar", Pieces: []pieceData{{0, 1, crypto.Hash{}}}},
			{2}: {IP: "baz", Pieces: []pieceData{{1, 1, crypto.Hash{}}}},
		},
	}

	// Pieces 0.0, 0.1, and 1.0 (chunk.piece) are offline. Since redundancy is 1,
	// offlineChunks should report only chunk 0 as needing repair.
	expChunks := map[uint64][]uint64{
		0: {0, 1},
	}
	chunks := f.offlineChunks(hdb)
	if !reflect.DeepEqual(chunks, expChunks) {
		// the pieces may have been returned in a different order
		if !reflect.DeepEqual(chunks, map[uint64][]uint64{0: {1, 0}}) {
			t.Fatalf("offlineChunks did not return correct chunks: expected %v, got %v", expChunks, chunks)
		}
	}
}