gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/workerupload_test.go

package renter

import (
	"context"
	"math"
	"testing"
	"time"

	"gitlab.com/SkynetLabs/skyd/siatest/dependencies"
	"go.sia.tech/siad/modules"
)

// testProcessUploadChunkBasic tests processing a valid, needed chunk.
func testProcessUploadChunkBasic(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

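	// Queue the same chunk three times on the worker and mark piece 0 as
	// already used, so the worker is expected to pick up piece 1 and leave
	// the queued copies untouched.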
	uuc := chunk(wt)
	wt.mu.Lock()
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.mu.Unlock()
	uuc.mu.Lock()
	uuc.pieceUsage[0] = true // mark first piece as used
	uuc.mu.Unlock()
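	// Request the memory that processing is expected to return for the
	// pieces this worker won't upload, so the memory manager's accounting
	// stays balanced.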
	_ = wt.staticRenter.staticRepairMemoryManager.Request(context.Background(), modules.SectorSize*uint64(uuc.staticPiecesNeeded-1), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc == nil {
		t.Error("next chunk shouldn't be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 1 {
		t.Error("expected pieceIndex to be 1 since piece 0 is marked as used", pieceIndex)
	}
	if uuc.piecesRegistered != 1 {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, 1)
	}
	if uuc.workersRemaining != 0 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, 0)
	}
	if len(uuc.unusedHosts) != 0 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 0)
	}
	if !uuc.pieceUsage[1] {
		t.Errorf("expected pieceUsage[1] to be true")
	}
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 3 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 3)
	}
	wt.mu.Unlock()
}

// testProcessUploadChunkNoHelpNeeded tests processing a chunk that the worker
// could help with but no help is needed at the moment.
func testProcessUploadChunkNoHelpNeeded(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

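	// Start with an empty queue and pretend every piece is already
	// registered, so the worker could help but no help is needed right now
	// and the chunk should be pushed back onto the worker's queue.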
	uuc := chunk(wt)
	pieces := uuc.staticPiecesNeeded
	wt.mu.Lock()
	wt.unprocessedChunks = newUploadChunks()
	wt.mu.Unlock()
	uuc.mu.Lock()
	uuc.piecesRegistered = uuc.staticPiecesNeeded
	uuc.mu.Unlock()
	_ = uuc.staticMemoryManager.Request(context.Background(), modules.SectorSize*uint64(pieces), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc != nil {
		t.Error("next chunk should be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 0 {
		t.Error("expected pieceIndex to be 0", pieceIndex)
	}
	if uuc.piecesRegistered != uuc.staticPiecesNeeded {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, uuc.staticPiecesNeeded)
	}
	if uuc.workersRemaining != 1 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, 1)
	}
	if len(uuc.unusedHosts) != 1 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 1)
	}
	for i, pu := range uuc.pieceUsage {
		// Only index 0 is false.
		if b := i != 0; b != pu {
			t.Errorf("%v: expected %v but was %v", i, b, pu)
		}
	}
	// Standby workers are woken.
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 1 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 1)
	}
	wt.mu.Unlock()
}

// testProcessUploadChunkNotACandidate tests processing a chunk that the worker
// is not a valid candidate for.
func testProcessUploadChunkNotACandidate(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

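	// Queue the chunk three times and clear its unusedHosts map, so this
	// worker's host is no longer a candidate; the worker should clean up
	// the chunk and leave its queue untouched.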
	uuc := chunk(wt)
	pieces := uuc.staticPiecesNeeded
	wt.mu.Lock()
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.mu.Unlock()
	uuc.mu.Lock()
	uuc.unusedHosts = make(map[string]struct{})
	uuc.mu.Unlock()
	_ = uuc.staticMemoryManager.Request(context.Background(), modules.SectorSize*uint64(pieces), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc != nil {
		t.Error("next chunk should be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 0 {
		t.Error("expected pieceIndex to be 0", pieceIndex)
	}
	if uuc.piecesRegistered != 0 {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, 0)
	}
	if uuc.workersRemaining != 0 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, 0)
	}
	if len(uuc.unusedHosts) != 0 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 0)
	}
	for _, pu := range uuc.pieceUsage {
		// managedCleanUpUploadChunk sets all elements to true
		if !pu {
			t.Errorf("expected pu to be true")
		}
	}
	// Standby workers are woken.
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 3 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 3)
	}
	wt.mu.Unlock()
}

// testProcessUploadChunkCompleted tests processing a chunk that was already
// completed.
func testProcessUploadChunkCompleted(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

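	// Queue the chunk three times and mark it as already complete, so
	// processing should only run cleanup and leave the queue untouched.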
	uuc := chunk(wt)
	pieces := uuc.staticPiecesNeeded
	wt.mu.Lock()
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.mu.Unlock()
	uuc.mu.Lock()
	uuc.piecesCompleted = uuc.staticPiecesNeeded
	uuc.mu.Unlock()
	_ = uuc.staticMemoryManager.Request(context.Background(), modules.SectorSize*uint64(pieces), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc != nil {
		t.Error("next chunk should be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 0 {
		t.Error("expected pieceIndex to be 0", pieceIndex)
	}
	if uuc.piecesRegistered != 0 {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, 0)
	}
	if uuc.workersRemaining != 0 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, 0)
	}
	if len(uuc.unusedHosts) != 1 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 1)
	}
	for _, pu := range uuc.pieceUsage {
		// managedCleanUpUploadChunk sets all elements to true
		if !pu {
			t.Errorf("expected pu to be true")
		}
	}
	// Standby workers are woken.
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 3 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 3)
	}
	wt.mu.Unlock()
}

// testProcessUploadChunk_NotACandidateCooldown tests processing a chunk that
// the worker is not a candidate for while the worker is also on an upload
// cooldown.
func testProcessUploadChunk_NotACandidateCooldown(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

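	// Put the worker on a long upload cooldown and make it a non-candidate.
	// Processing should drop the current chunk as well as every queued
	// chunk, decrementing workersRemaining for each one.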
	uuc := chunk(wt)
	pieces := uuc.staticPiecesNeeded
	wt.mu.Lock()
	wt.uploadRecentFailure = time.Now()
	wt.uploadConsecutiveFailures = math.MaxInt32
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.mu.Unlock()
	uuc.mu.Lock()
	uuc.unusedHosts = make(map[string]struct{})
	uuc.mu.Unlock()
	_ = uuc.staticMemoryManager.Request(context.Background(), modules.SectorSize*uint64(pieces), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc != nil {
		t.Error("next chunk should be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 0 {
		t.Error("expected pieceIndex to be 0", pieceIndex)
	}
	if uuc.piecesRegistered != 0 {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, 0)
	}
	if uuc.workersRemaining != -3 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, -3)
	}
	if len(uuc.unusedHosts) != 0 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 0)
	}
	for _, pu := range uuc.pieceUsage {
		// managedCleanUpUploadChunk sets all elements to true
		if !pu {
			t.Errorf("expected pu to be true")
		}
	}
	// Standby workers are woken.
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 0 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 0)
	}
	wt.mu.Unlock()
}

// testProcessUploadChunkCompletedCooldown tests processing a chunk that was
// already completed while the worker is on an upload cooldown.
func testProcessUploadChunkCompletedCooldown(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

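	// Put the worker on a long upload cooldown and mark the chunk as
	// already complete. Processing should again drop the current chunk and
	// everything queued behind it.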
	uuc := chunk(wt)
	pieces := uuc.staticPiecesNeeded
	wt.mu.Lock()
	wt.uploadRecentFailure = time.Now()
	wt.uploadConsecutiveFailures = math.MaxInt32
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.mu.Unlock()
	uuc.mu.Lock()
	uuc.piecesCompleted = uuc.staticPiecesNeeded
	uuc.mu.Unlock()
	_ = uuc.staticMemoryManager.Request(context.Background(), modules.SectorSize*uint64(pieces), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc != nil {
		t.Error("next chunk should be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 0 {
		t.Error("expected pieceIndex to be 0", pieceIndex)
	}
	if uuc.piecesRegistered != 0 {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, 0)
	}
	if uuc.workersRemaining != -3 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, -3)
	}
	if len(uuc.unusedHosts) != 1 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 1)
	}
	for _, pu := range uuc.pieceUsage {
		// managedCleanUpUploadChunk sets all elements to true
		if !pu {
			t.Errorf("expected pu to be true")
		}
	}
	// Standby workers are woken.
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 0 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 0)
	}
	wt.mu.Unlock()
}

// testProcessUploadChunkNotGoodForUpload tests processing a chunk with a worker
// that's not good for uploading.
func testProcessUploadChunkNotGoodForUpload(t *testing.T, chunk func(wt *workerTester) *unfinishedUploadChunk) {
	t.Parallel()

	// create worker.
	wt, err := newWorkerTesterCustomDependency(t.Name(), &dependencies.DependencyDisableWorker{}, modules.ProdDependencies)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := wt.Close(); err != nil {
			t.Fatal(err)
		}
	}()

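	// Queue the chunk three times before invalidating the worker's
	// contract below; a worker that is not good for upload should drop the
	// chunk and its entire queue.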
	uuc := chunk(wt)
	pieces := uuc.staticPiecesNeeded
	wt.mu.Lock()
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.unprocessedChunks.PushBack(uuc)
	wt.mu.Unlock()

	// mark contract as bad
	err = wt.staticRenter.CancelContract(wt.staticCache().staticContractID)
	if err != nil {
		t.Fatal(err)
	}
	wt.managedUpdateCache()
	_ = uuc.staticMemoryManager.Request(context.Background(), modules.SectorSize*uint64(pieces), true)
	nc, pieceIndex := wt.managedProcessUploadChunk(uuc)
	if nc != nil {
		t.Error("next chunk should be nil")
	}
	uuc.mu.Lock()
	if pieceIndex != 0 {
		t.Error("expected pieceIndex to be 0", pieceIndex)
	}
	if uuc.piecesRegistered != 0 {
		t.Errorf("piecesRegistered %v != %v", uuc.piecesRegistered, 0)
	}
	if uuc.workersRemaining != -3 {
		t.Errorf("workersRemaining %v != %v", uuc.workersRemaining, -3)
	}
	if len(uuc.unusedHosts) != 1 {
		t.Errorf("unusedHosts %v != %v", len(uuc.unusedHosts), 1)
	}
	for _, pu := range uuc.pieceUsage {
		// managedCleanUpUploadChunk sets all elements to true
		if !pu {
			t.Errorf("expected pu to be true")
		}
	}
	// Standby workers are woken.
	if len(uuc.workersStandby) != 0 {
		t.Errorf("expected %v standby workers got %v", 0, len(uuc.workersStandby))
	}
	uuc.mu.Unlock()
	wt.mu.Lock()
	if wt.unprocessedChunks.Len() != 0 {
		t.Errorf("unprocessedChunks %v != %v", wt.unprocessedChunks.Len(), 0)
	}
	wt.mu.Unlock()
}

// TestProcessUploadChunk is a unit test for managedProcessUploadChunk.
func TestProcessUploadChunk(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// some vars for the test.
	pieces := 10

	// helper to create a valid upload chunk.
	chunk := func(wt *workerTester) *unfinishedUploadChunk {
		return &unfinishedUploadChunk{
			unusedHosts: map[string]struct{}{
				wt.staticHostPubKey.String(): {},
			},
			staticPiecesNeeded:        pieces,
			piecesCompleted:           0,
			piecesRegistered:          0,
			pieceUsage:                make([]bool, pieces),
			released:                  true,
			workersRemaining:          1,
			physicalChunkData:         make([][]byte, pieces),
			logicalChunkData:          make([][]byte, pieces),
			staticAvailableChan:       make(chan struct{}),
			staticUploadCompletedChan: make(chan struct{}),
			staticMemoryNeeded:        uint64(pieces) * modules.SectorSize,
			staticMemoryManager:       wt.staticRenter.staticRepairMemoryManager,
		}
	}

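	// Each subtest builds its own chunk via the helper above; the worker's
	// host starts out in unusedHosts, which makes the worker a valid
	// candidate unless the subtest changes that.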
	t.Run("Basic", func(t *testing.T) {
		testProcessUploadChunkBasic(t, chunk)
	})
	t.Run("Completed", func(t *testing.T) {
		testProcessUploadChunkCompleted(t, chunk)
	})
	t.Run("CompletedOnCooldown", func(t *testing.T) {
		testProcessUploadChunkCompletedCooldown(t, chunk)
	})
	t.Run("NoHelpNeeded", func(t *testing.T) {
		testProcessUploadChunkNoHelpNeeded(t, chunk)
	})
	t.Run("NotACandidate", func(t *testing.T) {
		testProcessUploadChunkNotACandidate(t, chunk)
	})
	t.Run("NotACandidateOnCooldown", func(t *testing.T) {
		testProcessUploadChunk_NotACandidateCooldown(t, chunk)
	})
	t.Run("NotGoodForUpload", func(t *testing.T) {
		testProcessUploadChunkNotGoodForUpload(t, chunk)
	})
}