github.com/nebulouslabs/sia@v1.3.7/modules/renter/workerupload.go

package renter

import (
	"time"

	"github.com/NebulousLabs/Sia/build"
)

// managedDropChunk will remove a worker from the responsibility of tracking a
// chunk.
//
// This function is managed instead of static because it is against convention
// to call functions on other objects (in this case, the renter) while holding
// a lock.
func (w *worker) managedDropChunk(uc *unfinishedUploadChunk) {
	uc.mu.Lock()
	uc.workersRemaining--
	uc.mu.Unlock()
	w.renter.managedCleanUpUploadChunk(uc)
}

// managedDropUploadChunks will release all of the upload chunks that the worker
// has received.
func (w *worker) managedDropUploadChunks() {
	// Make a copy of the slice under lock, clear the slice, then drop the
	// chunks without a lock (managed function).
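	// Dropping a chunk calls managedCleanUpUploadChunk on the renter, which by
	// convention must not be called while the worker lock is held.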
	var chunksToDrop []*unfinishedUploadChunk
	w.mu.Lock()
	for i := 0; i < len(w.unprocessedChunks); i++ {
		chunksToDrop = append(chunksToDrop, w.unprocessedChunks[i])
	}
	w.unprocessedChunks = w.unprocessedChunks[:0]
	w.mu.Unlock()

	for i := 0; i < len(chunksToDrop); i++ {
		w.managedDropChunk(chunksToDrop[i])
	}
}

// managedKillUploading will disable all uploading for the worker.
func (w *worker) managedKillUploading() {
	// Mark the worker as disabled so that incoming chunks are rejected.
	w.mu.Lock()
	w.uploadTerminated = true
	w.mu.Unlock()

	// After the worker is marked as disabled, clear out all of the chunks.
	w.managedDropUploadChunks()
}

// managedNextUploadChunk will pull the next potential chunk out of the worker's
// work queue for uploading.
func (w *worker) managedNextUploadChunk() (nextChunk *unfinishedUploadChunk, pieceIndex uint64) {
	// Loop through the unprocessed chunks and find some work to do.
	for {
		// Pull a chunk off of the unprocessed chunks queue.
		w.mu.Lock()
		if len(w.unprocessedChunks) == 0 {
			w.mu.Unlock()
			break
		}
		chunk := w.unprocessedChunks[0]
		w.unprocessedChunks = w.unprocessedChunks[1:]
		w.mu.Unlock()

		// Process the chunk and return it if valid.
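		// A nil return means this worker should not upload for the chunk; in
		// that case managedProcessUploadChunk has already dropped the chunk or
		// placed the worker on standby, so just move on to the next one.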
		nextChunk, pieceIndex := w.managedProcessUploadChunk(chunk)
		if nextChunk != nil {
			return nextChunk, pieceIndex
		}
	}
	return nil, 0 // no work found
}

// managedQueueUploadChunk will take a chunk and add it to the worker's queue of
// upload work.
func (w *worker) managedQueueUploadChunk(uc *unfinishedUploadChunk) {
	// Check that the worker is allowed to be uploading before grabbing the
	// worker lock.
	utility, exists := w.renter.hostContractor.ContractUtility(w.contract.HostPublicKey)
	goodForUpload := exists && utility.GoodForUpload
	w.mu.Lock()
	if !goodForUpload || w.uploadTerminated || w.onUploadCooldown() {
		// The worker should not be uploading, remove the chunk.
		w.mu.Unlock()
		w.managedDropChunk(uc)
		return
	}
	w.unprocessedChunks = append(w.unprocessedChunks, uc)
	w.mu.Unlock()

	// Send a signal informing the work thread that there is work.
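	// The send is non-blocking; if a wake-up signal is already pending on
	// uploadChan, the default case simply drops the duplicate.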
	select {
	case w.uploadChan <- struct{}{}:
	default:
	}
}

// managedUpload will upload one piece of the provided chunk to the worker's
// host.
func (w *worker) managedUpload(uc *unfinishedUploadChunk, pieceIndex uint64) {
	// Open an editing connection to the host.
	e, err := w.renter.hostContractor.Editor(w.contract.HostPublicKey, w.renter.tg.StopChan())
	if err != nil {
		w.renter.log.Debugln("Worker failed to acquire an editor:", err)
		w.managedUploadFailed(uc, pieceIndex)
		return
	}
	defer e.Close()

	// Perform the upload, and update the failure stats based on the success of
	// the upload attempt.
	root, err := e.Upload(uc.physicalChunkData[pieceIndex])
	if err != nil {
		w.renter.log.Debugln("Worker failed to upload via the editor:", err)
		w.managedUploadFailed(uc, pieceIndex)
		return
	}
	w.mu.Lock()
	w.uploadConsecutiveFailures = 0
	w.mu.Unlock()

	// Update the renter metadata.
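	// Record the new piece under this host's contract in the file metadata,
	// creating the contract entry if it does not exist yet, and then persist
	// the file.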
	addr := e.Address()
	endHeight := e.EndHeight()
	id := w.renter.mu.Lock()
	uc.renterFile.mu.Lock()
	contract, exists := uc.renterFile.contracts[w.contract.ID]
	if !exists {
		contract = fileContract{
			ID:          w.contract.ID,
			IP:          addr,
			WindowStart: endHeight,
		}
	}
	contract.Pieces = append(contract.Pieces, pieceData{
		Chunk:      uc.index,
		Piece:      pieceIndex,
		MerkleRoot: root,
	})
	uc.renterFile.contracts[w.contract.ID] = contract
	w.renter.saveFile(uc.renterFile)
	uc.renterFile.mu.Unlock()
	w.renter.mu.Unlock(id)

	// Upload is complete. Update the state of the chunk and the renter's memory
	// available to reflect the completed upload.
	uc.mu.Lock()
	releaseSize := len(uc.physicalChunkData[pieceIndex])
	uc.piecesRegistered--
	uc.piecesCompleted++
	uc.physicalChunkData[pieceIndex] = nil
	uc.memoryReleased += uint64(releaseSize)
	uc.mu.Unlock()
	w.renter.memoryManager.Return(uint64(releaseSize))
	w.renter.managedCleanUpUploadChunk(uc)
}

// onUploadCooldown returns true if the worker is on cooldown from failed
// uploads.
func (w *worker) onUploadCooldown() bool {
	requiredCooldown := uploadFailureCooldown
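	// Double the cooldown for each consecutive failure, capping the number of
	// doublings at maxConsecutivePenalty, so the required cooldown is
	// uploadFailureCooldown * 2^min(failures, maxConsecutivePenalty).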
	for i := 0; i < w.uploadConsecutiveFailures && i < maxConsecutivePenalty; i++ {
		requiredCooldown *= 2
	}
	return time.Now().Before(w.uploadRecentFailure.Add(requiredCooldown))
}

// managedProcessUploadChunk will process a chunk from the worker's chunk
// queue, returning the chunk and a piece index if this worker should upload a
// piece for the chunk, or a nil chunk if it should not.
func (w *worker) managedProcessUploadChunk(uc *unfinishedUploadChunk) (nextChunk *unfinishedUploadChunk, pieceIndex uint64) {
	// Determine the usability value of this worker.
	utility, exists := w.renter.hostContractor.ContractUtility(w.contract.HostPublicKey)
	goodForUpload := exists && utility.GoodForUpload
	w.mu.Lock()
	onCooldown := w.onUploadCooldown()
	w.mu.Unlock()

	// Determine what sort of help this chunk needs.
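	// chunkComplete means the chunk already has enough completed pieces, and
	// needsHelp means that the completed plus in-progress pieces still fall
	// short of the number of pieces the chunk needs.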
	uc.mu.Lock()
	_, candidateHost := uc.unusedHosts[w.hostPubKey.String()]
	chunkComplete := uc.piecesNeeded <= uc.piecesCompleted
	needsHelp := uc.piecesNeeded > uc.piecesCompleted+uc.piecesRegistered
	// If this worker cannot usefully contribute to the chunk, release the
	// chunk.
	if chunkComplete || !candidateHost || !goodForUpload || onCooldown {
		// This worker no longer needs to track this chunk.
		uc.mu.Unlock()
		w.managedDropChunk(uc)
		return nil, 0
	}

	// If the chunk does not need help from this worker right now, add the
	// worker to the chunk's set of standby workers.
	if !needsHelp {
		uc.workersStandby = append(uc.workersStandby, w)
		uc.mu.Unlock()
		w.renter.managedCleanUpUploadChunk(uc)
		return nil, 0
	}

	// If the chunk needs help from this worker, find a piece to upload and
	// return the stats for that piece.
	//
	// Select a piece and mark that a piece has been selected.
	index := -1
	for i := 0; i < len(uc.pieceUsage); i++ {
		if !uc.pieceUsage[i] {
			index = i
			uc.pieceUsage[i] = true
			break
		}
	}
	if index == -1 {
		build.Critical("worker was supposed to upload but couldn't find unused piece")
		uc.mu.Unlock()
		w.managedDropChunk(uc)
		return nil, 0
	}
	delete(uc.unusedHosts, w.hostPubKey.String())
	uc.piecesRegistered++
	uc.workersRemaining--
	uc.mu.Unlock()
	return uc, uint64(index)
}

// managedUploadFailed is called if a worker failed to upload part of an unfinished
// chunk.
func (w *worker) managedUploadFailed(uc *unfinishedUploadChunk, pieceIndex uint64) {
	// Mark the failure in the worker if the gateway says we are online. It's
	// not the worker's fault if we are offline.
	if w.renter.g.Online() {
		w.mu.Lock()
		w.uploadRecentFailure = time.Now()
		w.uploadConsecutiveFailures++
		w.mu.Unlock()
	}

	// Unregister the piece from the chunk and hunt for a replacement.
	uc.mu.Lock()
	uc.piecesRegistered--
	uc.pieceUsage[pieceIndex] = false
	uc.mu.Unlock()

	// Notify the standby workers of the chunk.
	uc.managedNotifyStandbyWorkers()
	w.renter.managedCleanUpUploadChunk(uc)

	// Because the worker is now on cooldown, drop all remaining chunks.
	w.managedDropUploadChunks()
}