github.com/Synthesix/Sia@v1.3.3-0.20180413141344-f863baeed3ca/modules/renter/workerupload.go

package renter

import (
	"time"
)

// managedDropChunk will remove a worker from the responsibility of tracking a chunk.
//
// This function is managed instead of static because it is against convention
// to be calling functions on other objects (in this case, the renter) while
// holding a lock.
func (w *worker) managedDropChunk(uc *unfinishedUploadChunk) {
	uc.mu.Lock()
	uc.workersRemaining--
	uc.mu.Unlock()
	w.renter.managedCleanUpUploadChunk(uc)
}

// managedDropUploadChunks will release all of the upload chunks that the worker
// has received.
func (w *worker) managedDropUploadChunks() {
	// Make a copy of the slice under lock, clear the slice, then drop the
	// chunks without a lock (managed function).
	var chunksToDrop []*unfinishedUploadChunk
	w.mu.Lock()
	for i := 0; i < len(w.unprocessedChunks); i++ {
		chunksToDrop = append(chunksToDrop, w.unprocessedChunks[i])
	}
	w.unprocessedChunks = w.unprocessedChunks[:0]
	w.mu.Unlock()

	for i := 0; i < len(chunksToDrop); i++ {
		w.managedDropChunk(chunksToDrop[i])
	}
}
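
// The drop path above follows the locking convention described on
// managedDropChunk: copy the queue while holding the worker's mutex, clear
// it, and only call managed functions after the mutex is released. The
// sketch below restates that pattern in isolation. It is an illustrative
// example rather than part of the original file; the chunkQueueLocker type
// and drainChunkQueueExample function are hypothetical names, and any type
// with Lock/Unlock (such as the worker's mutex) satisfies the interface.
type chunkQueueLocker interface {
	Lock()
	Unlock()
}

// drainChunkQueueExample copies and clears queue while mu is held, then
// invokes drop on each chunk with no lock held.
func drainChunkQueueExample(mu chunkQueueLocker, queue *[]*unfinishedUploadChunk, drop func(*unfinishedUploadChunk)) {
	mu.Lock()
	toDrop := append([]*unfinishedUploadChunk(nil), (*queue)...)
	*queue = (*queue)[:0]
	mu.Unlock()

	for _, uc := range toDrop {
		drop(uc)
	}
}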

// managedKillUploading will disable all uploading for the worker.
func (w *worker) managedKillUploading() {
	// Mark the worker as disabled so that incoming chunks are rejected.
	w.mu.Lock()
	w.uploadTerminated = true
	w.mu.Unlock()

	// After the worker is marked as disabled, clear out all of the chunks.
	w.managedDropUploadChunks()
}

// managedNextUploadChunk will pull the next potential chunk out of the worker's
// work queue for uploading.
func (w *worker) managedNextUploadChunk() (nextChunk *unfinishedUploadChunk, pieceIndex uint64) {
	// Loop through the unprocessed chunks and find some work to do.
	for {
		// Pull the next chunk off the front of the unprocessed chunk queue.
		w.mu.Lock()
		if len(w.unprocessedChunks) <= 0 {
			w.mu.Unlock()
			break
		}
		chunk := w.unprocessedChunks[0]
		w.unprocessedChunks = w.unprocessedChunks[1:]
		w.mu.Unlock()

		// Process the chunk and return it if valid.
		nextChunk, pieceIndex := w.managedProcessUploadChunk(chunk)
		if nextChunk != nil {
			return nextChunk, pieceIndex
		}
	}
	return nil, 0 // no work found
}
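
// The queue above is consumed by the worker's work loop, which lives in a
// different file of this package. The sketch below shows roughly how a
// consumer of managedNextUploadChunk and uploadChan could be structured. It
// is an illustrative example rather than the actual loop; the
// exampleUploadLoop name and the stop parameter are hypothetical.
func (w *worker) exampleUploadLoop(stop <-chan struct{}) {
	for {
		// Drain every chunk that currently has upload work for this worker.
		for {
			chunk, pieceIndex := w.managedNextUploadChunk()
			if chunk == nil {
				break
			}
			w.managedUpload(chunk, pieceIndex)
		}

		// Block until more work is queued or shutdown is requested.
		select {
		case <-w.uploadChan:
		case <-stop:
			return
		}
	}
}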

// managedQueueUploadChunk will take a chunk and add it to the worker's queue
// of upload work.
func (w *worker) managedQueueUploadChunk(uc *unfinishedUploadChunk) {
	// Check that the worker is allowed to be uploading before grabbing the
	// worker lock.
	utility, exists := w.renter.hostContractor.ContractUtility(w.contract.ID)
	goodForUpload := exists && utility.GoodForUpload
	w.mu.Lock()
	if !goodForUpload || w.uploadTerminated || w.onUploadCooldown() {
		// The worker should not be uploading, remove the chunk.
		w.mu.Unlock()
		w.managedDropChunk(uc)
		return
	}
	w.unprocessedChunks = append(w.unprocessedChunks, uc)
	w.mu.Unlock()

	// Send a signal informing the work thread that there is work.
	select {
	case w.uploadChan <- struct{}{}:
	default:
	}
}
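
// The non-blocking send above can safely drop the signal when the channel is
// full: the work thread drains the entire queue each time it wakes, so one
// pending signal covers any number of queued chunks. The helper below
// isolates that pattern. It is an illustrative sketch rather than part of the
// original file; the notifyWorkerExample name is hypothetical, and it assumes
// wake is a buffered channel (capacity of at least one), as uploadChan is
// expected to be.
func notifyWorkerExample(wake chan<- struct{}) {
	select {
	case wake <- struct{}{}:
		// The signal was delivered or buffered for the next wakeup.
	default:
		// A signal is already pending; dropping this one loses no work.
	}
}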

// managedUpload will perform some upload work.
func (w *worker) managedUpload(uc *unfinishedUploadChunk, pieceIndex uint64) {
	// Open an editing connection to the host.
	e, err := w.renter.hostContractor.Editor(w.contract.ID, w.renter.tg.StopChan())
	if err != nil {
		w.renter.log.Debugln("Worker failed to acquire an editor:", err)
		w.managedUploadFailed(uc, pieceIndex)
		return
	}
	defer e.Close()

	// Perform the upload, and update the failure stats based on the success of
	// the upload attempt.
	root, err := e.Upload(uc.physicalChunkData[pieceIndex])
	if err != nil {
		w.renter.log.Debugln("Worker failed to upload via the editor:", err)
		w.managedUploadFailed(uc, pieceIndex)
		return
	}
	w.mu.Lock()
	w.uploadConsecutiveFailures = 0
	w.mu.Unlock()

	// Update the renter metadata.
	addr := e.Address()
	endHeight := e.EndHeight()
	id := w.renter.mu.Lock()
	uc.renterFile.mu.Lock()
	contract, exists := uc.renterFile.contracts[w.contract.ID]
	if !exists {
		contract = fileContract{
			ID:          w.contract.ID,
			IP:          addr,
			WindowStart: endHeight,
		}
	}
	contract.Pieces = append(contract.Pieces, pieceData{
		Chunk:      uc.index,
		Piece:      pieceIndex,
		MerkleRoot: root,
	})
	uc.renterFile.contracts[w.contract.ID] = contract
	w.renter.saveFile(uc.renterFile)
	uc.renterFile.mu.Unlock()
	w.renter.mu.Unlock(id)

	// Upload is complete. Update the state of the chunk and the renter's memory
	// available to reflect the completed upload.
	uc.mu.Lock()
	releaseSize := len(uc.physicalChunkData[pieceIndex])
	uc.piecesRegistered--
	uc.piecesCompleted++
	uc.physicalChunkData[pieceIndex] = nil
	uc.memoryReleased += uint64(releaseSize)
	uc.mu.Unlock()
	w.renter.memoryManager.Return(uint64(releaseSize))
	w.renter.managedCleanUpUploadChunk(uc)
}

// onUploadCooldown returns true if the worker is on cooldown from failed
// uploads. The worker's mutex must be held by the caller, because the failure
// fields are read without further locking.
func (w *worker) onUploadCooldown() bool {
	requiredCooldown := uploadFailureCooldown
	for i := 0; i < w.uploadConsecutiveFailures && i < maxConsecutivePenalty; i++ {
		requiredCooldown *= 2
	}
	return time.Now().Before(w.uploadRecentFailure.Add(requiredCooldown))
}
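
// onUploadCooldown doubles a base cooldown once per consecutive failure, up
// to maxConsecutivePenalty doublings. The standalone sketch below makes that
// arithmetic explicit. It is an illustrative example rather than part of the
// original file; the cooldownRemainingExample name and its parameters (base
// standing in for uploadFailureCooldown, maxPenalty for
// maxConsecutivePenalty) are hypothetical.
func cooldownRemainingExample(base time.Duration, consecutiveFailures, maxPenalty int, lastFailure time.Time) time.Duration {
	required := base
	for i := 0; i < consecutiveFailures && i < maxPenalty; i++ {
		required *= 2
	}
	// For example, with a one-minute base and two consecutive failures, the
	// worker must wait four minutes from the most recent failure.
	remaining := time.Until(lastFailure.Add(required))
	if remaining < 0 {
		return 0
	}
	return remaining
}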

// managedProcessUploadChunk will process a chunk from the worker chunk queue.
func (w *worker) managedProcessUploadChunk(uc *unfinishedUploadChunk) (nextChunk *unfinishedUploadChunk, pieceIndex uint64) {
	// Determine the usability value of this worker.
	utility, exists := w.renter.hostContractor.ContractUtility(w.contract.ID)
	goodForUpload := exists && utility.GoodForUpload
	w.mu.Lock()
	onCooldown := w.onUploadCooldown()
	w.mu.Unlock()

	// Determine what sort of help this chunk needs.
	uc.mu.Lock()
	_, candidateHost := uc.unusedHosts[w.hostPubKey.String()]
	chunkComplete := uc.piecesNeeded <= uc.piecesCompleted
	needsHelp := uc.piecesNeeded > uc.piecesCompleted+uc.piecesRegistered
	// If the chunk does not need help from this worker, release the chunk.
	if chunkComplete || !candidateHost || !goodForUpload || onCooldown {
		// This worker no longer needs to track this chunk.
		uc.mu.Unlock()
		w.managedDropChunk(uc)
		return nil, 0
	}

	// If the chunk does not need help right now (enough pieces are registered
	// or complete), add the worker to the chunk's set of standby workers.
	if !needsHelp {
		uc.workersStandby = append(uc.workersStandby, w)
		uc.mu.Unlock()
		w.renter.managedCleanUpUploadChunk(uc)
		return nil, 0
	}

	// If the chunk needs help from this worker, find a piece to upload and
	// return the stats for that piece.
	//
	// Select a piece and mark that a piece has been selected.
	index := 0
	for i := 0; i < len(uc.pieceUsage); i++ {
		if !uc.pieceUsage[i] {
			index = i
			uc.pieceUsage[i] = true
			break
		}
	}
	delete(uc.unusedHosts, w.hostPubKey.String())
	uc.piecesRegistered++
	uc.workersRemaining--
	uc.mu.Unlock()
	return uc, uint64(index)
}
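
// Piece selection above claims the first unused entry in uc.pieceUsage; the
// needsHelp check implies that a free slot should exist, which is why the
// loop can safely default to index 0. The helper below expresses the same
// scan with an explicit "found" result. It is an illustrative sketch rather
// than part of the original file; the firstUnusedPieceExample name is
// hypothetical.
func firstUnusedPieceExample(pieceUsage []bool) (uint64, bool) {
	for i, used := range pieceUsage {
		if !used {
			return uint64(i), true
		}
	}
	return 0, false
}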

// managedUploadFailed is called if a worker failed to upload part of an unfinished
// chunk.
func (w *worker) managedUploadFailed(uc *unfinishedUploadChunk, pieceIndex uint64) {
	// Mark the failure in the worker if the gateway says we are online. It's
	// not the worker's fault if we are offline.
	if w.renter.g.Online() {
		w.mu.Lock()
		w.uploadRecentFailure = time.Now()
		w.uploadConsecutiveFailures++
		w.mu.Unlock()
	}

	// Unregister the piece from the chunk and hunt for a replacement.
	uc.mu.Lock()
	uc.piecesRegistered--
	uc.pieceUsage[pieceIndex] = false
	uc.mu.Unlock()

	// Notify the standby workers of the chunk.
	uc.managedNotifyStandbyWorkers()
	w.renter.managedCleanUpUploadChunk(uc)

	// Because the worker is now on cooldown, drop all remaining chunks.
	w.managedDropUploadChunks()
}