github.com/nebulouslabs/sia@v1.3.7/modules/renter/worker.go

package renter

import (
	"sync"
	"time"

	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
)

// A worker listens for work on a certain host.
//
// The worker's mutexes only protect the 'downloadChunks' and
// 'unprocessedChunks' fields of the worker. The rest of the fields are
// interacted with exclusively by the primary worker thread, and only one of
// those ever exists at a time.
//
// The workers have a concept of 'cooldown' for uploads and downloads. If a
// download or upload operation fails, the assumption is that future attempts
// are also likely to fail, because whatever condition resulted in the failure
// will still be present until some time has passed. Without cooldowns, flaky
// hosts in the worker set substantially reduce overall upload and download
// performance and throughput.
type worker struct {
	// The contract and host used by this worker.
	contract   modules.RenterContract
	hostPubKey types.SiaPublicKey
	renter     *Renter

	// Download variables that are not protected by a mutex, but also do not
	// need to be protected by a mutex, as they are only accessed by the master
	// thread for the worker.
	ownedDownloadConsecutiveFailures int       // How many failures in a row?
	ownedDownloadRecentFailure       time.Time // How recent was the last failure?

	// Download variables related to queuing work. They have a separate mutex to
	// minimize lock contention.
	downloadChan       chan struct{}              // Notifications of new work. Takes priority over uploads.
	downloadChunks     []*unfinishedDownloadChunk // Yet unprocessed work items.
	downloadMu         sync.Mutex
	downloadTerminated bool // Has downloading been terminated for this worker?

	// Upload variables.
	unprocessedChunks         []*unfinishedUploadChunk // Yet unprocessed work items.
	uploadChan                chan struct{}            // Notifications of new work.
	uploadConsecutiveFailures int                      // How many times in a row uploading has failed.
	uploadRecentFailure       time.Time                // How recent was the last failure?
	uploadTerminated          bool                     // Have we stopped uploading?

	// Utilities.
	//
	// 'mu' is only needed when interacting with 'unprocessedChunks', and
	// 'downloadChunks' is covered by 'downloadMu'; everything else is only
	// accessed from the single master thread.
	killChan chan struct{} // Worker will shut down if a signal is sent down this channel.
	mu       sync.Mutex
}
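
// The sketch below is illustrative and not part of the upstream file: it
// shows one way the cooldown fields above could be combined into a check,
// with the wait growing exponentially in the number of consecutive failures
// and capped so a worker is never benched indefinitely. The constant values
// and the helper name are assumptions, not the renter's actual policy.
const (
	exampleBaseCooldown = 30 * time.Second // assumed base penalty after one failure
	exampleMaxCooldown  = time.Hour        // assumed upper bound on the penalty
)

// onDownloadCooldownExample reports whether this worker should still be
// skipped for download work because of recent download failures.
func (w *worker) onDownloadCooldownExample() bool {
	// Double the base cooldown once per consecutive failure.
	cooldown := exampleBaseCooldown << uint(w.ownedDownloadConsecutiveFailures)
	if cooldown <= 0 || cooldown > exampleMaxCooldown {
		cooldown = exampleMaxCooldown
	}
	return time.Since(w.ownedDownloadRecentFailure) < cooldown
}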

// managedUpdateWorkerPool will grab the set of contracts from the contractor
// and update the worker pool to match.
func (r *Renter) managedUpdateWorkerPool() {
	contractSlice := r.hostContractor.Contracts()
	contractMap := make(map[types.FileContractID]modules.RenterContract)
	for _, contract := range contractSlice {
		contractMap[contract.ID] = contract
	}

	// Add a worker for any contract that does not already have a worker.
	for id, contract := range contractMap {
		lockID := r.mu.Lock()
		_, exists := r.workerPool[id]
		if !exists {
			worker := &worker{
				contract:   contract,
				hostPubKey: contract.HostPublicKey,

				downloadChan: make(chan struct{}, 1),
				killChan:     make(chan struct{}),
				uploadChan:   make(chan struct{}, 1),

				renter: r,
			}
			r.workerPool[id] = worker
			go worker.threadedWorkLoop()
		}
		r.mu.Unlock(lockID)
	}

	// Remove any worker whose contract is no longer in the contract set.
	lockID := r.mu.Lock()
	for id, worker := range r.workerPool {
		_, exists := contractMap[id]
		if !exists {
			delete(r.workerPool, id)
			close(worker.killChan)
		}
	}
	r.mu.Unlock(lockID)
}
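
// The loop below is an illustrative sketch and not part of the upstream
// file: it shows how managedUpdateWorkerPool might be driven by re-syncing
// the worker pool against the contract set on a fixed interval until the
// renter shuts down. The method name and the interval are assumptions; the
// real renter invokes managedUpdateWorkerPool from its own maintenance
// threads.
func (r *Renter) threadedPollWorkerPoolExample() {
	if r.tg.Add() != nil {
		return
	}
	defer r.tg.Done()

	ticker := time.NewTicker(5 * time.Minute) // assumed refresh interval
	defer ticker.Stop()
	for {
		r.managedUpdateWorkerPool()
		select {
		case <-ticker.C:
		case <-r.tg.StopChan():
			return
		}
	}
}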

// threadedWorkLoop repeatedly issues work to the worker, stopping when the
// worker is killed or when the thread group is closed.
func (w *worker) threadedWorkLoop() {
	err := w.renter.tg.Add()
	if err != nil {
		return
	}
	defer w.renter.tg.Done()
	defer w.managedKillUploading()
	defer w.managedKillDownloading()

	for {
		// Perform one step of processing download work.
		downloadChunk := w.managedNextDownloadChunk()
		if downloadChunk != nil {
			// managedDownload will handle removing the worker internally. If
			// the chunk is dropped from the worker, the worker will be removed
			// from the chunk. If the worker executes a download (success or
			// failure), the worker will be removed from the chunk. If the
			// worker is put on standby, it will not be removed from the chunk.
			w.managedDownload(downloadChunk)
			continue
		}

		// Perform one step of processing upload work.
		chunk, pieceIndex := w.managedNextUploadChunk()
		if chunk != nil {
			w.managedUpload(chunk, pieceIndex)
			continue
		}

		// Block until new work is received via the upload or download channels,
		// or until a kill or stop signal is received.
		select {
		case <-w.downloadChan:
			continue
		case <-w.uploadChan:
			continue
		case <-w.killChan:
			return
		case <-w.renter.tg.StopChan():
			return
		}
	}
}
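
// The helper below is an illustrative sketch and not part of the upstream
// file: it shows how new download work would typically be handed to the
// worker and how the buffered notification channel pairs with the select in
// threadedWorkLoop. The chunk is appended under downloadMu, then a
// non-blocking send wakes the work loop; because downloadChan has a buffer
// of one, repeated notifications collapse into a single wakeup. The method
// name is an assumption.
func (w *worker) managedQueueDownloadChunkExample(udc *unfinishedDownloadChunk) {
	w.downloadMu.Lock()
	w.downloadChunks = append(w.downloadChunks, udc)
	w.downloadMu.Unlock()

	// Non-blocking send: if a notification is already pending, the worker
	// will pick up the new chunk when it drains downloadChunks.
	select {
	case w.downloadChan <- struct{}{}:
	default:
	}
}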