gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/download.go

package renter

// The download code follows a hopefully clean/intuitive flow for getting very
// high, computationally efficient parallelism on downloads. When a download is
// requested, it gets split into its respective chunks (which are downloaded
// individually) and then put into the download heap. The primary purpose of
// the download heap is to keep downloads on standby until there is enough
// memory available to send the downloads off to the workers. The heap is
// sorted first by priority, and then by a few other criteria as well.
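//
// A minimal sketch of what that ordering might look like (the names and
// tie-breakers here are illustrative, not the exact implementation):
//
//	func (dh downloadChunkHeap) Less(i, j int) bool {
//		// Higher priority chunks are popped first.
//		if dh[i].staticPriority != dh[j].staticPriority {
//			return dh[i].staticPriority > dh[j].staticPriority
//		}
//		// Tie-break: chunks of earlier downloads first, then lower
//		// chunk indexes first.
//		if !dh[i].download.staticStartTime.Equal(dh[j].download.staticStartTime) {
//			return dh[i].download.staticStartTime.Before(dh[j].download.staticStartTime)
//		}
//		return dh[i].staticChunkIndex < dh[j].staticChunkIndex
//	}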
//
// Some downloads, in particular downloads issued by the repair code, have
// already had their memory allocated. These downloads get to skip the heap and
// go straight to the workers.
//
// When a download is distributed to workers, it is given to every single worker
// without checking whether that worker is appropriate for the download. Each
// worker has its own queue, which is bottlenecked by the fact that a worker
// can only process one item at a time. When the worker gets to a download
// request, it determines whether it is suited for downloading that particular
// file. The criteria it uses include whether or not it has a piece of that
// chunk, how many other workers are currently downloading pieces or have
// completed pieces for that chunk, and finally things like worker latency and
// worker price.
//
// If the worker chooses to download a piece, it will register itself with that
// piece, so that other workers know how many workers are downloading each
// piece. This keeps everything cleanly coordinated and prevents too many
// workers from downloading a given piece, while at the same time not needing
// a giant, messy coordinator tracking everything. If a worker chooses not to
// download a piece, it will add itself to the list of standby workers, so that
// in the event of a failure, the worker can be returned to and used again as a
// backup worker. The worker may also decide that it is not suitable at all (for
// example, if the worker has recently had some consecutive failures, or if the
// worker doesn't have access to a piece of that chunk), in which case it will
// mark itself as unavailable to the chunk.
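//
// Outlined as code, the per-worker decision is roughly the following (the
// helper names here are hypothetical; the real logic lives in
// managedProcessDownloadChunk and the worker code):
//
//	switch {
//	case !workerHasPiece || workerOnCooldown:
//		// Unusable for this chunk; never come back to this worker.
//		udc.managedRemoveWorker()
//	case enoughWorkersActive:
//		// Might be needed later if an active worker fails.
//		udc.workersStandby = append(udc.workersStandby, w)
//	default:
//		udc.pieceUsage[pieceIndex] = true // register with the piece
//		// ... queue the actual piece download on this worker ...
//	}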
//
// As workers complete, they will release memory and check on the overall state
// of the chunk. If some workers fail, they will enlist the standby workers to
// pick up the slack.
//
// When the final required piece finishes downloading, the worker who completed
// the final piece will spin up a separate thread to decrypt, decode, and write
// out the download. That thread will then clean up any remaining resources, and
// if this was the final unfinished chunk in the download, it'll mark the
// download as complete.

// The download process has a slightly complicating factor, which is overdrive
// workers. Traditionally, if you need 10 pieces to recover a file, you will use
// 10 workers. But if you have an overdrive of '2', you will actually use 12
// workers, meaning you download 2 more pieces than you need. This means that up
// to two of the workers can be slow or fail and the download can still complete
// quickly. This complicates resource handling, because not all memory can be
// released as soon as a download completes - there may be overdrive workers
// still out fetching the file. To handle this, a catchall 'cleanUp' function is
// used which gets called every time a worker finishes, and every time recovery
// completes. The result is that memory gets cleaned up as required, and no
// overarching coordination is needed between the overdrive workers (who do not
// even know that they are overdrive workers) and the recovery function.
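//
// The shape of that catchall is roughly the following (a sketch under assumed
// field names, not the exact implementation):
//
//	func (udc *unfinishedDownloadChunk) cleanUp() {
//		// Called every time a worker returns and every time recovery
//		// finishes; only the last caller actually releases the memory.
//		if udc.workersRemaining == 0 && udc.recoveryComplete {
//			udc.returnMemory()
//		}
//	}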

// By default, the download code organizes itself around having maximum possible
// throughput. That is, it is highly parallel, and exploits that parallelism as
// efficiently and effectively as possible. The hostdb does a good job of
// selecting for hosts that have good traits, so we can generally assume that
// every host or worker at our disposal is reasonably effective in all
// dimensions, and that the overall selection is generally geared towards the
// user's preferences.
//
// We can leverage the standby workers in each unfinishedDownloadChunk to
// emphasize various traits. For example, if we want to prioritize latency,
// we'll put a filter in the 'managedProcessDownloadChunk' function that has a
// worker go standby instead of accepting a chunk if the latency is higher than
// the targeted latency. These filters can target other traits as well, such as
// price and total throughput.
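//
// Such a filter could be as small as the following (hypothetical field names;
// shown only to illustrate where the filter would sit):
//
//	// Inside managedProcessDownloadChunk, before accepting the chunk:
//	if w.staticHostLatency > udc.staticLatencyTarget {
//		udc.workersStandby = append(udc.workersStandby, w)
//		return
//	}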

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"gitlab.com/NebulousLabs/errors"

	"gitlab.com/SiaPrime/SiaPrime/build"
	"gitlab.com/SiaPrime/SiaPrime/modules"
	"gitlab.com/SiaPrime/SiaPrime/modules/renter/siafile"
	"gitlab.com/SiaPrime/SiaPrime/persist"
	"gitlab.com/SiaPrime/SiaPrime/types"
)

type (
	// A download is a file download that has been queued by the renter.
	download struct {
		// Data progress variables.
		atomicDataReceived         uint64 // Incremented as data completes, will stop at 100% file progress.
		atomicTotalDataTransferred uint64 // Incremented as data arrives, includes overdrive, contract negotiation, etc.

		// Other progress variables.
		chunksRemaining uint64        // Number of chunks whose downloads are incomplete.
		completeChan    chan struct{} // Closed once the download is complete.
		err             error         // Only set if there was an error which prevented the download from completing.

		// downloadCompleteFuncs is a slice of functions which are called when
		// completeChan is closed.
		downloadCompleteFuncs []func(error) error

		// Timestamp information.
		endTime         time.Time // Set immediately before closing 'completeChan'.
		staticStartTime time.Time // Set immediately when the download object is created.

		// Basic information about the file.
		destination           downloadDestination
		destinationString     string          // The string reported to the user to indicate the download's destination.
		staticDestinationType string          // "memory buffer", "http stream", "file", etc.
		staticLength          uint64          // Length to download starting from the offset.
		staticOffset          uint64          // Offset within the file to start the download.
		staticSiaPath         modules.SiaPath // The path of the siafile at the time the download started.

		// Retrieval settings for the file.
		staticLatencyTarget time.Duration // Lower latency targets result in lower total system throughput.
		staticOverdrive     int           // How many extra pieces to download to prevent slow hosts from being a bottleneck.
		staticPriority      uint64        // Downloads with higher priority will complete first.

		// Utilities.
		log           *persist.Logger // Same log as the renter.
		memoryManager *memoryManager  // Same memoryManager used across the renter.
		mu            sync.Mutex      // Unique to the download object.
	}

	// downloadParams is the set of parameters to use when downloading a file.
	downloadParams struct {
		destination       downloadDestination // The place to write the downloaded data.
		destinationType   string              // "file", "buffer", "http stream", etc.
		destinationString string              // The string to report to the user for the destination.
		file              *siafile.Snapshot   // The file to download.

		latencyTarget time.Duration // Workers above this latency will be automatically put on standby initially.
		length        uint64        // Length of download. Cannot be 0.
		needsMemory   bool          // Whether new memory needs to be allocated to perform the download.
		offset        uint64        // Offset within the file to start the download. Must be less than the total filesize.
		overdrive     int           // How many extra pieces to download to prevent slow hosts from being a bottleneck.
		priority      uint64        // Files with a higher priority will be downloaded first.
	}
)
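
// As an illustration of how the parameters fit together, a full-file download
// to disk could be described roughly like this (a sketch; 'dw' and 'snap'
// stand in for values built the way managedDownload builds them, and the
// numeric defaults mirror the TODOs in managedDownload below):
//
//	dp := downloadParams{
//		destination:       dw,   // e.g. a *downloadDestinationFile
//		destinationType:   "file",
//		destinationString: "/tmp/out.dat",
//		file:              snap, // *siafile.Snapshot
//
//		latencyTarget: 25e3 * time.Millisecond,
//		length:        snap.Size(),
//		needsMemory:   true,
//		offset:        0,
//		overdrive:     3,
//		priority:      5,
//	}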

// managedCancel cancels a download by marking it as failed.
func (d *download) managedCancel() {
	d.managedFail(modules.ErrDownloadCancelled)
}

// managedFail will mark the download as complete, but with the provided error.
// If the download has already failed, the original error is kept and the new
// error is dropped.
func (d *download) managedFail(err error) {
	d.mu.Lock()
	defer d.mu.Unlock()

	// If the download is already complete, do not update the error.
	complete := d.staticComplete()
	if complete && d.err != nil {
		return
	} else if complete && d.err == nil {
		d.log.Critical("download is marked as completed without error, but then managedFail was called with err:", err)
		return
	}

	// Mark the download as complete and set the error.
	d.err = err
	d.markComplete()
}

// markComplete is a helper method which closes the completeChan and executes
// the downloadCompleteFuncs. The completeChan should always be closed using
// this method.
func (d *download) markComplete() {
	// Avoid calling markComplete multiple times. In a production build,
	// build.Critical won't panic, which is fine since we set
	// downloadCompleteFuncs to nil after executing them. We still don't want
	// to close the completeChan again though, to avoid a crash.
	if d.staticComplete() {
		build.Critical("Can't call markComplete multiple times")
	} else {
		defer close(d.completeChan)
	}
	// Execute the downloadCompleteFuncs before closing the channel. This gives
	// the initiator of the download the nice guarantee that waiting for the
	// completeChan to be closed also means that the downloadCompleteFuncs are
	// done.
	var err error
	for _, f := range d.downloadCompleteFuncs {
		err = errors.Compose(err, f(d.err))
	}
	// Log potential errors.
	if err != nil {
		d.log.Println("Failed to execute at least one downloadCompleteFunc", err)
	}
	// Set downloadCompleteFuncs to nil to avoid executing them multiple times.
	d.downloadCompleteFuncs = nil
}

// onComplete registers a function to be called when the download is completed.
// This can either mean that the download succeeded or failed. The registered
// functions are executed in the same order as they are registered, and waiting
// for the download's completeChan to be closed implies that the registered
// functions were executed.
func (d *download) onComplete(f func(error) error) {
	select {
	case <-d.completeChan:
		if err := f(d.err); err != nil {
			d.log.Println("Failed to execute downloadCompleteFunc", err)
		}
		return
	default:
	}
	d.downloadCompleteFuncs = append(d.downloadCompleteFuncs, f)
}

// staticComplete is a helper function to indicate whether or not the download
// has completed.
func (d *download) staticComplete() bool {
	select {
	case <-d.completeChan:
		return true
	default:
		return false
	}
}

// Err returns the error encountered by a download, if it exists.
func (d *download) Err() (err error) {
	d.mu.Lock()
	err = d.err
	d.mu.Unlock()
	return err
}

// OnComplete registers a function to be called when the download is completed.
// This can either mean that the download succeeded or failed. The registered
// functions are executed in the same order as they are registered, and waiting
// for the download's completeChan to be closed implies that the registered
// functions were executed.
func (d *download) OnComplete(f func(error) error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.onComplete(f)
}

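// For example, a caller can use OnComplete to attach cleanup that is
// guaranteed to have run by the time completeChan is closed (a sketch;
// 'logFile' is a hypothetical os.File owned by the caller):
//
//	d.OnComplete(func(err error) error {
//		if err != nil {
//			fmt.Println("download ended with error:", err)
//		}
//		return logFile.Close()
//	})
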
// Download performs a file download using the passed parameters and blocks
// until the download is finished.
func (r *Renter) Download(p modules.RenterDownloadParameters) error {
	if err := r.tg.Add(); err != nil {
		return err
	}
	defer r.tg.Done()
	d, err := r.managedDownload(p)
	if err != nil {
		return err
	}
	// Block until the download has completed.
	select {
	case <-d.completeChan:
		return d.Err()
	case <-r.tg.StopChan():
		return errors.New("download interrupted by shutdown")
	}
}

// DownloadAsync performs a file download using the passed parameters without
// blocking until the download is finished.
func (r *Renter) DownloadAsync(p modules.RenterDownloadParameters, f func(error) error) (cancel func(), err error) {
	if err := r.tg.Add(); err != nil {
		return nil, err
	}
	defer r.tg.Done()
	d, err := r.managedDownload(p)
	if err != nil {
		return nil, err
	}
	if f != nil {
		d.onComplete(f)
	}
	return d.managedCancel, err
}

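// A blocking, whole-file download to disk then looks like this at the call
// site (a sketch; 'r' is a *Renter and 'siaPath' names an existing file):
//
//	err := r.Download(modules.RenterDownloadParameters{
//		SiaPath:     siaPath,
//		Destination: "/tmp/out.dat", // must be an absolute path
//		Offset:      0,
//		Length:      0, // 0 is the sentinel for "to the end of the file"
//	})
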
// managedDownload performs a file download using the passed parameters and
// returns the download object and an error that indicates if the download
// setup was successful.
func (r *Renter) managedDownload(p modules.RenterDownloadParameters) (*download, error) {
	// Lookup the file associated with the nickname.
	entry, err := r.staticFileSet.Open(p.SiaPath)
	if err != nil {
		return nil, err
	}
	defer entry.Close()
	defer entry.UpdateAccessTime()

	// Validate download parameters.
	isHTTPResp := p.Httpwriter != nil
	if p.Async && isHTTPResp {
		return nil, errors.New("cannot async download to http response")
	}
	if isHTTPResp && p.Destination != "" {
		return nil, errors.New("destination cannot be specified when downloading to http response")
	}
	if !isHTTPResp && p.Destination == "" {
		return nil, errors.New("destination not supplied")
	}
	if p.Destination != "" && !filepath.IsAbs(p.Destination) {
		return nil, errors.New("destination must be an absolute path")
	}
	if p.Offset == entry.Size() && entry.Size() != 0 {
		return nil, errors.New("offset equals filesize")
	}
	// Sentinel: if length == 0, download the entire file.
	if p.Length == 0 {
		if p.Offset > entry.Size() {
			return nil, errors.New("offset cannot be greater than file size")
		}
		p.Length = entry.Size() - p.Offset
	}
	// Check whether the offset and length are valid.
	if p.Offset < 0 || p.Offset+p.Length > entry.Size() {
		return nil, fmt.Errorf("offset and length combination invalid, max byte is at index %d", entry.Size()-1)
	}

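	// Worked example: for a 1000-byte file, offset 200 with length 0 becomes
	// offset 200 with length 800 (read to the end of the file), while offset
	// 200 with length 900 is rejected because 200+900 > 1000.
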
	// Instantiate the correct downloadDestination implementation.
	var dw downloadDestination
	var destinationType string
	if isHTTPResp {
		dw = newDownloadDestinationWriter(p.Httpwriter)
		destinationType = "http stream"
	} else {
		osFile, err := os.OpenFile(p.Destination, os.O_CREATE|os.O_WRONLY, entry.Mode())
		if err != nil {
			return nil, err
		}
		dw = &downloadDestinationFile{deps: r.deps, f: osFile, staticChunkSize: int64(entry.ChunkSize())}
		destinationType = "file"
	}

	// If the destination is an http.ResponseWriter, set the Content-Length
	// header.
	if isHTTPResp {
		w, ok := p.Httpwriter.(http.ResponseWriter)
		if ok {
			w.Header().Set("Content-Length", fmt.Sprint(p.Length))
		}
	}

	// Prepare the snapshot.
	snap, err := entry.Snapshot()
	if err != nil {
		return nil, err
	}
	// Create the download object.
	d, err := r.managedNewDownload(downloadParams{
		destination:       dw,
		destinationType:   destinationType,
		destinationString: p.Destination,
		file:              snap,

		latencyTarget: 25e3 * time.Millisecond, // TODO: high default until full latency support is added.
		length:        p.Length,
		needsMemory:   true,
		offset:        p.Offset,
		overdrive:     3, // TODO: moderate default until full overdrive support is added.
		priority:      5, // TODO: moderate default until full priority support is added.
	})
	if closer, ok := dw.(io.Closer); err != nil && ok {
		// If the destination can be closed, do so.
		return nil, errors.Compose(err, closer.Close())
	} else if err != nil {
		return nil, err
	}

	// Register some cleanup for when the download is done.
	d.OnComplete(func(_ error) error {
		// Close the destination if possible.
		if closer, ok := dw.(io.Closer); ok {
			return closer.Close()
		}
		// Sanity check that files get closed.
		if destinationType == "file" {
			build.Critical("file wasn't closed after download")
		}
		return nil
	})

	// Add the download object to the download history if it's not a stream.
	if destinationType != destinationTypeSeekStream {
		r.downloadHistoryMu.Lock()
		r.downloadHistory = append(r.downloadHistory, d)
		r.downloadHistoryMu.Unlock()
	}

	// Return the download object.
	return d, nil
}

// managedNewDownload creates and initializes a download based on the provided
// parameters.
func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
	// Input validation.
	if params.file == nil {
		return nil, errors.New("no file provided when requesting download")
	}
	if params.length < 0 {
		return nil, errors.New("download length must be zero or a positive whole number")
	}
	if params.offset < 0 {
		return nil, errors.New("download offset cannot be a negative number")
	}
	if params.offset+params.length > params.file.Size() {
		return nil, errors.New("download is requesting data past the boundary of the file")
	}

	// Create the download object.
	d := &download{
		completeChan: make(chan struct{}),

		staticStartTime: time.Now(),

		destination:           params.destination,
		destinationString:     params.destinationString,
		staticDestinationType: params.destinationType,
		staticLatencyTarget:   params.latencyTarget,
		staticLength:          params.length,
		staticOffset:          params.offset,
		staticOverdrive:       params.overdrive,
		staticSiaPath:         params.file.SiaPath(),
		staticPriority:        params.priority,

		log:           r.log,
		memoryManager: r.memoryManager,
	}

	// Update the endTime of the download when it's done. Also nil out the
	// destination pointer so that the garbage collector does not think any
	// memory is still being used.
	d.onComplete(func(_ error) error {
		d.endTime = time.Now()
		d.destination = nil
		return nil
	})

	// Nothing more to do for 0-byte files or 0-length downloads.
	if d.staticLength == 0 {
		d.markComplete()
		return d, nil
	}

	// Determine which chunks to download.
	minChunk, minChunkOffset := params.file.ChunkIndexByOffset(params.offset)
	maxChunk, maxChunkOffset := params.file.ChunkIndexByOffset(params.offset + params.length)
	// If the maxChunkOffset is exactly 0 we need to subtract 1 chunk. e.g. if
	// the chunkSize is 100 bytes and we want to download 100 bytes from offset
	// 0, maxChunk would be 1 and maxChunkOffset would be 0. We want maxChunk
	// to be 0 though, since we don't actually need any data from chunk 1.
	if maxChunk > 0 && maxChunkOffset == 0 {
		maxChunk--
	}
	// Make sure the requested chunks are within the boundaries.
	if minChunk == params.file.NumChunks() || maxChunk == params.file.NumChunks() {
		return nil, errors.New("download is requesting a chunk that is past the boundary of the file")
	}

	// For each chunk, assemble a mapping from the contract id to the index of
	// the piece within the chunk that the contract is responsible for.
	chunkMaps := make([]map[string]downloadPieceInfo, maxChunk-minChunk+1)
	for chunkIndex := minChunk; chunkIndex <= maxChunk; chunkIndex++ {
		// Create the map.
		chunkMaps[chunkIndex-minChunk] = make(map[string]downloadPieceInfo)
		// Get the pieces for the chunk.
		pieces := params.file.Pieces(uint64(chunkIndex))
		for pieceIndex, pieceSet := range pieces {
			for _, piece := range pieceSet {
				// Sanity check - the same worker should not have two pieces
				// for the same chunk.
				_, exists := chunkMaps[chunkIndex-minChunk][piece.HostPubKey.String()]
				if exists {
					r.log.Println("ERROR: Worker has multiple pieces uploaded for the same chunk.", params.file.SiaPath(), chunkIndex, pieceIndex, piece.HostPubKey.String())
				}
				chunkMaps[chunkIndex-minChunk][piece.HostPubKey.String()] = downloadPieceInfo{
					index: uint64(pieceIndex),
					root:  piece.MerkleRoot,
				}
			}
		}
	}

	// Queue the downloads for each chunk.
	writeOffset := int64(0) // where to write a chunk within the download destination.
	d.chunksRemaining += maxChunk - minChunk + 1
	for i := minChunk; i <= maxChunk; i++ {
		udc := &unfinishedDownloadChunk{
			destination: params.destination,
			erasureCode: params.file.ErasureCode(),
			masterKey:   params.file.MasterKey(),

			staticChunkIndex: i,
			staticCacheID:    fmt.Sprintf("%v:%v", d.staticSiaPath, i),
			staticChunkMap:   chunkMaps[i-minChunk],
			staticChunkSize:  params.file.ChunkSize(),
			staticPieceSize:  params.file.PieceSize(),

			// TODO: 25ms is just a guess for a good default. Really, we want to
			// set the latency target such that slower workers will pick up the
			// later chunks, but only if there's a very strong chance that
			// they'll finish before the earlier chunks finish, so that they do
			// not contribute to higher latency.
			//
			// TODO: There is some sane minimum latency that should actually be
			// set based on the number of pieces 'n', and the 'n' fastest
			// workers that we have.
			staticLatencyTarget: params.latencyTarget + (time.Duration(i-minChunk) * 25 * time.Millisecond), // Increase target by 25ms per chunk.
			staticNeedsMemory:   params.needsMemory,
			staticPriority:      params.priority,

			completedPieces:   make([]bool, params.file.ErasureCode().NumPieces()),
			physicalChunkData: make([][]byte, params.file.ErasureCode().NumPieces()),
			pieceUsage:        make([]bool, params.file.ErasureCode().NumPieces()),

			download:   d,
			renterFile: params.file,
		}

		// Set the fetchOffset - the offset within the chunk that we start
		// downloading from.
		if i == minChunk {
			udc.staticFetchOffset = minChunkOffset
		} else {
			udc.staticFetchOffset = 0
		}
		// Set the fetchLength - the number of bytes to fetch within the chunk
		// that we start downloading from.
		if i == maxChunk && maxChunkOffset != 0 {
			udc.staticFetchLength = maxChunkOffset - udc.staticFetchOffset
		} else {
			udc.staticFetchLength = params.file.ChunkSize() - udc.staticFetchOffset
		}
		// Set the writeOffset within the destination for where the data should
		// be written.
		udc.staticWriteOffset = writeOffset
		writeOffset += int64(udc.staticFetchLength)
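
		// Worked example, assuming a chunk size of 100 bytes: a download with
		// offset 150 and length 120 spans chunks 1 and 2, with minChunkOffset
		// 50 and maxChunkOffset 70. Chunk 1 fetches bytes [50, 100) of the
		// chunk (fetchLength 50) and writes at destination offset 0; chunk 2
		// fetches bytes [0, 70) and writes at destination offset 50.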

		// TODO: Currently all chunks are given overdrive. This should probably
		// be changed once the hostdb knows how to measure host speed/latency
		// and once we can assign overdrive dynamically.
		udc.staticOverdrive = params.overdrive

		// Add this chunk to the chunk heap, and notify the download loop that
		// there is work to do.
		r.managedAddChunkToDownloadHeap(udc)
		select {
		case r.newDownloads <- struct{}{}:
		default:
		}
	}
	return d, nil
}

// DownloadHistory returns the list of downloads that have been performed. Will
// include downloads that have not yet completed. Downloads will be roughly,
// but not precisely, sorted according to start time.
//
// TODO: Currently the DownloadHistory only contains downloads from this
// session, does not contain downloads that were executed for the purposes of
// repairing, and has no way to clear the download history if it gets long or
// unwieldy. It's not entirely certain which of the missing features are
// actually desirable, please consult core team + app dev community before
// deciding what to implement.
func (r *Renter) DownloadHistory() []modules.DownloadInfo {
	r.downloadHistoryMu.Lock()
	defer r.downloadHistoryMu.Unlock()

	downloads := make([]modules.DownloadInfo, len(r.downloadHistory))
	for i := range r.downloadHistory {
		// Order from most recent to least recent.
		d := r.downloadHistory[len(r.downloadHistory)-i-1]
		d.mu.Lock() // Lock required for d.endTime only.
		downloads[i] = modules.DownloadInfo{
			Destination:     d.destinationString,
			DestinationType: d.staticDestinationType,
			Length:          d.staticLength,
			Offset:          d.staticOffset,
			SiaPath:         d.staticSiaPath,

			Completed:            d.staticComplete(),
			EndTime:              d.endTime,
			Received:             atomic.LoadUint64(&d.atomicDataReceived),
			StartTime:            d.staticStartTime,
			StartTimeUnix:        d.staticStartTime.UnixNano(),
			TotalDataTransferred: atomic.LoadUint64(&d.atomicTotalDataTransferred),
		}
		// Release the download lock before calling d.Err(), which will acquire
		// the lock. The error needs to be checked separately because we need
		// to know if it's 'nil' before grabbing the error string.
		d.mu.Unlock()
		if d.Err() != nil {
			downloads[i].Error = d.Err().Error()
		} else {
			downloads[i].Error = ""
		}
	}
	return downloads
}

// ClearDownloadHistory clears the renter's download history, inclusive of the
// provided before and after timestamps.
//
// TODO: This function can be improved by implementing a binary search; the
// trick will be making the binary search just as readable while handling all
// the edge cases.
func (r *Renter) ClearDownloadHistory(after, before time.Time) error {
	if err := r.tg.Add(); err != nil {
		return err
	}
	defer r.tg.Done()
	r.downloadHistoryMu.Lock()
	defer r.downloadHistoryMu.Unlock()

	// Check to confirm there are downloads to clear.
	if len(r.downloadHistory) == 0 {
		return nil
	}

	// Timestamp validation.
	if before.Before(after) {
		return errors.New("before timestamp cannot be earlier than the after timestamp")
	}

	// Clear the entire download history if 'after' is the zero value and
	// 'before' is the end of time.
	if before.Equal(types.EndOfTime) && after.IsZero() {
		r.downloadHistory = r.downloadHistory[:0]
		return nil
	}

	// Keep only the downloads that fall outside the given range.
	withinTimespan := func(t time.Time) bool {
		return (t.After(after) || t.Equal(after)) && (t.Before(before) || t.Equal(before))
	}
	filtered := r.downloadHistory[:0]
	for _, d := range r.downloadHistory {
		if !withinTimespan(d.staticStartTime) {
			filtered = append(filtered, d)
		}
	}
	r.downloadHistory = filtered
	return nil
}
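
// For example, to clear the entire history a caller passes the zero time and
// the end of time (a sketch; 'r' is a *Renter):
//
//	_ = r.ClearDownloadHistory(time.Time{}, types.EndOfTime)
//
// while passing a narrower [after, before] window removes only the downloads
// whose start times fall inside that window.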