github.com/digdeepmining/go-atheios@v1.5.13-0.20180902133602-d5687a2e6f43/eth/downloader/downloader.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"crypto/rand"
    22  	"errors"
    23  	"fmt"
    24  	"math"
    25  	"math/big"
    26  	"strings"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	ethereum "github.com/atheioschain/go-atheios"
    32  	"github.com/atheioschain/go-atheios/common"
    33  	"github.com/atheioschain/go-atheios/core/types"
    34  	"github.com/atheioschain/go-atheios/ethdb"
    35  	"github.com/atheioschain/go-atheios/event"
    36  	"github.com/atheioschain/go-atheios/logger"
    37  	"github.com/atheioschain/go-atheios/logger/glog"
    38  	"github.com/atheioschain/go-atheios/params"
    39  	"github.com/atheioschain/go-atheios/trie"
    40  	"github.com/rcrowley/go-metrics"
    41  )
    42  
    43  var (
     44  	MaxHashFetch    = 512 // Number of hashes to be fetched per retrieval request
     45  	MaxBlockFetch   = 128 // Number of blocks to be fetched per retrieval request
     46  	MaxHeaderFetch  = 192 // Number of block headers to be fetched per retrieval request
     47  	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
     48  	MaxBodyFetch    = 128 // Number of block bodies to be fetched per retrieval request
     49  	MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request
     50  	MaxStateFetch   = 384 // Number of node state values to allow fetching per request
    51  
    52  	MaxForkAncestry  = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
    53  	rttMinEstimate   = 2 * time.Second                   // Minimum round-trip time to target for download requests
     54  	rttMaxEstimate   = 20 * time.Second                  // Maximum round-trip time to target for download requests
     55  	rttMinConfidence = 0.1                               // Worst confidence factor in our estimated RTT value
    56  	ttlScaling       = 3                                 // Constant scaling factor for RTT -> TTL conversion
    57  	ttlLimit         = time.Minute                       // Maximum TTL allowance to prevent reaching crazy timeouts
    58  
    59  	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
    60  	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
    61  	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value
    62  
    63  	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
    64  	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
    65  	maxResultsProcess = 2048      // Number of content download results to import at once into the chain
    66  
    67  	fsHeaderCheckFrequency = 100        // Verification frequency of the downloaded headers during fast sync
    68  	fsHeaderSafetyNet      = 2048       // Number of headers to discard in case a chain violation is detected
    69  	fsHeaderForceVerify    = 24         // Number of headers to verify before and after the pivot to accept it
    70  	fsPivotInterval        = 256        // Number of headers out of which to randomize the pivot point
    71  	fsMinFullBlocks        = 64         // Number of blocks to retrieve fully even in fast sync
     72  	fsCriticalTrials       = uint32(32) // Number of times to retry in the critical section before bailing
    73  )
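
         // A rough sketch of how the RTT knobs above turn into a per-request timeout
         // (the requestTTL helper further down in this file is authoritative; the
         // exact arithmetic here is only illustrative):
         //
         //	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
         //	conf := float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
         //	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
         //	if ttl > ttlLimit {
         //		ttl = ttlLimit
         //	}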
    74  
    75  var (
    76  	errBusy                    = errors.New("busy")
    77  	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
    78  	errBadPeer                 = errors.New("action from bad peer ignored")
    79  	errStallingPeer            = errors.New("peer is stalling")
    80  	errNoPeers                 = errors.New("no peers to keep download active")
    81  	errTimeout                 = errors.New("timeout")
    82  	errEmptyHeaderSet          = errors.New("empty header set by peer")
    83  	errPeersUnavailable        = errors.New("no peers available or all tried for download")
    84  	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
    85  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    86  	errInvalidBlock            = errors.New("retrieved block is invalid")
    87  	errInvalidBody             = errors.New("retrieved block body is invalid")
    88  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    89  	errCancelBlockFetch        = errors.New("block download canceled (requested)")
    90  	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
    91  	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
    92  	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
    93  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    94  	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
    95  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    96  	errNoSyncActive            = errors.New("no sync active")
    97  	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
    98  )
    99  
   100  type Downloader struct {
   101  	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
   102  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
   103  
   104  	queue *queue   // Scheduler for selecting the hashes to download
   105  	peers *peerSet // Set of active peers from which download can proceed
   106  
   107  	fsPivotLock  *types.Header // Pivot header on critical section entry (cannot change between retries)
   108  	fsPivotFails uint32        // Number of subsequent fast sync failures in the critical section
   109  
   110  	rttEstimate   uint64 // Round trip time to target for download requests
   111  	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)
   112  
   113  	// Statistics
   114  	syncStatsChainOrigin uint64       // Origin block number where syncing started at
   115  	syncStatsChainHeight uint64       // Highest block number known when syncing started
   116  	syncStatsStateDone   uint64       // Number of state trie entries already pulled
   117  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   118  
   119  	// Callbacks
   120  	hasHeader        headerCheckFn            // Checks if a header is present in the chain
   121  	hasBlockAndState blockAndStateCheckFn     // Checks if a block and associated state is present in the chain
   122  	getHeader        headerRetrievalFn        // Retrieves a header from the chain
   123  	getBlock         blockRetrievalFn         // Retrieves a block from the chain
   124  	headHeader       headHeaderRetrievalFn    // Retrieves the head header from the chain
   125  	headBlock        headBlockRetrievalFn     // Retrieves the head block from the chain
   126  	headFastBlock    headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
   127  	commitHeadBlock  headBlockCommitterFn     // Commits a manually assembled block as the chain head
   128  	getTd            tdRetrievalFn            // Retrieves the TD of a block from the chain
   129  	insertHeaders    headerChainInsertFn      // Injects a batch of headers into the chain
   130  	insertBlocks     blockChainInsertFn       // Injects a batch of blocks into the chain
   131  	insertReceipts   receiptChainInsertFn     // Injects a batch of blocks and their receipts into the chain
   132  	rollback         chainRollbackFn          // Removes a batch of recently added chain links
   133  	dropPeer         peerDropFn               // Drops a peer for misbehaving
   134  
   135  	// Status
   136  	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
   137  	synchronising   int32
   138  	notified        int32
   139  
   140  	// Channels
   141  	newPeerCh     chan *peer
   142  	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
   143  	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
   144  	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
   145  	stateCh       chan dataPack        // [eth/63] Channel receiving inbound node state data
   146  	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
   147  	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
   148  	stateWakeCh   chan bool            // [eth/63] Channel to signal the state fetcher of new tasks
   149  	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks
   150  
   151  	// Cancellation and termination
   152  	cancelPeer string        // Identifier of the peer currently being used as the master (cancel on drop)
   153  	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
   154  	cancelLock sync.RWMutex  // Lock to protect the cancel channel and peer in delivers
   155  
   156  	quitCh   chan struct{} // Quit channel to signal termination
   157  	quitLock sync.RWMutex  // Lock to prevent double closes
   158  
   159  	// Testing hooks
   160  	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
   161  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   162  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   163  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   164  }
   165  
   166  // New creates a new downloader to fetch hashes and blocks from remote peers.
   167  func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn,
   168  	getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn,
   169  	headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn,
   170  	insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader {
   171  
   172  	dl := &Downloader{
   173  		mode:             mode,
   174  		mux:              mux,
   175  		queue:            newQueue(stateDb),
   176  		peers:            newPeerSet(),
   177  		rttEstimate:      uint64(rttMaxEstimate),
   178  		rttConfidence:    uint64(1000000),
   179  		hasHeader:        hasHeader,
   180  		hasBlockAndState: hasBlockAndState,
   181  		getHeader:        getHeader,
   182  		getBlock:         getBlock,
   183  		headHeader:       headHeader,
   184  		headBlock:        headBlock,
   185  		headFastBlock:    headFastBlock,
   186  		commitHeadBlock:  commitHeadBlock,
   187  		getTd:            getTd,
   188  		insertHeaders:    insertHeaders,
   189  		insertBlocks:     insertBlocks,
   190  		insertReceipts:   insertReceipts,
   191  		rollback:         rollback,
   192  		dropPeer:         dropPeer,
   193  		newPeerCh:        make(chan *peer, 1),
   194  		headerCh:         make(chan dataPack, 1),
   195  		bodyCh:           make(chan dataPack, 1),
   196  		receiptCh:        make(chan dataPack, 1),
   197  		stateCh:          make(chan dataPack, 1),
   198  		bodyWakeCh:       make(chan bool, 1),
   199  		receiptWakeCh:    make(chan bool, 1),
   200  		stateWakeCh:      make(chan bool, 1),
   201  		headerProcCh:     make(chan []*types.Header, 1),
   202  		quitCh:           make(chan struct{}),
   203  	}
   204  	go dl.qosTuner()
   205  	return dl
   206  }
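
         // A rough usage sketch, assuming the callbacks get bound to a blockchain and
         // a peer-removal function elsewhere (as the eth protocol manager does); the
         // bound method names below are purely illustrative:
         //
         //	dl := New(FastSync, stateDb, mux,
         //		chain.HasHeader, chain.HasBlockAndState, chain.GetHeader, chain.GetBlock,
         //		chain.CurrentHeader, chain.CurrentBlock, chain.CurrentFastBlock, chain.FastSyncCommitHead,
         //		chain.GetTd, chain.InsertHeaderChain, chain.InsertChain, chain.InsertReceiptChain,
         //		chain.Rollback, removePeer)
         //
         //	// Peers register as they connect; a sync cycle is then driven against one of them.
         //	_ = dl.RegisterPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBodies, getReceipts, getNodeData)
         //	err := dl.Synchronise(id, head, td, FastSync)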
   207  
   208  // Progress retrieves the synchronisation boundaries, specifically the origin
    209  // block where synchronisation started (it may have failed or been suspended); the
    210  // block or header the sync is currently at; and the latest known block the sync targets.
   211  //
   212  // In addition, during the state download phase of fast synchronisation the number
   213  // of processed and the total number of known states are also returned. Otherwise
   214  // these are zero.
   215  func (d *Downloader) Progress() ethereum.SyncProgress {
   216  	// Fetch the pending state count outside of the lock to prevent unforeseen deadlocks
   217  	pendingStates := uint64(d.queue.PendingNodeData())
   218  
   219  	// Lock the current stats and return the progress
   220  	d.syncStatsLock.RLock()
   221  	defer d.syncStatsLock.RUnlock()
   222  
   223  	current := uint64(0)
   224  	switch d.mode {
   225  	case FullSync:
   226  		current = d.headBlock().NumberU64()
   227  	case FastSync:
   228  		current = d.headFastBlock().NumberU64()
   229  	case LightSync:
   230  		current = d.headHeader().Number.Uint64()
   231  	}
   232  	return ethereum.SyncProgress{
   233  		StartingBlock: d.syncStatsChainOrigin,
   234  		CurrentBlock:  current,
   235  		HighestBlock:  d.syncStatsChainHeight,
   236  		PulledStates:  d.syncStatsStateDone,
   237  		KnownStates:   d.syncStatsStateDone + pendingStates,
   238  	}
   239  }
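
         // For example, a caller polling Progress can derive a rough completion ratio
         // like this (illustrative only):
         //
         //	p := d.Progress()
         //	if p.HighestBlock > p.StartingBlock {
         //		done := float64(p.CurrentBlock-p.StartingBlock) / float64(p.HighestBlock-p.StartingBlock)
         //		fmt.Printf("sync %.1f%% complete\n", 100*done)
         //	}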
   240  
   241  // Synchronising returns whether the downloader is currently retrieving blocks.
   242  func (d *Downloader) Synchronising() bool {
   243  	return atomic.LoadInt32(&d.synchronising) > 0
   244  }
   245  
    246  // RegisterPeer injects a new download peer into the set of block sources to be
    247  // used for fetching hashes and blocks from.
   248  func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHeadRetrievalFn,
   249  	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
   250  	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
   251  
   252  	glog.V(logger.Detail).Infoln("Registering peer", id)
   253  	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
   254  		glog.V(logger.Error).Infoln("Register failed:", err)
   255  		return err
   256  	}
   257  	d.qosReduceConfidence()
   258  
   259  	return nil
   260  }
   261  
    262  // UnregisterPeer removes a peer from the known list, preventing any action from
   263  // the specified peer. An effort is also made to return any pending fetches into
   264  // the queue.
   265  func (d *Downloader) UnregisterPeer(id string) error {
   266  	// Unregister the peer from the active peer set and revoke any fetch tasks
   267  	glog.V(logger.Detail).Infoln("Unregistering peer", id)
   268  	if err := d.peers.Unregister(id); err != nil {
   269  		glog.V(logger.Error).Infoln("Unregister failed:", err)
   270  		return err
   271  	}
   272  	d.queue.Revoke(id)
   273  
   274  	// If this peer was the master peer, abort sync immediately
   275  	d.cancelLock.RLock()
   276  	master := id == d.cancelPeer
   277  	d.cancelLock.RUnlock()
   278  
   279  	if master {
   280  		d.cancel()
   281  	}
   282  	return nil
   283  }
   284  
    285  // Synchronise tries to sync up our local block chain with a remote peer, adding
    286  // various sanity checks and wrapping the operation with log entries.
   287  func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
   288  	glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)
   289  
   290  	err := d.synchronise(id, head, td, mode)
   291  	switch err {
   292  	case nil:
   293  		glog.V(logger.Detail).Infof("Synchronisation completed")
   294  
   295  	case errBusy:
   296  		glog.V(logger.Detail).Infof("Synchronisation already in progress")
   297  
   298  	case errTimeout, errBadPeer, errStallingPeer,
   299  		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
   300  		errInvalidAncestor, errInvalidChain:
   301  		glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
   302  		d.dropPeer(id)
   303  
   304  	default:
   305  		glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
   306  	}
   307  	return err
   308  }
   309  
    310  // synchronise will select the peer and use it for synchronising. If an empty string is given
    311  // it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
    312  // checks fail an error will be returned. This method is synchronous.
   313  func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
   314  	// Mock out the synchronisation if testing
   315  	if d.synchroniseMock != nil {
   316  		return d.synchroniseMock(id, hash)
   317  	}
   318  	// Make sure only one goroutine is ever allowed past this point at once
   319  	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
   320  		return errBusy
   321  	}
   322  	defer atomic.StoreInt32(&d.synchronising, 0)
   323  
   324  	// Post a user notification of the sync (only once per session)
   325  	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
   326  		glog.V(logger.Info).Infoln("Block synchronisation started")
   327  	}
   328  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   329  	d.queue.Reset()
   330  	d.peers.Reset()
   331  
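         	// The selects below drain any values buffered from a previous sync cycle
         	// without blocking: the default case fires as soon as a channel is empty,
         	// so the wake, delivery and header processing channels start this run clean.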
   332  	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
   333  		select {
   334  		case <-ch:
   335  		default:
   336  		}
   337  	}
   338  	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
   339  		for empty := false; !empty; {
   340  			select {
   341  			case <-ch:
   342  			default:
   343  				empty = true
   344  			}
   345  		}
   346  	}
   347  	for empty := false; !empty; {
   348  		select {
   349  		case <-d.headerProcCh:
   350  		default:
   351  			empty = true
   352  		}
   353  	}
   354  	// Create cancel channel for aborting mid-flight and mark the master peer
   355  	d.cancelLock.Lock()
   356  	d.cancelCh = make(chan struct{})
   357  	d.cancelPeer = id
   358  	d.cancelLock.Unlock()
   359  
   360  	defer d.cancel() // No matter what, we can't leave the cancel channel open
   361  
   362  	// Set the requested sync mode, unless it's forbidden
   363  	d.mode = mode
   364  	if d.mode == FastSync && atomic.LoadUint32(&d.fsPivotFails) >= fsCriticalTrials {
   365  		d.mode = FullSync
   366  	}
   367  	// Retrieve the origin peer and initiate the downloading process
   368  	p := d.peers.Peer(id)
   369  	if p == nil {
   370  		return errUnknownPeer
   371  	}
   372  	return d.syncWithPeer(p, hash, td)
   373  }
   374  
   375  // syncWithPeer starts a block synchronization based on the hash chain from the
   376  // specified peer and head hash.
   377  func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) {
   378  	d.mux.Post(StartEvent{})
   379  	defer func() {
   380  		// reset on error
   381  		if err != nil {
   382  			d.mux.Post(FailedEvent{err})
   383  		} else {
   384  			d.mux.Post(DoneEvent{})
   385  		}
   386  	}()
   387  	if p.version < 62 {
   388  		return errTooOld
   389  	}
   390  
   391  	glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
   392  	defer func(start time.Time) {
   393  		glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
   394  	}(time.Now())
   395  
   396  	// Look up the sync boundaries: the common ancestor and the target block
   397  	latest, err := d.fetchHeight(p)
   398  	if err != nil {
   399  		return err
   400  	}
   401  	height := latest.Number.Uint64()
   402  
   403  	origin, err := d.findAncestor(p, height)
   404  	if err != nil {
   405  		return err
   406  	}
   407  	d.syncStatsLock.Lock()
   408  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   409  		d.syncStatsChainOrigin = origin
   410  	}
   411  	d.syncStatsChainHeight = height
   412  	d.syncStatsLock.Unlock()
   413  
   414  	// Initiate the sync using a concurrent header and content retrieval algorithm
   415  	pivot := uint64(0)
   416  	switch d.mode {
   417  	case LightSync:
   418  		pivot = height
   419  	case FastSync:
   420  		// Calculate the new fast/slow sync pivot point
   421  		if d.fsPivotLock == nil {
   422  			pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
   423  			if err != nil {
   424  				panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
   425  			}
   426  			if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
   427  				pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
   428  			}
   429  		} else {
   430  			// Pivot point locked in, use this and do not pick a new one!
   431  			pivot = d.fsPivotLock.Number.Uint64()
   432  		}
   433  		// If the point is below the origin, move origin back to ensure state download
   434  		if pivot < origin {
   435  			if pivot > 0 {
   436  				origin = pivot - 1
   437  			} else {
   438  				origin = 0
   439  			}
   440  		}
   441  		glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
   442  	}
   443  	d.queue.Prepare(origin+1, d.mode, pivot, latest)
   444  	if d.syncInitHook != nil {
   445  		d.syncInitHook(origin, height)
   446  	}
   447  	return d.spawnSync(origin+1,
   448  		func() error { return d.fetchHeaders(p, origin+1) },    // Headers are always retrieved
   449  		func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
   450  		func() error { return d.fetchBodies(origin + 1) },      // Bodies are retrieved during normal and fast sync
   451  		func() error { return d.fetchReceipts(origin + 1) },    // Receipts are retrieved during fast sync
   452  		func() error { return d.fetchNodeData() },              // Node state data is retrieved during fast sync
   453  	)
   454  }
   455  
    456  // spawnSync runs d.processContent and all given fetcher functions to completion in
   457  // separate goroutines, returning the first error that appears.
   458  func (d *Downloader) spawnSync(origin uint64, fetchers ...func() error) error {
   459  	var wg sync.WaitGroup
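         	// The error channel is buffered for every fetcher plus the content processor,
         	// so each goroutine can report its result without blocking even after this
         	// function stops draining errc on the first failure.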
   460  	errc := make(chan error, len(fetchers)+1)
   461  	wg.Add(len(fetchers) + 1)
   462  	go func() { defer wg.Done(); errc <- d.processContent() }()
   463  	for _, fn := range fetchers {
   464  		fn := fn
   465  		go func() { defer wg.Done(); errc <- fn() }()
   466  	}
   467  	// Wait for the first error, then terminate the others.
   468  	var err error
   469  	for i := 0; i < len(fetchers)+1; i++ {
   470  		if i == len(fetchers) {
   471  			// Close the queue when all fetchers have exited.
   472  			// This will cause the block processor to end when
   473  			// it has processed the queue.
   474  			d.queue.Close()
   475  		}
   476  		if err = <-errc; err != nil {
   477  			break
   478  		}
   479  	}
   480  	d.queue.Close()
   481  	d.cancel()
   482  	wg.Wait()
   483  
   484  	// If sync failed in the critical section, bump the fail counter
   485  	if err != nil && d.mode == FastSync && d.fsPivotLock != nil {
   486  		atomic.AddUint32(&d.fsPivotFails, 1)
   487  	}
   488  	return err
   489  }
   490  
    491  // cancel cancels all pending operations by closing the mid-flight cancel channel.
    492  // It is safe to call repeatedly: an already closed channel is left untouched.
   493  func (d *Downloader) cancel() {
   494  	// Close the current cancel channel
   495  	d.cancelLock.Lock()
   496  	if d.cancelCh != nil {
   497  		select {
   498  		case <-d.cancelCh:
   499  			// Channel was already closed
   500  		default:
   501  			close(d.cancelCh)
   502  		}
   503  	}
   504  	d.cancelLock.Unlock()
   505  }
   506  
   507  // Terminate interrupts the downloader, canceling all pending operations.
   508  // The downloader cannot be reused after calling Terminate.
   509  func (d *Downloader) Terminate() {
   510  	// Close the termination channel (make sure double close is allowed)
   511  	d.quitLock.Lock()
   512  	select {
   513  	case <-d.quitCh:
   514  	default:
   515  		close(d.quitCh)
   516  	}
   517  	d.quitLock.Unlock()
   518  
   519  	// Cancel any pending download requests
   520  	d.cancel()
   521  }
   522  
   523  // fetchHeight retrieves the head header of the remote peer to aid in estimating
   524  // the total time a pending synchronisation would take.
   525  func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
   526  	glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
   527  
   528  	// Request the advertised remote head block and wait for the response
   529  	head, _ := p.currentHead()
   530  	go p.getRelHeaders(head, 1, 0, false)
   531  
   532  	timeout := time.After(d.requestTTL())
   533  	for {
   534  		select {
   535  		case <-d.cancelCh:
   536  			return nil, errCancelBlockFetch
   537  
   538  		case packet := <-d.headerCh:
   539  			// Discard anything not from the origin peer
   540  			if packet.PeerId() != p.id {
   541  				glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
   542  				break
   543  			}
   544  			// Make sure the peer actually gave something valid
   545  			headers := packet.(*headerPack).headers
   546  			if len(headers) != 1 {
   547  				glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
   548  				return nil, errBadPeer
   549  			}
   550  			return headers[0], nil
   551  
   552  		case <-timeout:
   553  			glog.V(logger.Debug).Infof("%v: head header timeout", p)
   554  			return nil, errTimeout
   555  
   556  		case <-d.bodyCh:
   557  		case <-d.stateCh:
   558  		case <-d.receiptCh:
   559  			// Out of bounds delivery, ignore
   560  		}
   561  	}
   562  }
   563  
   564  // findAncestor tries to locate the common ancestor link of the local chain and
    565  // a remote peer's blockchain. In the general case when our node was in sync and
   566  // on the correct chain, checking the top N links should already get us a match.
   567  // In the rare scenario when we ended up on a long reorganisation (i.e. none of
   568  // the head links match), we do a binary search to find the common ancestor.
   569  func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
   570  	glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height)
   571  
   572  	// Figure out the valid ancestor range to prevent rewrite attacks
   573  	floor, ceil := int64(-1), d.headHeader().Number.Uint64()
   574  	if d.mode == FullSync {
   575  		ceil = d.headBlock().NumberU64()
   576  	} else if d.mode == FastSync {
   577  		ceil = d.headFastBlock().NumberU64()
   578  	}
   579  	if ceil >= MaxForkAncestry {
   580  		floor = int64(ceil - MaxForkAncestry)
   581  	}
   582  	// Request the topmost blocks to short circuit binary ancestor lookup
   583  	head := ceil
   584  	if head > height {
   585  		head = height
   586  	}
   587  	from := int64(head) - int64(MaxHeaderFetch)
   588  	if from < 0 {
   589  		from = 0
   590  	}
   591  	// Span out with 15 block gaps into the future to catch bad head reports
   592  	limit := 2 * MaxHeaderFetch / 16
   593  	count := 1 + int((int64(ceil)-from)/16)
   594  	if count > limit {
   595  		count = limit
   596  	}
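         	// With the defaults above (MaxHeaderFetch = 192) this requests at most
         	// 2*192/16 = 24 headers spaced 16 blocks apart: the first sits MaxHeaderFetch
         	// blocks below the head used here, and the last may reach up to 176 blocks
         	// beyond it, catching peers that report a slightly off head.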
   597  	go p.getAbsHeaders(uint64(from), count, 15, false)
   598  
   599  	// Wait for the remote response to the head fetch
   600  	number, hash := uint64(0), common.Hash{}
   601  	timeout := time.After(d.requestTTL())
   602  
   603  	for finished := false; !finished; {
   604  		select {
   605  		case <-d.cancelCh:
   606  			return 0, errCancelHeaderFetch
   607  
   608  		case packet := <-d.headerCh:
   609  			// Discard anything not from the origin peer
   610  			if packet.PeerId() != p.id {
   611  				glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
   612  				break
   613  			}
   614  			// Make sure the peer actually gave something valid
   615  			headers := packet.(*headerPack).headers
   616  			if len(headers) == 0 {
   617  				glog.V(logger.Warn).Infof("%v: empty head header set", p)
   618  				return 0, errEmptyHeaderSet
   619  			}
   620  			// Make sure the peer's reply conforms to the request
   621  			for i := 0; i < len(headers); i++ {
   622  				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
   623  					glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number)
   624  					return 0, errInvalidChain
   625  				}
   626  			}
   627  			// Check if a common ancestor was found
   628  			finished = true
   629  			for i := len(headers) - 1; i >= 0; i-- {
   630  				// Skip any headers that underflow/overflow our requested set
   631  				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
   632  					continue
   633  				}
   634  				// Otherwise check if we already know the header or not
   635  				if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
   636  					number, hash = headers[i].Number.Uint64(), headers[i].Hash()
   637  
   638  					// If every header is known, even future ones, the peer straight out lied about its head
   639  					if number > height && i == limit-1 {
   640  						glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number)
   641  						return 0, errStallingPeer
   642  					}
   643  					break
   644  				}
   645  			}
   646  
   647  		case <-timeout:
   648  			glog.V(logger.Debug).Infof("%v: head header timeout", p)
   649  			return 0, errTimeout
   650  
   651  		case <-d.bodyCh:
   652  		case <-d.stateCh:
   653  		case <-d.receiptCh:
   654  			// Out of bounds delivery, ignore
   655  		}
   656  	}
   657  	// If the head fetch already found an ancestor, return
   658  	if !common.EmptyHash(hash) {
   659  		if int64(number) <= floor {
   660  			glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
   661  			return 0, errInvalidAncestor
   662  		}
   663  		glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
   664  		return number, nil
   665  	}
   666  	// Ancestor not found, we need to binary search over our chain
   667  	start, end := uint64(0), head
   668  	if floor > 0 {
   669  		start = uint64(floor)
   670  	}
   671  	for start+1 < end {
   672  		// Split our chain interval in two, and request the hash to cross check
   673  		check := (start + end) / 2
   674  
   675  		timeout := time.After(d.requestTTL())
   676  		go p.getAbsHeaders(uint64(check), 1, 0, false)
   677  
   678  		// Wait until a reply arrives to this request
   679  		for arrived := false; !arrived; {
   680  			select {
   681  			case <-d.cancelCh:
   682  				return 0, errCancelHeaderFetch
   683  
   684  			case packer := <-d.headerCh:
   685  				// Discard anything not from the origin peer
   686  				if packer.PeerId() != p.id {
   687  					glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId())
   688  					break
   689  				}
   690  				// Make sure the peer actually gave something valid
   691  				headers := packer.(*headerPack).headers
   692  				if len(headers) != 1 {
   693  					glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers))
   694  					return 0, errBadPeer
   695  				}
   696  				arrived = true
   697  
   698  				// Modify the search interval based on the response
   699  				if (d.mode == FullSync && !d.hasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) {
   700  					end = check
   701  					break
   702  				}
   703  				header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
   704  				if header.Number.Uint64() != check {
   705  					glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check)
   706  					return 0, errBadPeer
   707  				}
   708  				start = check
   709  
   710  			case <-timeout:
   711  				glog.V(logger.Debug).Infof("%v: search header timeout", p)
   712  				return 0, errTimeout
   713  
   714  			case <-d.bodyCh:
   715  			case <-d.stateCh:
   716  			case <-d.receiptCh:
   717  				// Out of bounds delivery, ignore
   718  			}
   719  		}
   720  	}
   721  	// Ensure valid ancestry and return
   722  	if int64(start) <= floor {
   723  		glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
   724  		return 0, errInvalidAncestor
   725  	}
   726  	glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
   727  	return start, nil
   728  }
   729  
   730  // fetchHeaders keeps retrieving headers concurrently from the number
   731  // requested, until no more are returned, potentially throttling on the way. To
   732  // facilitate concurrency but still protect against malicious nodes sending bad
   733  // headers, we construct a header chain skeleton using the "origin" peer we are
   734  // syncing with, and fill in the missing headers using anyone else. Headers from
   735  // other peers are only accepted if they map cleanly to the skeleton. If no one
   736  // can fill in the skeleton - not even the origin peer - it's assumed invalid and
   737  // the origin is dropped.
   738  func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
   739  	glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from)
   740  	defer glog.V(logger.Debug).Infof("%v: header download terminated", p)
   741  
   742  	// Create a timeout timer, and the associated header fetcher
   743  	skeleton := true            // Skeleton assembly phase or finishing up
   744  	request := time.Now()       // time of the last skeleton fetch request
   745  	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
   746  	<-timeout.C                 // timeout channel should be initially empty
   747  	defer timeout.Stop()
   748  
   749  	getHeaders := func(from uint64) {
   750  		request = time.Now()
   751  		timeout.Reset(d.requestTTL())
   752  
   753  		if skeleton {
   754  			glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from)
   755  			go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
   756  		} else {
   757  			glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from)
   758  			go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
   759  		}
   760  	}
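         	// A single skeleton request thus covers MaxSkeletonSize*MaxHeaderFetch =
         	// 128*192 = 24576 headers: the origin peer supplies every 192nd header and
         	// fillHeaderSkeleton fetches the gaps concurrently from any available peer.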
   761  	// Start pulling the header chain skeleton until all is done
   762  	getHeaders(from)
   763  
   764  	for {
   765  		select {
   766  		case <-d.cancelCh:
   767  			return errCancelHeaderFetch
   768  
   769  		case packet := <-d.headerCh:
   770  			// Make sure the active peer is giving us the skeleton headers
   771  			if packet.PeerId() != p.id {
   772  				glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId())
   773  				break
   774  			}
   775  			headerReqTimer.UpdateSince(request)
   776  			timeout.Stop()
   777  
   778  			// If the skeleton's finished, pull any remaining head headers directly from the origin
   779  			if packet.Items() == 0 && skeleton {
   780  				skeleton = false
   781  				getHeaders(from)
   782  				continue
   783  			}
   784  			// If no more headers are inbound, notify the content fetchers and return
   785  			if packet.Items() == 0 {
   786  				glog.V(logger.Debug).Infof("%v: no available headers", p)
   787  				select {
   788  				case d.headerProcCh <- nil:
   789  					return nil
   790  				case <-d.cancelCh:
   791  					return errCancelHeaderFetch
   792  				}
   793  			}
   794  			headers := packet.(*headerPack).headers
   795  
   796  			// If we received a skeleton batch, resolve internals concurrently
   797  			if skeleton {
   798  				filled, proced, err := d.fillHeaderSkeleton(from, headers)
   799  				if err != nil {
   800  					glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err)
   801  					return errInvalidChain
   802  				}
   803  				headers = filled[proced:]
   804  				from += uint64(proced)
   805  			}
   806  			// Insert all the new headers and fetch the next batch
   807  			if len(headers) > 0 {
   808  				glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
   809  				select {
   810  				case d.headerProcCh <- headers:
   811  				case <-d.cancelCh:
   812  					return errCancelHeaderFetch
   813  				}
   814  				from += uint64(len(headers))
   815  			}
   816  			getHeaders(from)
   817  
   818  		case <-timeout.C:
   819  			// Header retrieval timed out, consider the peer bad and drop
   820  			glog.V(logger.Debug).Infof("%v: header request timed out", p)
   821  			headerTimeoutMeter.Mark(1)
   822  			d.dropPeer(p.id)
   823  
   824  			// Finish the sync gracefully instead of dumping the gathered data though
   825  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
   826  				select {
   827  				case ch <- false:
   828  				case <-d.cancelCh:
   829  				}
   830  			}
   831  			select {
   832  			case d.headerProcCh <- nil:
   833  			case <-d.cancelCh:
   834  			}
   835  			return errBadPeer
   836  		}
   837  	}
   838  }
   839  
   840  // fillHeaderSkeleton concurrently retrieves headers from all our available peers
   841  // and maps them to the provided skeleton header chain.
   842  //
    843  // Any partial results from the beginning of the skeleton are (if possible) forwarded
   844  // immediately to the header processor to keep the rest of the pipeline full even
   845  // in the case of header stalls.
   846  //
    847  // The method returns the entire filled skeleton and also the number of headers
   848  // already forwarded for processing.
   849  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
   850  	glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from)
   851  	d.queue.ScheduleSkeleton(from, skeleton)
   852  
   853  	var (
   854  		deliver = func(packet dataPack) (int, error) {
   855  			pack := packet.(*headerPack)
   856  			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
   857  		}
   858  		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
   859  		throttle = func() bool { return false }
   860  		reserve  = func(p *peer, count int) (*fetchRequest, bool, error) {
   861  			return d.queue.ReserveHeaders(p, count), false, nil
   862  		}
   863  		fetch    = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
   864  		capacity = func(p *peer) int { return p.HeaderCapacity(d.requestRTT()) }
   865  		setIdle  = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) }
   866  	)
   867  	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
   868  		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
   869  		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
   870  
   871  	glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err)
   872  
   873  	filled, proced := d.queue.RetrieveHeaders()
   874  	return filled, proced, err
   875  }
   876  
   877  // fetchBodies iteratively downloads the scheduled block bodies, taking any
   878  // available peers, reserving a chunk of blocks for each, waiting for delivery
   879  // and also periodically checking for timeouts.
   880  func (d *Downloader) fetchBodies(from uint64) error {
   881  	glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
   882  
   883  	var (
   884  		deliver = func(packet dataPack) (int, error) {
   885  			pack := packet.(*bodyPack)
   886  			return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
   887  		}
   888  		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
   889  		fetch    = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) }
   890  		capacity = func(p *peer) int { return p.BlockCapacity(d.requestRTT()) }
   891  		setIdle  = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) }
   892  	)
   893  	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
   894  		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
   895  		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
   896  
   897  	glog.V(logger.Debug).Infof("Block body download terminated: %v", err)
   898  	return err
   899  }
   900  
   901  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
   902  // available peers, reserving a chunk of receipts for each, waiting for delivery
   903  // and also periodically checking for timeouts.
   904  func (d *Downloader) fetchReceipts(from uint64) error {
   905  	glog.V(logger.Debug).Infof("Downloading receipts from #%d", from)
   906  
   907  	var (
   908  		deliver = func(packet dataPack) (int, error) {
   909  			pack := packet.(*receiptPack)
   910  			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
   911  		}
   912  		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
   913  		fetch    = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) }
   914  		capacity = func(p *peer) int { return p.ReceiptCapacity(d.requestRTT()) }
   915  		setIdle  = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) }
   916  	)
   917  	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
   918  		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
   919  		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
   920  
   921  	glog.V(logger.Debug).Infof("Receipt download terminated: %v", err)
   922  	return err
   923  }
   924  
   925  // fetchNodeData iteratively downloads the scheduled state trie nodes, taking any
   926  // available peers, reserving a chunk of nodes for each, waiting for delivery and
   927  // also periodically checking for timeouts.
   928  func (d *Downloader) fetchNodeData() error {
   929  	glog.V(logger.Debug).Infof("Downloading node state data")
   930  
   931  	var (
   932  		deliver = func(packet dataPack) (int, error) {
   933  			start := time.Now()
   934  			return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
   935  				// If the peer returned old-requested data, forgive
   936  				if err == trie.ErrNotRequested {
   937  					glog.V(logger.Debug).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId())
   938  					return
   939  				}
   940  				if err != nil {
   941  					// If the node data processing failed, the root hash is very wrong, abort
    942  					glog.V(logger.Error).Infof("peer %s: state processing failed: %v", packet.PeerId(), err)
   943  					d.cancel()
   944  					return
   945  				}
   946  				// Processing succeeded, notify state fetcher of continuation
   947  				pending := d.queue.PendingNodeData()
   948  				if pending > 0 {
   949  					select {
   950  					case d.stateWakeCh <- true:
   951  					default:
   952  					}
   953  				}
   954  				d.syncStatsLock.Lock()
   955  				d.syncStatsStateDone += uint64(delivered)
   956  				syncStatsStateDone := d.syncStatsStateDone // Thread safe copy for the log below
   957  				d.syncStatsLock.Unlock()
   958  
   959  				// If real database progress was made, reset any fast-sync pivot failure
   960  				if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
   961  					glog.V(logger.Debug).Infof("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails))
   962  					atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
   963  				}
   964  				// Log a message to the user and return
   965  				if delivered > 0 {
   966  					glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending)
   967  				}
   968  			})
   969  		}
   970  		expire   = func() map[string]int { return d.queue.ExpireNodeData(d.requestTTL()) }
   971  		throttle = func() bool { return false }
   972  		reserve  = func(p *peer, count int) (*fetchRequest, bool, error) {
   973  			return d.queue.ReserveNodeData(p, count), false, nil
   974  		}
   975  		fetch    = func(p *peer, req *fetchRequest) error { return p.FetchNodeData(req) }
   976  		capacity = func(p *peer) int { return p.NodeDataCapacity(d.requestRTT()) }
   977  		setIdle  = func(p *peer, accepted int) { p.SetNodeDataIdle(accepted) }
   978  	)
   979  	err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire,
   980  		d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
   981  		d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
   982  
   983  	glog.V(logger.Debug).Infof("Node state data download terminated: %v", err)
   984  	return err
   985  }
   986  
   987  // fetchParts iteratively downloads scheduled block parts, taking any available
   988  // peers, reserving a chunk of fetch requests for each, waiting for delivery and
   989  // also periodically checking for timeouts.
   990  //
   991  // As the scheduling/timeout logic mostly is the same for all downloaded data
   992  // types, this method is used by each for data gathering and is instrumented with
   993  // various callbacks to handle the slight differences between processing them.
   994  //
   995  // The instrumentation parameters:
   996  //  - errCancel:   error type to return if the fetch operation is cancelled (mostly makes logging nicer)
   997  //  - deliveryCh:  channel from which to retrieve downloaded data packets (merged from all concurrent peers)
   998  //  - deliver:     processing callback to deliver data packets into type specific download queues (usually within `queue`)
   999  //  - wakeCh:      notification channel for waking the fetcher when new tasks are available (or sync completed)
  1000  //  - expire:      task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
  1001  //  - pending:     task callback for the number of requests still needing download (detect completion/non-completability)
  1002  //  - inFlight:    task callback for the number of in-progress requests (wait for all active downloads to finish)
  1003  //  - throttle:    task callback to check if the processing queue is full and activate throttling (bound memory use)
  1004  //  - reserve:     task callback to reserve new download tasks to a particular peer (also signals partial completions)
  1005  //  - fetchHook:   tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
  1006  //  - fetch:       network callback to actually send a particular download request to a physical remote peer
  1007  //  - cancel:      task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
  1008  //  - capacity:    network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
  1009  //  - idle:        network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
  1010  //  - setIdle:     network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
   1011  //  - kind:        textual label of the type being downloaded to display in log messages
  1012  func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
  1013  	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
  1014  	fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int,
  1015  	idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error {
  1016  
  1017  	// Create a ticker to detect expired retrieval tasks
  1018  	ticker := time.NewTicker(100 * time.Millisecond)
  1019  	defer ticker.Stop()
  1020  
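         	// The update channel has a buffer of one so the delivery, wake and ticker
         	// cases below can coalesce their triggers: a non-blocking send either queues
         	// exactly one pending progress check or is dropped because one is already queued.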
  1021  	update := make(chan struct{}, 1)
  1022  
  1023  	// Prepare the queue and fetch block parts until the block header fetcher's done
  1024  	finished := false
  1025  	for {
  1026  		select {
  1027  		case <-d.cancelCh:
  1028  			return errCancel
  1029  
  1030  		case packet := <-deliveryCh:
   1031  			// If the peer was previously banned and failed to deliver its pack
   1032  			// in a reasonable time frame, ignore its message.
  1033  			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
  1034  				// Deliver the received chunk of data and check chain validity
  1035  				accepted, err := deliver(packet)
  1036  				if err == errInvalidChain {
  1037  					return err
  1038  				}
   1039  				// Unless a peer delivered something completely different from what was
   1040  				// requested (usually caused by a timed out request which came through in the
   1041  				// end), set it to idle. If the delivery's stale, the peer should have already been idled.
  1042  				if err != errStaleDelivery {
  1043  					setIdle(peer, accepted)
  1044  				}
  1045  				// Issue a log to the user to see what's going on
  1046  				switch {
  1047  				case err == nil && packet.Items() == 0:
  1048  					glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
  1049  				case err == nil:
  1050  					glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
  1051  				default:
  1052  					glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err)
  1053  				}
  1054  			}
  1055  			// Blocks assembled, try to update the progress
  1056  			select {
  1057  			case update <- struct{}{}:
  1058  			default:
  1059  			}
  1060  
  1061  		case cont := <-wakeCh:
  1062  			// The header fetcher sent a continuation flag, check if it's done
  1063  			if !cont {
  1064  				finished = true
  1065  			}
  1066  			// Headers arrive, try to update the progress
  1067  			select {
  1068  			case update <- struct{}{}:
  1069  			default:
  1070  			}
  1071  
  1072  		case <-ticker.C:
  1073  			// Sanity check update the progress
  1074  			select {
  1075  			case update <- struct{}{}:
  1076  			default:
  1077  			}
  1078  
  1079  		case <-update:
  1080  			// Short circuit if we lost all our peers
  1081  			if d.peers.Len() == 0 {
  1082  				return errNoPeers
  1083  			}
  1084  			// Check for fetch request timeouts and demote the responsible peers
  1085  			for pid, fails := range expire() {
  1086  				if peer := d.peers.Peer(pid); peer != nil {
   1087  					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
   1088  					// ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal request
   1089  					// times out, then sync-wise we do need to get rid of the peer.
   1090  					//
   1091  					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
   1092  					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
   1093  					// how the response times react, so it always requests one more than the minimum (i.e. min 2).
  1094  					if fails > 2 {
  1095  						glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
  1096  						setIdle(peer, 0)
  1097  					} else {
  1098  						glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind))
  1099  						d.dropPeer(pid)
  1100  					}
  1101  				}
  1102  			}
  1103  			// If there's nothing more to fetch, wait or terminate
  1104  			if pending() == 0 {
  1105  				if !inFlight() && finished {
  1106  					glog.V(logger.Debug).Infof("%s fetching completed", kind)
  1107  					return nil
  1108  				}
  1109  				break
  1110  			}
  1111  			// Send a download request to all idle peers, until throttled
  1112  			progressed, throttled, running := false, false, inFlight()
  1113  			idles, total := idle()
  1114  
  1115  			for _, peer := range idles {
  1116  				// Short circuit if throttling activated
  1117  				if throttle() {
  1118  					throttled = true
  1119  					break
  1120  				}
  1121  				// Reserve a chunk of fetches for a peer. A nil can mean either that
  1122  				// no more headers are available, or that the peer is known not to
  1123  				// have them.
  1124  				request, progress, err := reserve(peer, capacity(peer))
  1125  				if err != nil {
  1126  					return err
  1127  				}
  1128  				if progress {
  1129  					progressed = true
  1130  				}
  1131  				if request == nil {
  1132  					continue
  1133  				}
  1134  				if glog.V(logger.Detail) {
  1135  					if request.From > 0 {
  1136  						glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
  1137  					} else if len(request.Headers) > 0 {
  1138  						glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
  1139  					} else {
  1140  						glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
  1141  					}
  1142  				}
  1143  				// Fetch the chunk and make sure any errors return the hashes to the queue
  1144  				if fetchHook != nil {
  1145  					fetchHook(request.Headers)
  1146  				}
  1147  				if err := fetch(peer, request); err != nil {
   1148  					// Although we could try to fix this, this error really
  1149  					// means that we've double allocated a fetch task to a peer. If that is the
  1150  					// case, the internal state of the downloader and the queue is very wrong so
  1151  					// better hard crash and note the error instead of silently accumulating into
  1152  					// a much bigger issue.
  1153  					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, strings.ToLower(kind)))
  1154  				}
  1155  				running = true
  1156  			}
  1157  			// Make sure that we have peers available for fetching. If all peers have been tried
   1158  			// and all of them failed, throw an error.
  1159  			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
  1160  				return errPeersUnavailable
  1161  			}
  1162  		}
  1163  	}
  1164  }
  1165  
  1166  // processHeaders takes batches of retrieved headers from an input channel and
  1167  // keeps processing and scheduling them into the header chain and downloader's
  1168  // queue until the stream ends or a failure occurs.
  1169  func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
  1170  	// Calculate the pivoting point for switching from fast to slow sync
  1171  	pivot := d.queue.FastSyncPivot()
  1172  
  1173  	// Keep a count of uncertain headers to roll back
  1174  	rollback := []*types.Header{}
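        	// These headers are only sparsely verified during fast/light sync; if processing
        	// fails or is cancelled before they are confirmed, the deferred handler below
        	// unwinds them from the local chain (rollback is cleared on the success path).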
  1175  	defer func() {
  1176  		if len(rollback) > 0 {
  1177  			// Flatten the headers and roll them back
  1178  			hashes := make([]common.Hash, len(rollback))
  1179  			for i, header := range rollback {
  1180  				hashes[i] = header.Hash()
  1181  			}
  1182  			lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, common.Big0, common.Big0
  1183  			if d.headFastBlock != nil {
  1184  				lastFastBlock = d.headFastBlock().Number()
  1185  			}
  1186  			if d.headBlock != nil {
  1187  				lastBlock = d.headBlock().Number()
  1188  			}
  1189  			d.rollback(hashes)
  1190  			curFastBlock, curBlock := common.Big0, common.Big0
  1191  			if d.headFastBlock != nil {
  1192  				curFastBlock = d.headFastBlock().Number()
  1193  			}
  1194  			if d.headBlock != nil {
  1195  				curBlock = d.headBlock().Number()
  1196  			}
  1197  			glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
  1198  				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock)
  1199  
  1200  			// If we're already past the pivot point, this could be an attack, tread carefully
  1201  			if rollback[len(rollback)-1].Number.Uint64() > pivot {
  1202  				// If we didn't ever fail, lock in the pivot header (must! not! change!)
  1203  				if atomic.LoadUint32(&d.fsPivotFails) == 0 {
  1204  					for _, header := range rollback {
  1205  						if header.Number.Uint64() == pivot {
  1206  							glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4])
  1207  							d.fsPivotLock = header
  1208  						}
  1209  					}
  1210  				}
  1211  			}
  1212  		}
  1213  	}()
  1214  
  1215  	// Wait for batches of headers to process
  1216  	gotHeaders := false
  1217  
  1218  	for {
  1219  		select {
  1220  		case <-d.cancelCh:
  1221  			return errCancelHeaderProcessing
  1222  
  1223  		case headers := <-d.headerProcCh:
  1224  			// Terminate header processing if we synced up
  1225  			if len(headers) == 0 {
  1226  				// Notify everyone that headers are fully processed
  1227  				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
  1228  					select {
  1229  					case ch <- false:
  1230  					case <-d.cancelCh:
  1231  					}
  1232  				}
  1233  				// If no headers were retrieved at all, the peer violated its TD promise that it had a
  1234  				// better chain compared to ours. The only exception is if its promised blocks were
  1235  				// already imported by other means (e.g. the block fetcher):
  1236  				//
  1237  				// R <remote peer>, L <local node>: Both at block 10
  1238  				// R: Mine block 11, and propagate it to L
  1239  				// L: Queue block 11 for import
  1240  				// L: Notice that R's head and TD increased compared to ours, start sync
  1241  				// L: Import of block 11 finishes
  1242  				// L: Sync begins, and finds common ancestor at 11
  1243  				// L: Request new headers up from 11 (R's TD was higher, it must have something)
  1244  				// R: Nothing to give
  1245  				if d.mode != LightSync {
  1246  					if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
  1247  						return errStallingPeer
  1248  					}
  1249  				}
  1250  				// If fast or light syncing, ensure promised headers are indeed delivered. This is
  1251  				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
  1252  				// of delivering the post-pivot blocks that would flag the invalid content.
  1253  				//
  1254  				// This check cannot be executed "as is" for full imports, since blocks may still be
  1255  				// queued for processing when the header download completes. However, as long as the
  1256  				// peer gave us something useful, we're already happy/progressed (above check).
  1257  				if d.mode == FastSync || d.mode == LightSync {
  1258  					if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 {
  1259  						return errStallingPeer
  1260  					}
  1261  				}
  1262  				// Disable any rollback and return
  1263  				rollback = nil
  1264  				return nil
  1265  			}
  1266  			// Otherwise split the chunk of headers into batches and process them
  1267  			gotHeaders = true
  1268  
  1269  			for len(headers) > 0 {
  1270  				// Terminate if something failed in between processing chunks
  1271  				select {
  1272  				case <-d.cancelCh:
  1273  					return errCancelHeaderProcessing
  1274  				default:
  1275  				}
  1276  				// Select the next chunk of headers to import
  1277  				limit := maxHeadersProcess
  1278  				if limit > len(headers) {
  1279  					limit = len(headers)
  1280  				}
  1281  				chunk := headers[:limit]
  1282  
  1283  				// In case of header only syncing, validate the chunk immediately
  1284  				if d.mode == FastSync || d.mode == LightSync {
  1285  					// Collect the yet unknown headers to mark them as uncertain
  1286  					unknown := make([]*types.Header, 0, len(headers))
  1287  					for _, header := range chunk {
  1288  						if !d.hasHeader(header.Hash()) {
  1289  							unknown = append(unknown, header)
  1290  						}
  1291  					}
  1292  					// If we're importing pure headers, verify based on their recentness
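        					// Headers close to (or past) the pivot are verified one by one, since a bad
        					// pivot would poison the fast-synced state; older headers are only spot
        					// checked every fsHeaderCheckFrequency-th entry.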
  1293  					frequency := fsHeaderCheckFrequency
  1294  					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
  1295  						frequency = 1
  1296  					}
  1297  					if n, err := d.insertHeaders(chunk, frequency); err != nil {
  1298  						// If some headers were inserted, add them to the rollback list as well
  1299  						if n > 0 {
  1300  							rollback = append(rollback, chunk[:n]...)
  1301  						}
  1302  						glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err)
  1303  						return errInvalidChain
  1304  					}
  1305  					// All verifications passed, store newly found uncertain headers
  1306  					rollback = append(rollback, unknown...)
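        					// Only keep the most recent fsHeaderSafetyNet headers as rollback candidates;
        					// anything older is considered settled.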
  1307  					if len(rollback) > fsHeaderSafetyNet {
  1308  						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
  1309  					}
  1310  				}
  1311  				// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
  1312  				if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
  1313  					if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
  1314  						glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4])
  1315  						return errInvalidChain
  1316  					}
  1317  				}
  1318  				// Unless we're doing light chains, schedule the headers for associated content retrieval
  1319  				if d.mode == FullSync || d.mode == FastSync {
  1320  					// If we've reached the allowed number of pending headers, stall a bit
  1321  					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
  1322  						select {
  1323  						case <-d.cancelCh:
  1324  							return errCancelHeaderProcessing
  1325  						case <-time.After(time.Second):
  1326  						}
  1327  					}
  1328  					// Otherwise insert the headers for content retrieval
  1329  					inserts := d.queue.Schedule(chunk, origin)
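        					// Schedule returns only the headers it actually accepted; a shorter list
        					// means some were already known or scheduled, i.e. the peer fed us stale
        					// headers, which we treat as misbehaviour.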
  1330  					if len(inserts) != len(chunk) {
  1331  						glog.V(logger.Debug).Infof("stale headers")
  1332  						return errBadPeer
  1333  					}
  1334  				}
  1335  				headers = headers[limit:]
  1336  				origin += uint64(limit)
  1337  			}
  1338  			// Signal the content downloaders of the availability of new tasks
  1339  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
  1340  				select {
  1341  				case ch <- true:
  1342  				default:
  1343  				}
  1344  			}
  1345  		}
  1346  	}
  1347  }
  1348  
  1349  // processContent takes fetch results from the queue and tries to import them
  1350  // into the chain. The type of import operation will depend on the result contents.
  1351  func (d *Downloader) processContent() error {
  1352  	pivot := d.queue.FastSyncPivot()
  1353  	for {
  1354  		results := d.queue.WaitResults()
  1355  		if len(results) == 0 {
  1356  			return nil // queue empty
  1357  		}
  1358  		if d.chainInsertHook != nil {
  1359  			d.chainInsertHook(results)
  1360  		}
  1361  		// Actually import the blocks
  1362  		if glog.V(logger.Debug) {
  1363  			first, last := results[0].Header, results[len(results)-1].Header
  1364  			glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
  1365  		}
  1366  		for len(results) != 0 {
  1367  			// Check for any termination requests
  1368  			select {
  1369  			case <-d.quitCh:
  1370  				return errCancelContentProcessing
  1371  			default:
  1372  			}
  1373  			// Retrieve the next batch of results to import
  1374  			var (
  1375  				blocks   = make([]*types.Block, 0, maxResultsProcess)
  1376  				receipts = make([]types.Receipts, 0, maxResultsProcess)
  1377  			)
  1378  			items := int(math.Min(float64(len(results)), float64(maxResultsProcess)))
  1379  			for _, result := range results[:items] {
  1380  				switch {
  1381  				case d.mode == FullSync:
  1382  					blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles))
  1383  				case d.mode == FastSync:
  1384  					blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles))
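        					// In fast sync only blocks at or below the pivot carry downloaded receipts;
        					// blocks above the pivot are processed like a regular full-sync import.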
  1385  					if result.Header.Number.Uint64() <= pivot {
  1386  						receipts = append(receipts, result.Receipts)
  1387  					}
  1388  				}
  1389  			}
  1390  			// Try to process the results, aborting if there's an error
  1391  			var (
  1392  				err   error
  1393  				index int
  1394  			)
  1395  			switch {
  1396  			case len(receipts) > 0:
  1397  				index, err = d.insertReceipts(blocks, receipts)
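        				// If this batch ends exactly at the pivot block, commit it as the new chain
        				// head; subsequent blocks are imported with full processing.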
  1398  				if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
  1399  					glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4])
  1400  					index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
  1401  				}
  1402  			default:
  1403  				index, err = d.insertBlocks(blocks)
  1404  			}
  1405  			if err != nil {
  1406  				glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
  1407  				return errInvalidChain
  1408  			}
  1409  			// Shift the results to the next batch
  1410  			results = results[items:]
  1411  		}
  1412  	}
  1413  }
  1414  
  1415  // DeliverHeaders injects a new batch of block headers received from a remote
  1416  // node into the download schedule.
  1417  func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
  1418  	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
  1419  }
  1420  
  1421  // DeliverBodies injects a new batch of block bodies received from a remote node.
  1422  func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
  1423  	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
  1424  }
  1425  
  1426  // DeliverReceipts injects a new batch of receipts received from a remote node.
  1427  func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
  1428  	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
  1429  }
  1430  
  1431  // DeliverNodeData injects a new batch of node state data received from a remote node.
  1432  func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
  1433  	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
  1434  }
  1435  
  1436  // deliver injects a new batch of data received from a remote node.
  1437  func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
  1438  	// Update the delivery metrics for both good and failed deliveries
  1439  	inMeter.Mark(int64(packet.Items()))
  1440  	defer func() {
  1441  		if err != nil {
  1442  			dropMeter.Mark(int64(packet.Items()))
  1443  		}
  1444  	}()
  1445  	// Deliver or abort if the sync is canceled while queuing
  1446  	d.cancelLock.RLock()
  1447  	cancel := d.cancelCh
  1448  	d.cancelLock.RUnlock()
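        	// The cancel channel is snapshotted under the read lock so a concurrently
        	// (re)started sync cannot swap it out before the select below.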
  1449  	if cancel == nil {
  1450  		return errNoSyncActive
  1451  	}
  1452  	select {
  1453  	case destCh <- packet:
  1454  		return nil
  1455  	case <-cancel:
  1456  		return errNoSyncActive
  1457  	}
  1458  }
  1459  
  1460  // qosTuner is the quality of service tuning loop that occasionally gathers the
  1461  // peer latency statistics and updates the estimated request round trip time.
  1462  func (d *Downloader) qosTuner() {
  1463  	for {
  1464  		// Retrieve the current median RTT and integrate it into the previous target RTT
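        		// With qosTuningImpact = 0.25 this is an exponential moving average:
        		// rtt = 0.75*previous + 0.25*medianRTT, so a single outlier measurement
        		// only shifts the target gradually.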
  1465  		rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
  1466  		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
  1467  
  1468  		// A new RTT cycle passed, increase our confidence in the estimated RTT
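        		// Confidence is stored scaled by 1,000,000; each completed cycle halves the
        		// remaining distance to full confidence (1.0).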
  1469  		conf := atomic.LoadUint64(&d.rttConfidence)
  1470  		conf = conf + (1000000-conf)/2
  1471  		atomic.StoreUint64(&d.rttConfidence, conf)
  1472  
  1473  		// Log the new QoS values and sleep until the next RTT
  1474  		glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
  1475  		select {
  1476  		case <-d.quitCh:
  1477  			return
  1478  		case <-time.After(rtt):
  1479  		}
  1480  	}
  1481  }
  1482  
  1483  // qosReduceConfidence is meant to be called when a new peer joins the downloader's
  1484  // peer set, needing to reduce the confidence we have in our QoS estimates.
  1485  func (d *Downloader) qosReduceConfidence() {
  1486  	// If we have a single peer, confidence is always 1
  1487  	peers := uint64(d.peers.Len())
  1488  	if peers == 1 {
  1489  		atomic.StoreUint64(&d.rttConfidence, 1000000)
  1490  		return
  1491  	}
  1492  	// If we have a ton of peers, don't drop the confidence
  1493  	if peers >= uint64(qosConfidenceCap) {
  1494  		return
  1495  	}
  1496  	// Otherwise drop the confidence factor
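        	// Scale the confidence by (peers-1)/peers so each newly joined peer dilutes it
        	// slightly, but never let it fall below rttMinConfidence.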
  1497  	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
  1498  	if float64(conf)/1000000 < rttMinConfidence {
  1499  		conf = uint64(rttMinConfidence * 1000000)
  1500  	}
  1501  	atomic.StoreUint64(&d.rttConfidence, conf)
  1502  
  1503  	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1504  	glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
  1505  }
  1506  
  1507  // requestRTT returns the current target round trip time for a download request
  1508  // to complete in.
  1509  //
  1510  // Note, the returned RTT is 0.9 of the actually estimated RTT. The reason is that
  1511  // the downloader tries to adapt queries to the RTT, so multiple RTT values can
  1512  // be adapted to, but smaller ones are preferred (stabler download stream).
  1513  func (d *Downloader) requestRTT() time.Duration {
  1514  	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
  1515  }
  1516  
  1517  // requestTTL returns the current timeout allowance for a single download request
  1518  // to finish under.
  1519  func (d *Downloader) requestTTL() time.Duration {
  1520  	var (
  1521  		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1522  		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
  1523  	)
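        	// The timeout scales inversely with confidence: ttl = ttlScaling * rtt / conf.
        	// For example, an estimated RTT of 5s at 0.5 confidence yields a 30s allowance,
        	// subject to the ttlLimit cap below.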
  1524  	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
  1525  	if ttl > ttlLimit {
  1526  		ttl = ttlLimit
  1527  	}
  1528  	return ttl
  1529  }