github.com/aquanetwork/aquachain@v1.7.8/aqua/downloader/downloader.go (about)

     1  // Copyright 2015 The aquachain Authors
     2  // This file is part of the aquachain library.
     3  //
     4  // The aquachain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The aquachain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	aquachain "gitlab.com/aquachain/aquachain"
    29  	"gitlab.com/aquachain/aquachain/aqua/event"
    30  	"gitlab.com/aquachain/aquachain/aquadb"
    31  	"gitlab.com/aquachain/aquachain/common"
    32  	"gitlab.com/aquachain/aquachain/common/log"
    33  	"gitlab.com/aquachain/aquachain/common/metrics"
    34  	"gitlab.com/aquachain/aquachain/core"
    35  	"gitlab.com/aquachain/aquachain/core/types"
    36  	"gitlab.com/aquachain/aquachain/params"
    37  )
    38  
    39  var (
    40  	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
    41  	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
    42  	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
    43  	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
    44  	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
    45  	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
    46  	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request
    47  
    48  	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
    49  	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
    50  	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
    51  	rttMinConfidence = 0.1                      // Worst confidence factor in our estimated RTT value
    52  	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
    53  	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts
    54  
    55  	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
    56  	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
    57  	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value
    58  
    59  	maxQueuedHeaders  = 32 * 1024 // [aqua/62] Maximum number of headers to queue for import (DOS protection)
    60  	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
    61  	maxResultsProcess = 2048      // Number of content download results to import at once into the chain
    62  
    63  	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
    64  	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
    65  	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
    66  	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
    67  	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
    68  )
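        // The RTT/TTL knobs above combine into per-request timeouts roughly as
        // sketched below. This is an illustrative reading of the constants, not a
        // verbatim copy of the package's requestTTL; the confidence weighting in
        // particular is assumed:
        //
        //	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
        //	conf := float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
        //	ttl := time.Duration(float64(rtt) * float64(ttlScaling) / conf)
        //	if ttl > ttlLimit {
        //		ttl = ttlLimit // cap to avoid crazy timeouts on poor estimates
        //	}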
    69  
    70  var (
    71  	errBusy                    = errors.New("busy")
    72  	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
    73  	errBadPeer                 = errors.New("action from evil peer ignored")
    74  	errStallingPeer            = errors.New("peer is stalling")
    75  	errNoPeers                 = errors.New("no peers to keep download active")
    76  	errTimeout                 = errors.New("timeout")
    77  	errEmptyHeaderSet          = errors.New("empty header set by peer")
    78  	errPeersUnavailable        = errors.New("no peers available or all tried for download")
    79  	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
    80  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    81  	errInvalidBlock            = errors.New("retrieved block is invalid")
    82  	errInvalidBody             = errors.New("retrieved block body is invalid")
    83  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    84  	errCancelBlockFetch        = errors.New("block download canceled (requested)")
    85  	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
    86  	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
    87  	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
    88  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    89  	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
    90  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    91  	errNoSyncActive            = errors.New("no sync active")
    92  	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
    93  )
    94  
    95  type Downloader struct {
    96  	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
    97  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
    98  
    99  	queue   *queue   // Scheduler for selecting the hashes to download
   100  	peers   *peerSet // Set of active peers from which download can proceed
   101  	stateDB aquadb.Database
   102  
   103  	rttEstimate   uint64 // Round trip time to target for download requests
   104  	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)
   105  
   106  	// Statistics
   107  	syncStatsChainOrigin uint64 // Origin block number where syncing started at
   108  	syncStatsChainHeight uint64 // Highest block number known when syncing started
   109  	syncStatsState       stateSyncStats
   110  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   111  
   112  	lightchain LightChain
   113  	blockchain BlockChain
   114  
   115  	// Callbacks
   116  	dropPeer peerDropFn // Drops a peer for misbehaving
   117  
   118  	// Status
   119  	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
   120  	synchronising   int32 // Flag whether a sync cycle is in progress (atomic)
   121  	notified        int32 // Flag whether the sync-start notice was printed (atomic)
   122  	committed       int32 // Flag whether the fast sync pivot block was committed (atomic)
   123  
   124  	// Channels
   125  	headerCh      chan dataPack        // [aqua/62] Channel receiving inbound block headers
   126  	bodyCh        chan dataPack        // [aqua/62] Channel receiving inbound block bodies
   127  	receiptCh     chan dataPack        // [aqua/63] Channel receiving inbound receipts
   128  	bodyWakeCh    chan bool            // [aqua/62] Channel to signal the block body fetcher of new tasks
   129  	receiptWakeCh chan bool            // [aqua/63] Channel to signal the receipt fetcher of new tasks
   130  	headerProcCh  chan []*types.Header // [aqua/62] Channel to feed the header processor new tasks
   131  
   132  	// for stateFetcher
   133  	stateSyncStart chan *stateSync
   134  	trackStateReq  chan *stateReq
   135  	stateCh        chan dataPack // [aqua/63] Channel receiving inbound node state data
   136  
   137  	// Cancellation and termination
   138  	cancelPeer string        // Identifier of the peer currently being used as the master (cancel on drop)
   139  	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
   140  	cancelLock sync.RWMutex  // Lock to protect the cancel channel and peer in delivers
   141  
   142  	quitCh   chan struct{} // Quit channel to signal termination
   143  	quitLock sync.RWMutex  // Lock to prevent double closes
   144  
   145  	// Testing hooks
   146  	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
   147  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   148  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   149  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   150  }
   151  
   152  // LightChain encapsulates functions required to synchronise a light chain.
   153  type LightChain interface {
   154  	// HasHeader verifies a header's presence in the local chain.
   155  	HasHeader(common.Hash, uint64) bool
   156  
   157  	// GetHeaderByHash retrieves a header from the local chain.
   158  	GetHeaderByHash(common.Hash) *types.Header
   159  
   160  	// CurrentHeader retrieves the head header from the local chain.
   161  	CurrentHeader() *types.Header
   162  
   163  	// GetTd returns the total difficulty of a local block.
   164  	GetTd(common.Hash, uint64) *big.Int
   165  
   166  	// InsertHeaderChain inserts a batch of headers into the local chain.
   167  	InsertHeaderChain([]*types.Header, int) (int, error)
   168  
   169  	// Rollback removes a few recently added elements from the local chain.
   170  	Rollback([]common.Hash)
   171  
   172  	GetBlockVersion(*big.Int) types.HeaderVersion
   173  }
   174  
   175  // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
   176  type BlockChain interface {
   177  	LightChain
   178  
   179  	// HasBlock verifies a block's presence in the local chain.
   180  	HasBlock(common.Hash, uint64) bool
   181  
   182  	// GetBlockByHash retrieves a block from the local chain.
   183  	GetBlockByHash(common.Hash) *types.Block
   184  
   185  	// CurrentBlock retrieves the head block from the local chain.
   186  	CurrentBlock() *types.Block
   187  
   188  	// CurrentFastBlock retrieves the head fast block from the local chain.
   189  	CurrentFastBlock() *types.Block
   190  
   191  	// FastSyncCommitHead directly commits the head block to a certain entity.
   192  	FastSyncCommitHead(common.Hash) error
   193  
   194  	// InsertChain inserts a batch of blocks into the local chain.
   195  	InsertChain(types.Blocks) (int, error)
   196  
   197  	// InsertReceiptChain inserts a batch of receipts into the local chain.
   198  	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
   199  }
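        // Note: any concrete chain can be checked against these interfaces at
        // compile time. A minimal sketch, assuming core.BlockChain is the intended
        // implementation (this assertion is not present in the original file):
        //
        //	var _ BlockChain = (*core.BlockChain)(nil)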
   200  
   201  // New creates a new downloader to fetch hashes and blocks from remote peers.
   202  func New(mode SyncMode, stateDb aquadb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
   203  	if lightchain == nil {
   204  		lightchain = chain
   205  	}
   206  	dl := &Downloader{
   207  		mode:           mode,
   208  		stateDB:        stateDb,
   209  		mux:            mux,
   210  		queue:          newQueue(lightchain.GetBlockVersion),
   211  		peers:          newPeerSet(),
   212  		rttEstimate:    uint64(rttMaxEstimate),
   213  		rttConfidence:  uint64(1000000),
   214  		blockchain:     chain,
   215  		lightchain:     lightchain,
   216  		dropPeer:       dropPeer,
   217  		headerCh:       make(chan dataPack, 1),
   218  		bodyCh:         make(chan dataPack, 1),
   219  		receiptCh:      make(chan dataPack, 1),
   220  		bodyWakeCh:     make(chan bool, 1),
   221  		receiptWakeCh:  make(chan bool, 1),
   222  		headerProcCh:   make(chan []*types.Header, 1),
   223  		quitCh:         make(chan struct{}),
   224  		stateCh:        make(chan dataPack),
   225  		stateSyncStart: make(chan *stateSync),
   226  		syncStatsState: stateSyncStats{
   227  			processed: core.GetTrieSyncProgress(stateDb),
   228  		},
   229  		trackStateReq: make(chan *stateReq),
   230  	}
   231  	go dl.qosTuner()
   232  	go dl.stateFetcher()
   233  	return dl
   234  }
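        // A typical construction, sketched under the assumption that the caller owns
        // the database, event mux, chain and peer-drop callback (the identifiers
        // below are illustrative, not from this file):
        //
        //	dl := New(FullSync, chainDb, eventMux, blockchain, nil, removePeer)
        //	defer dl.Terminate()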
   235  
   236  // Progress retrieves the synchronisation boundaries: the origin block where
   237  // synchronisation started (it may have failed or been suspended); the block
   238  // or header the sync is currently at; and the latest known block the sync targets.
   239  //
   240  // In addition, during the state download phase of fast synchronisation the number
   241  // of processed and the total number of known states are also returned. Otherwise
   242  // these are zero.
   243  func (d *Downloader) Progress() aquachain.SyncProgress {
   244  	// Lock the current stats and return the progress
   245  	d.syncStatsLock.RLock()
   246  	defer d.syncStatsLock.RUnlock()
   247  
   248  	current := uint64(0)
   249  	switch d.mode {
   250  	case FullSync:
   251  		current = d.blockchain.CurrentBlock().NumberU64()
   252  	case FastSync:
   253  		current = d.blockchain.CurrentFastBlock().NumberU64()
   254  	case LightSync:
   255  		current = d.lightchain.CurrentHeader().Number.Uint64()
   256  		common.Report("light sync activated but should not be")
   257  	}
   258  	return aquachain.SyncProgress{
   259  		StartingBlock: d.syncStatsChainOrigin,
   260  		CurrentBlock:  current,
   261  		HighestBlock:  d.syncStatsChainHeight,
   262  		PulledStates:  d.syncStatsState.processed,
   263  		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
   264  	}
   265  }
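        // Callers can turn these boundaries into a rough completion figure. An
        // illustrative use (not part of this package):
        //
        //	p := dl.Progress()
        //	if p.HighestBlock > p.StartingBlock {
        //		pct := float64(p.CurrentBlock-p.StartingBlock) /
        //			float64(p.HighestBlock-p.StartingBlock) * 100
        //		fmt.Printf("synced %.1f%%\n", pct)
        //	}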
   266  
   267  // Synchronising returns whether the downloader is currently retrieving blocks.
   268  func (d *Downloader) Synchronising() bool {
   269  	return atomic.LoadInt32(&d.synchronising) > 0
   270  }
   271  
   272  // RegisterPeer injects a new download peer into the set of block sources to
   273  // be used for fetching hashes and blocks from.
   274  func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
   275  	logger := log.New("peer", id)
   276  	logger.Trace("Registering sync peer")
   277  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   278  		logger.Error("Failed to register sync peer", "err", err)
   279  		return err
   280  	}
   281  	d.qosReduceConfidence()
   282  
   283  	return nil
   284  }
   285  
   286  // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
   287  func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
   288  	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
   289  }
   290  
   291  // UnregisterPeer removes a peer from the known list, preventing any action from
   292  // the specified peer. An effort is also made to return any pending fetches into
   293  // the queue.
   294  func (d *Downloader) UnregisterPeer(id string) error {
   295  	// Unregister the peer from the active peer set and revoke any fetch tasks
   296  	logger := log.New("peer", id)
   297  	logger.Trace("Unregistering sync peer")
   298  	if err := d.peers.Unregister(id); err != nil {
   299  		logger.Error("Failed to unregister sync peer", "err", err)
   300  		return err
   301  	}
   302  	d.queue.Revoke(id)
   303  
   304  	// If this peer was the master peer, abort sync immediately
   305  	d.cancelLock.RLock()
   306  	master := id == d.cancelPeer
   307  	d.cancelLock.RUnlock()
   308  
   309  	if master {
   310  		d.Cancel()
   311  	}
   312  	return nil
   313  }
   314  
   315  // Synchronise tries to sync up our local block chain with a remote peer,
   316  // adding various sanity checks and wrapping the attempt with log entries.
   317  func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
   318  	err := d.synchronise(id, head, td, mode)
   319  	switch err {
   320  	case nil:
   321  	case errBusy:
   322  	case errBadPeer:
   323  		log.Debug("Synchronisation failed, dropping peer", "peer", id, "err", err)
   324  		if d.dropPeer == nil {
   325  			// The dropPeer method is nil when `--copydb` is used for a local copy.
   326  			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   327  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   328  		} else {
   329  			d.dropPeer(id)
   330  		}
   331  	case errTimeout, errStallingPeer,
   332  		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
   333  		errInvalidAncestor, errInvalidChain:
   334  		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   335  		if d.dropPeer == nil {
   336  			// The dropPeer method is nil when `--copydb` is used for a local copy.
   337  			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   338  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   339  		} else {
   340  			d.dropPeer(id)
   341  		}
   342  	default:
   343  		log.Warn("Synchronisation failed, retrying", "err", err)
   344  	}
   345  	return err
   346  }
   347  
   348  // synchronise selects the peer and uses it for synchronising. If an empty string is given
   349  // it will use the best peer possible and synchronise if its TD is higher than our own. If any
   350  // of the checks fail, an error is returned. This method is synchronous.
   351  func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
   352  	// Mock out the synchronisation if testing
   353  	if d.synchroniseMock != nil {
   354  		return d.synchroniseMock(id, hash)
   355  	}
   356  	// Make sure only one goroutine is ever allowed past this point at once
   357  	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
   358  		return errBusy
   359  	}
   360  	defer atomic.StoreInt32(&d.synchronising, 0)
   361  
   362  	// Post a user notification of the sync (only once per session)
   363  	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
   364  		log.Info("Block synchronisation started")
   365  	}
   366  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   367  	d.queue.Reset()
   368  	d.peers.Reset()
   369  
   370  	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
   371  		select {
   372  		case <-ch:
   373  		default:
   374  		}
   375  	}
   376  	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
   377  		for empty := false; !empty; {
   378  			select {
   379  			case <-ch:
   380  			default:
   381  				empty = true
   382  			}
   383  		}
   384  	}
   385  	for empty := false; !empty; {
   386  		select {
   387  		case <-d.headerProcCh:
   388  		default:
   389  			empty = true
   390  		}
   391  	}
   392  	// Create cancel channel for aborting mid-flight and mark the master peer
   393  	d.cancelLock.Lock()
   394  	d.cancelCh = make(chan struct{})
   395  	d.cancelPeer = id
   396  	d.cancelLock.Unlock()
   397  
   398  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   399  
   400  	// Set the requested sync mode, unless it's forbidden
   401  	d.mode = mode
   402  
   403  	// Retrieve the origin peer and initiate the downloading process
   404  	p := d.peers.Peer(id)
   405  	if p == nil {
   406  		return errUnknownPeer
   407  	}
   408  	return d.syncWithPeer(p, hash, td)
   409  }
   410  
   411  // syncWithPeer starts a block synchronisation based on the hash chain from the
   412  // specified peer and head hash.
   413  func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
   414  	d.mux.Post(StartEvent{})
   415  	defer func() {
   416  		// reset on error
   417  		if err != nil {
   418  			d.mux.Post(FailedEvent{err})
   419  		} else {
   420  			d.mux.Post(DoneEvent{})
   421  		}
   422  	}()
   423  	if p.version < 62 {
   424  		return errTooOld
   425  	}
   426  
   427  	log.Debug("Synchronising with the network", "peer", p.id, "aqua", p.version, "head", hash, "td", td, "mode", d.mode)
   428  	defer func(start time.Time) {
   429  		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
   430  	}(time.Now())
   431  
   432  	// Look up the sync boundaries: the common ancestor and the target block
   433  	latest, err := d.fetchHeight(p)
   434  	if err != nil {
   435  		return err
   436  	}
   437  	height := latest.Number.Uint64()
   438  
   439  	origin, err := d.findAncestor(p, height)
   440  	if err != nil {
   441  		return err
   442  	}
   443  	d.syncStatsLock.Lock()
   444  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   445  		d.syncStatsChainOrigin = origin
   446  	}
   447  	d.syncStatsChainHeight = height
   448  	d.syncStatsLock.Unlock()
   449  
   450  	// Ensure our origin point is below any fast sync pivot point
   451  	pivot := uint64(0)
   452  	if d.mode == FastSync {
   453  		if height <= uint64(fsMinFullBlocks) {
   454  			origin = 0
   455  		} else {
   456  			pivot = height - uint64(fsMinFullBlocks)
   457  			if pivot <= origin {
   458  				origin = pivot - 1
   459  			}
   460  		}
   461  	}
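        	// Worked example: with fsMinFullBlocks = 64 and a remote height of 1000,
        	// the pivot is 936; if the common ancestor were found at 950, the origin
        	// is pulled back to 935 so the pivot block itself is re-downloaded.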
   462  	d.committed = 1
   463  	if d.mode == FastSync && pivot != 0 {
   464  		d.committed = 0
   465  	}
   466  	// Initiate the sync using a concurrent header and content retrieval algorithm
   467  	d.queue.Prepare(origin+1, d.mode)
   468  	if d.syncInitHook != nil {
   469  		d.syncInitHook(origin, height)
   470  	}
   471  
   472  	fetchers := []func() error{
   473  		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
   474  		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
   475  		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
   476  		func() error { return d.processHeaders(origin+1, pivot, td) },
   477  	}
   478  	if d.mode == FastSync {
   479  		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
   480  	} else if d.mode == FullSync {
   481  		fetchers = append(fetchers, d.processFullSyncContent)
   482  	}
   483  	return d.spawnSync(fetchers)
   484  }
   485  
   486  // spawnSync runs all given fetcher functions to completion in separate
   487  // goroutines, returning the first error that appears.
   488  func (d *Downloader) spawnSync(fetchers []func() error) error {
   489  	var wg sync.WaitGroup
   490  	errc := make(chan error, len(fetchers))
   491  	wg.Add(len(fetchers))
   492  	for _, fn := range fetchers {
   493  		fn := fn
   494  		go func() { defer wg.Done(); errc <- fn() }()
   495  	}
   496  	// Wait for the first error, then terminate the others.
   497  	var err error
   498  	for i := 0; i < len(fetchers); i++ {
   499  		if i == len(fetchers)-1 {
   500  			// Close the queue when all fetchers have exited.
   501  			// This will cause the block processor to end when
   502  			// it has processed the queue.
   503  			d.queue.Close()
   504  		}
   505  		if err = <-errc; err != nil {
   506  			break
   507  		}
   508  	}
   509  	d.queue.Close()
   510  	d.Cancel()
   511  	wg.Wait()
   512  	return err
   513  }
   514  
   515  // Cancel aborts all in-flight operations by closing the current cancel
   516  // channel. It is safe to call even if no sync is in progress.
   517  func (d *Downloader) Cancel() {
   518  	// Close the current cancel channel
   519  	d.cancelLock.Lock()
   520  	if d.cancelCh != nil {
   521  		select {
   522  		case <-d.cancelCh:
   523  			// Channel was already closed
   524  		default:
   525  			close(d.cancelCh)
   526  		}
   527  	}
   528  	d.cancelLock.Unlock()
   529  }
   530  
   531  // Terminate interrupts the downloader, canceling all pending operations.
   532  // The downloader cannot be reused after calling Terminate.
   533  func (d *Downloader) Terminate() {
   534  	// Close the termination channel (make sure double close is allowed)
   535  	d.quitLock.Lock()
   536  	select {
   537  	case <-d.quitCh:
   538  	default:
   539  		close(d.quitCh)
   540  	}
   541  	d.quitLock.Unlock()
   542  
   543  	// Cancel any pending download requests
   544  	d.Cancel()
   545  }
   546  
   547  // fetchHeight retrieves the head header of the remote peer to aid in estimating
   548  // the total time a pending synchronisation would take.
   549  func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
   550  	p.log.Debug("Retrieving remote chain height")
   551  
   552  	// Request the advertised remote head block and wait for the response
   553  	head, _ := p.peer.Head()
   554  
   555  	go p.peer.RequestHeadersByHash(head, 1, 0, false)
   556  
   557  	ttl := d.requestTTL()
   558  	timeout := time.After(ttl)
   559  	for {
   560  		select {
   561  		case <-d.cancelCh:
   562  			return nil, errCancelBlockFetch
   563  
   564  		case packet := <-d.headerCh:
   565  			// Discard anything not from the origin peer
   566  			if packet.PeerId() != p.id {
   567  				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   568  				break
   569  			}
   570  			// Make sure the peer actually gave something valid
   571  			headers := packet.(*headerPack).headers
   572  			if len(headers) != 1 {
   573  				p.log.Debug("Multiple headers for single request", "headers", len(headers))
   574  				return nil, errBadPeer
   575  			}
   576  			head := headers[0]
   577  			p.log.Debug("Remote head header identified", "number", head.Number, "hash-no-nonce", head.HashNoNonce(), "nonce", head.Nonce)
   578  			return head, nil
   579  
   580  		case <-timeout:
   581  			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
   582  			return nil, errTimeout
   583  
   584  		case <-d.bodyCh:
   585  		case <-d.receiptCh:
   586  			// Out of bounds delivery, ignore
   587  		}
   588  	}
   589  }
   590  
   591  // findAncestor tries to locate the common ancestor link of the local chain and
   592  // a remote peer's blockchain. In the general case when our node was in sync and
   593  // on the correct chain, checking the top N links should already get us a match.
   594  // In the rare scenario when we ended up on a long reorganisation (i.e. none of
   595  // the head links match), we do a binary search to find the common ancestor.
   596  func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
   597  	// Figure out the valid ancestor range to prevent rewrite attacks
   598  	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
   599  
   600  	if d.mode == FullSync {
   601  		ceil = d.blockchain.CurrentBlock().NumberU64()
   602  	} else if d.mode == FastSync {
   603  		ceil = d.blockchain.CurrentFastBlock().NumberU64()
   604  	}
   605  	if ceil >= MaxForkAncestry {
   606  		floor = int64(ceil - MaxForkAncestry)
   607  	}
   608  	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
   609  
   610  	// Request the topmost blocks to short circuit binary ancestor lookup
   611  	head := ceil
   612  	if head > height {
   613  		head = height
   614  	}
   615  	from := int64(head) - int64(MaxHeaderFetch)
   616  	if from < 0 {
   617  		from = 0
   618  	}
   619  	// Span out with 15 block gaps into the future to catch bad head reports
   620  	limit := 2 * MaxHeaderFetch / 16
   621  	count := 1 + int((int64(ceil)-from)/16)
   622  	if count > limit {
   623  		count = limit
   624  	}
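        	// Worked example: with MaxHeaderFetch = 192, from = head-192 and at most
        	// limit = 2*192/16 = 24 headers are requested at numbers from, from+16,
        	// ..., from+368, i.e. the probe can reach up to 176 blocks past the
        	// reported head to catch bad head reports.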
   625  	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)
   626  
   627  	// Wait for the remote response to the head fetch
   628  	number, hash := uint64(0), common.Hash{}
   629  
   630  	ttl := d.requestTTL()
   631  	timeout := time.After(ttl)
   632  
   633  	for finished := false; !finished; {
   634  		select {
   635  		case <-d.cancelCh:
   636  			return 0, errCancelHeaderFetch
   637  
   638  		case packet := <-d.headerCh:
   639  			// Discard anything not from the origin peer
   640  			if packet.PeerId() != p.id {
   641  				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   642  				break
   643  			}
   644  			// Make sure the peer actually gave something valid
   645  			headers := packet.(*headerPack).headers
   646  			if len(headers) == 0 {
   647  				p.log.Warn("Empty head header set")
   648  				return 0, errEmptyHeaderSet
   649  			}
   650  			// Make sure the peer's reply conforms to the request
   651  			for i := 0; i < len(headers); i++ {
   652  				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
   653  					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
   654  					return 0, errInvalidChain
   655  				}
   656  			}
   657  			// Check if a common ancestor was found
   658  			finished = true
   659  			for i := len(headers) - 1; i >= 0; i-- {
   660  				// Skip any headers that underflow/overflow our requested set
   661  				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
   662  					continue
   663  				}
   664  				var version byte = 0
   665  				switch d.mode {
   666  				case FullSync:
   667  					// cache the header hash with the correct version (using the block height)
   668  					version = byte(d.blockchain.GetBlockVersion(headers[i].Number))
   669  					hcache := headers[i].SetVersion(version)
   670  					// check if we already know the header or not
   671  					if d.blockchain.HasBlock(hcache, headers[i].Number.Uint64()) {
   672  						number, hash = headers[i].Number.Uint64(), hcache
   673  					}
   674  				default:
   675  					// cache the header hash with the correct version (using the block height)
   676  					version = byte(d.lightchain.GetBlockVersion(headers[i].Number))
   677  					hcache := headers[i].SetVersion(version)
   678  					// check if we already know the header or not
   679  					if d.lightchain.HasHeader(hcache, headers[i].Number.Uint64()) {
   680  						number, hash = headers[i].Number.Uint64(), hcache
   681  					}
   682  				}
   683  
   684  				// If every header is known, even future ones, the peer straight out lied about its head
   685  				if number > height && i == limit-1 {
   686  					p.log.Warn("Lied about chain head", "reported", height, "found", number)
   687  					return 0, errStallingPeer
   688  				}
   689  				break
   690  			}
   691  
   692  		case <-timeout:
   693  			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
   694  			return 0, errTimeout
   695  
   696  		case <-d.bodyCh:
   697  		case <-d.receiptCh:
   698  			// Out of bounds delivery, ignore
   699  		}
   700  	}
   701  	// If the head fetch already found an ancestor, return
   702  	if !common.EmptyHash(hash) {
   703  		if int64(number) <= floor {
   704  			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
   705  			return 0, errInvalidAncestor
   706  		}
   707  		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
   708  		return number, nil
   709  	}
   710  	// Ancestor not found, we need to binary search over our chain
   711  	start, end := uint64(0), head
   712  	if floor > 0 {
   713  		start = uint64(floor)
   714  	}
   715  	for start+1 < end {
   716  		// Split our chain interval in two, and request the hash to cross check
   717  		check := (start + end) / 2
   718  
   719  		ttl := d.requestTTL()
   720  		timeout := time.After(ttl)
   721  
   722  		go p.peer.RequestHeadersByNumber(check, 1, 0, false)
   723  
   724  		// Wait until a reply arrives to this request
   725  		for arrived := false; !arrived; {
   726  			select {
   727  			case <-d.cancelCh:
   728  				return 0, errCancelHeaderFetch
   729  
   730  			case packer := <-d.headerCh:
   731  				// Discard anything not from the origin peer
   732  				if packer.PeerId() != p.id {
   733  					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
   734  					break
   735  				}
   736  				// Make sure the peer actually gave something valid
   737  				headers := packer.(*headerPack).headers
   738  				if len(headers) != 1 {
   739  					p.log.Debug("Multiple headers for single request", "headers", len(headers))
   740  					return 0, errBadPeer
   741  				}
   742  				arrived = true
   743  
   744  				// Modify the search interval based on the response
   745  				firstversion := byte(d.blockchain.GetBlockVersion(headers[0].Number))
   746  				if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].SetVersion(firstversion), headers[0].Number.Uint64())) ||
   747  					(d.mode != FullSync && !d.lightchain.HasHeader(headers[0].SetVersion(firstversion), headers[0].Number.Uint64())) {
   748  					end = check
   749  					break
   750  				}
   751  				header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists
   752  				if header.Number.Uint64() != check {
   753  					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
   754  					return 0, errBadPeer
   755  				}
   756  				start = check
   757  
   758  			case <-timeout:
   759  				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
   760  				return 0, errTimeout
   761  
   762  			case <-d.bodyCh:
   763  			case <-d.receiptCh:
   764  				// Out of bounds delivery, ignore
   765  			}
   766  		}
   767  	}
   768  	// Ensure valid ancestry and return
   769  	if int64(start) <= floor {
   770  		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
   771  		return 0, errInvalidAncestor
   772  	}
   773  	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
   774  	return start, nil
   775  }
   776  
   777  // fetchHeaders keeps retrieving headers concurrently from the number
   778  // requested, until no more are returned, potentially throttling on the way. To
   779  // facilitate concurrency but still protect against malicious nodes sending bad
   780  // headers, we construct a header chain skeleton using the "origin" peer we are
   781  // syncing with, and fill in the missing headers using anyone else. Headers from
   782  // other peers are only accepted if they map cleanly to the skeleton. If no one
   783  // can fill in the skeleton - not even the origin peer - it's assumed invalid and
   784  // the origin is dropped.
   785  func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
   786  	p.log.Debug("Directing header downloads", "origin", from)
   787  	defer p.log.Debug("Header download terminated")
   788  
   789  	// Create a timeout timer, and the associated header fetcher
   790  	skeleton := true            // Skeleton assembly phase or finishing up
   791  	request := time.Now()       // time of the last skeleton fetch request
   792  	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
   793  	<-timeout.C                 // timeout channel should be initially empty
   794  	defer timeout.Stop()
   795  
   796  	var ttl time.Duration
   797  	getHeaders := func(from uint64) {
   798  		request = time.Now()
   799  
   800  		ttl = d.requestTTL()
   801  		timeout.Reset(ttl)
   802  
   803  		if skeleton {
   804  			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
   805  			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
   806  		} else {
   807  			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
   808  			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
   809  		}
   810  	}
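        	// Worked example: with MaxHeaderFetch = 192 and MaxSkeletonSize = 128, a
        	// skeleton request pulls headers from+191, from+383, ... (skip 191), one
        	// per 192-header segment, so a single round can anchor up to
        	// 128*192 = 24576 headers for the fill phase below.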
   811  	// Start pulling the header chain skeleton until all is done
   812  	getHeaders(from)
   813  
   814  	for {
   815  		select {
   816  		case <-d.cancelCh:
   817  			return errCancelHeaderFetch
   818  
   819  		case packet := <-d.headerCh:
   820  			// Make sure the active peer is giving us the skeleton headers
   821  			if packet.PeerId() != p.id {
   822  				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
   823  				break
   824  			}
   825  			headerReqTimer.UpdateSince(request)
   826  			timeout.Stop()
   827  
   828  			// If the skeleton's finished, pull any remaining head headers directly from the origin
   829  			if packet.Items() == 0 && skeleton {
   830  				skeleton = false
   831  				getHeaders(from)
   832  				continue
   833  			}
   834  			// If no more headers are inbound, notify the content fetchers and return
   835  			if packet.Items() == 0 {
   836  				// Don't abort header fetches while the pivot is downloading
   837  				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
   838  					p.log.Debug("No headers, waiting for pivot commit")
   839  					select {
   840  					case <-time.After(fsHeaderContCheck):
   841  						getHeaders(from)
   842  						continue
   843  					case <-d.cancelCh:
   844  						return errCancelHeaderFetch
   845  					}
   846  				}
   847  				// Pivot done (or not in fast sync) and no more headers, terminate the process
   848  				p.log.Debug("No more headers available")
   849  				select {
   850  				case d.headerProcCh <- nil:
   851  					return nil
   852  				case <-d.cancelCh:
   853  					return errCancelHeaderFetch
   854  				}
   855  			}
   856  			headers := packet.(*headerPack).headers
   857  
   858  			// If we received a skeleton batch, resolve internals concurrently
   859  			if skeleton {
   860  				filled, proced, err := d.fillHeaderSkeleton(from, headers)
   861  				if err != nil {
   862  					p.log.Debug("Skeleton chain invalid", "err", err)
   863  					return errInvalidChain
   864  				}
   865  				headers = filled[proced:]
   866  				from += uint64(proced)
   867  			}
   868  			// Insert all the new headers and fetch the next batch
   869  			if len(headers) > 0 {
   870  				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
   871  				select {
   872  				case d.headerProcCh <- headers:
   873  				case <-d.cancelCh:
   874  					return errCancelHeaderFetch
   875  				}
   876  				from += uint64(len(headers))
   877  			}
   878  			getHeaders(from)
   879  
   880  		case <-timeout.C:
   881  			if d.dropPeer == nil {
   882  				// The dropPeer method is nil when `--copydb` is used for a local copy.
   883  				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   884  				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
   885  				break
   886  			}
   887  			// Header retrieval timed out, consider the peer bad and drop
   888  			p.log.Debug("Header request timed out", "elapsed", ttl)
   889  			headerTimeoutMeter.Mark(1)
   890  			d.dropPeer(p.id)
   891  
   892  			// Finish the sync gracefully instead of dumping the gathered data though
   893  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
   894  				select {
   895  				case ch <- false:
   896  				case <-d.cancelCh:
   897  				}
   898  			}
   899  			select {
   900  			case d.headerProcCh <- nil:
   901  			case <-d.cancelCh:
   902  			}
   903  			return errBadPeer
   904  		}
   905  	}
   906  }
   907  
   908  // fillHeaderSkeleton concurrently retrieves headers from all our available peers
   909  // and maps them to the provided skeleton header chain.
   910  //
   911  // Any partial results from the beginning of the skeleton are (if possible) forwarded
   912  // immediately to the header processor to keep the rest of the pipeline full even
   913  // in the case of header stalls.
   914  //
   915  // The method returns the entire filled skeleton and also the number of headers
   916  // already forwarded for processing.
   917  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
   918  	log.Debug("Filling up skeleton", "from", from)
   919  	d.queue.ScheduleSkeleton(from, skeleton)
   920  
   921  	var (
   922  		deliver = func(packet dataPack) (int, error) {
   923  			pack := packet.(*headerPack)
   924  			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
   925  		}
   926  		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
   927  		throttle = func() bool { return false }
   928  		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
   929  			return d.queue.ReserveHeaders(p, count), false, nil
   930  		}
   931  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
   932  		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
   933  		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
   934  	)
   935  	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
   936  		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
   937  		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
   938  
   939  	log.Debug("Skeleton fill terminated", "err", err)
   940  
   941  	filled, proced := d.queue.RetrieveHeaders()
   942  	return filled, proced, err
   943  }
   944  
   945  // fetchBodies iteratively downloads the scheduled block bodies, taking any
   946  // available peers, reserving a chunk of blocks for each, waiting for delivery
   947  // and also periodically checking for timeouts.
   948  func (d *Downloader) fetchBodies(from uint64) error {
   949  	log.Debug("Downloading block bodies", "origin", from)
   950  
   951  	var (
   952  		deliver = func(packet dataPack) (int, error) {
   953  			pack := packet.(*bodyPack)
   954  			return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
   955  		}
   956  		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
   957  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
   958  		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
   959  		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
   960  	)
   961  	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
   962  		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
   963  		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
   964  
   965  	log.Debug("Block body download terminated", "err", err)
   966  	return err
   967  }
   968  
   969  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
   970  // available peers, reserving a chunk of receipts for each, waiting for delivery
   971  // and also periodically checking for timeouts.
   972  func (d *Downloader) fetchReceipts(from uint64) error {
   973  	log.Debug("Downloading transaction receipts", "origin", from)
   974  
   975  	var (
   976  		deliver = func(packet dataPack) (int, error) {
   977  			pack := packet.(*receiptPack)
   978  			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
   979  		}
   980  		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
   981  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
   982  		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
   983  		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
   984  	)
   985  	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
   986  		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
   987  		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
   988  
   989  	log.Debug("Transaction receipt download terminated", "err", err)
   990  	return err
   991  }
   992  
   993  // fetchParts iteratively downloads scheduled block parts, taking any available
   994  // peers, reserving a chunk of fetch requests for each, waiting for delivery and
   995  // also periodically checking for timeouts.
   996  //
   997  // As the scheduling/timeout logic mostly is the same for all downloaded data
   998  // types, this method is used by each for data gathering and is instrumented with
   999  // various callbacks to handle the slight differences between processing them.
  1000  //
  1001  // The instrumentation parameters:
  1002  //  - errCancel:   error type to return if the fetch operation is cancelled (mostly makes logging nicer)
  1003  //  - deliveryCh:  channel from which to retrieve downloaded data packets (merged from all concurrent peers)
  1004  //  - deliver:     processing callback to deliver data packets into type specific download queues (usually within `queue`)
  1005  //  - wakeCh:      notification channel for waking the fetcher when new tasks are available (or sync completed)
  1006  //  - expire:      task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
  1007  //  - pending:     task callback for the number of requests still needing download (detect completion/non-completability)
  1008  //  - inFlight:    task callback for the number of in-progress requests (wait for all active downloads to finish)
  1009  //  - throttle:    task callback to check if the processing queue is full and activate throttling (bound memory use)
  1010  //  - reserve:     task callback to reserve new download tasks to a particular peer (also signals partial completions)
  1011  //  - fetchHook:   tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
  1012  //  - fetch:       network callback to actually send a particular download request to a physical remote peer
  1013  //  - cancel:      task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
  1014  //  - capacity:    network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
  1015  //  - idle:        network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
  1016  //  - setIdle:     network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
  1017  //  - kind:        textual label of the type being downloaded to display in log messages
  1018  func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
  1019  	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
  1020  	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
  1021  	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {
  1022  
  1023  	// Create a ticker to detect expired retrieval tasks
  1024  	ticker := time.NewTicker(100 * time.Millisecond)
  1025  	defer ticker.Stop()
  1026  
  1027  	update := make(chan struct{}, 1)
  1028  
  1029  	// Prepare the queue and fetch block parts until the block header fetcher's done
  1030  	finished := false
  1031  	for {
  1032  		select {
  1033  		case <-d.cancelCh:
  1034  			return errCancel
  1035  
  1036  		case packet := <-deliveryCh:
  1037  			// If the peer was previously banned and failed to deliver its pack
  1038  			// in a reasonable time frame, ignore its message.
  1039  			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
  1040  				// Deliver the received chunk of data and check chain validity
  1041  				accepted, err := deliver(packet)
  1042  				if err == errInvalidChain {
  1043  					return err
  1044  				}
  1045  				// Unless a peer delivered something completely different from what was requested
  1046  				// (usually caused by a timed out request which came through in the end), set it to
  1047  				// idle. If the delivery's stale, the peer should have already been idled.
  1048  				if err != errStaleDelivery {
  1049  					setIdle(peer, accepted)
  1050  				}
  1051  				// Issue a log to the user to see what's going on
  1052  				switch {
  1053  				case err == nil && packet.Items() == 0:
  1054  					peer.log.Trace("Requested data not delivered", "type", kind)
  1055  				case err == nil:
  1056  					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
  1057  				default:
  1058  					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
  1059  				}
  1060  			}
  1061  			// Blocks assembled, try to update the progress
  1062  			select {
  1063  			case update <- struct{}{}:
  1064  			default:
  1065  			}
  1066  
  1067  		case cont := <-wakeCh:
  1068  			// The header fetcher sent a continuation flag, check if it's done
  1069  			if !cont {
  1070  				finished = true
  1071  			}
  1072  			// Headers arrive, try to update the progress
  1073  			select {
  1074  			case update <- struct{}{}:
  1075  			default:
  1076  			}
  1077  
  1078  		case <-ticker.C:
  1079  			// Sanity check update the progress
  1080  			select {
  1081  			case update <- struct{}{}:
  1082  			default:
  1083  			}
  1084  
  1085  		case <-update:
  1086  			// Short circuit if we lost all our peers
  1087  			if d.peers.Len() == 0 {
  1088  				return errNoPeers
  1089  			}
  1090  			// Check for fetch request timeouts and demote the responsible peers
  1091  			for pid, fails := range expire() {
  1092  				if peer := d.peers.Peer(pid); peer != nil {
  1093  					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
  1094  					// ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal request
  1095  					// times out, then sync-wise we need to get rid of the peer.
  1096  					//
  1097  					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
  1098  					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
  1099  					// how response times react to it, so it always requests one more than the minimum (i.e. min 2).
  1100  					if fails > 2 {
  1101  						peer.log.Trace("Data delivery timed out", "type", kind)
  1102  						setIdle(peer, 0)
  1103  					} else {
  1104  						peer.log.Debug("Stalling delivery, dropping", "type", kind)
  1105  						if d.dropPeer == nil {
  1106  							// The dropPeer method is nil when `--copydb` is used for a local copy.
  1107  							// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
  1108  							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
  1109  						} else {
  1110  							d.dropPeer(pid)
  1111  						}
  1112  					}
  1113  				}
  1114  			}
  1115  			// If there's nothing more to fetch, wait or terminate
  1116  			if pending() == 0 {
  1117  				if !inFlight() && finished {
  1118  					log.Debug("Data fetching completed", "type", kind)
  1119  					return nil
  1120  				}
  1121  				break
  1122  			}
  1123  			// Send a download request to all idle peers, until throttled
  1124  			progressed, throttled, running := false, false, inFlight()
  1125  			idles, total := idle()
  1126  
  1127  			for _, peer := range idles {
  1128  				// Short circuit if throttling activated
  1129  				if throttle() {
  1130  					throttled = true
  1131  					break
  1132  				}
  1133  				// Short circuit if there is no more available task.
  1134  				if pending() == 0 {
  1135  					break
  1136  				}
  1137  				// Reserve a chunk of fetches for a peer. A nil can mean either that
  1138  				// no more headers are available, or that the peer is known not to
  1139  				// have them.
  1140  				request, progress, err := reserve(peer, capacity(peer))
  1141  				if err != nil {
  1142  					return err
  1143  				}
  1144  				if progress {
  1145  					progressed = true
  1146  				}
  1147  				if request == nil {
  1148  					continue
  1149  				}
  1150  				if request.From > 0 {
  1151  					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
  1152  				} else {
  1153  					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
  1154  				}
  1155  				// Fetch the chunk and make sure any errors return the hashes to the queue
  1156  				if fetchHook != nil {
  1157  					fetchHook(request.Headers)
  1158  				}
  1159  				if err := fetch(peer, request); err != nil {
  1160  					// Although we could try and make an attempt to fix this, this error really
  1161  					// means that we've double allocated a fetch task to a peer. If that is the
  1162  					// case, the internal state of the downloader and the queue is very wrong so
  1163  					// better hard crash and note the error instead of silently accumulating into
  1164  					// a much bigger issue.
  1165  					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
  1166  				}
  1167  				running = true
  1168  			}
  1169  			// Make sure that we have peers available for fetching. If all peers have been tried
  1170  			// and all failed throw an error
  1171  			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
  1172  				return errPeersUnavailable
  1173  			}
  1174  		}
  1175  	}
  1176  }
  1177  
  1178  // processHeaders takes batches of retrieved headers from an input channel and
  1179  // keeps processing and scheduling them into the header chain and downloader's
  1180  // queue until the stream ends or a failure occurs.
  1181  func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
  1182  	// Keep a count of uncertain headers to roll back
  1183  	rollback := []*types.Header{}
  1184  	defer func() {
  1185  		if len(rollback) > 0 {
  1186  			// Flatten the headers and roll them back
  1187  			hashes := make([]common.Hash, len(rollback))
  1188  			for i, header := range rollback {
  1189  				hashes[i] = header.Hash()
  1190  			}
  1191  			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
  1192  			//if d.mode != LightSync {
  1193  			lastFastBlock = d.blockchain.CurrentFastBlock().Number()
  1194  			lastBlock = d.blockchain.CurrentBlock().Number()
  1195  			//}
  1196  			d.lightchain.Rollback(hashes)
  1197  			curFastBlock, curBlock := common.Big0, common.Big0
  1198  			//if d.mode != LightSync {
  1199  			curFastBlock = d.blockchain.CurrentFastBlock().Number()
  1200  			curBlock = d.blockchain.CurrentBlock().Number()
  1201  			//}
  1202  			log.Warn("Rolled back headers", "count", len(hashes),
  1203  				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
  1204  				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
  1205  				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
  1206  		}
  1207  	}()
  1208  
  1209  	// Wait for batches of headers to process
  1210  	gotHeaders := false
  1211  
  1212  	for {
  1213  		select {
  1214  		case <-d.cancelCh:
  1215  			return errCancelHeaderProcessing
  1216  
  1217  		case headers := <-d.headerProcCh:
  1218  			// Terminate header processing if we synced up
  1219  			if len(headers) == 0 {
  1220  				// Notify everyone that headers are fully processed
  1221  				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1222  					select {
  1223  					case ch <- false:
  1224  					case <-d.cancelCh:
  1225  					}
  1226  				}
  1227  				// If no headers were retrieved at all, the peer violated its TD promise that it had a
  1228  				// better chain compared to ours. The only exception is if its promised blocks were
  1229  				// already imported by other means (e.g. fetcher):
  1230  				//
  1231  				// R <remote peer>, L <local node>: Both at block 10
  1232  				// R: Mine block 11, and propagate it to L
  1233  				// L: Queue block 11 for import
  1234  				// L: Notice that R's head and TD increased compared to ours, start sync
  1235  				// L: Import of block 11 finishes
  1236  				// L: Sync begins, and finds common ancestor at 11
  1237  				// L: Request new headers up from 11 (R's TD was higher, it must have something)
  1238  				// R: Nothing to give
  1239  				//if d.mode != LightSync {
  1240  				head := d.blockchain.CurrentBlock()
  1241  				if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
  1242  					return errStallingPeer
  1243  				}
  1244  				//}
  1245  				// If fast or light syncing, ensure promised headers are indeed delivered. This is
  1246  				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
  1247  				// of delivering the post-pivot blocks that would flag the invalid content.
  1248  				//
  1249  				// This check cannot be executed "as is" for full imports, since blocks may still be
  1250  				// queued for processing when the header download completes. However, as long as the
  1251  				// peer gave us something useful, we're already happy/progressed (above check).
  1252  				//if d.mode == FastSync || d.mode == LightSync {
  1253  				if d.mode == FastSync {
  1254  					head := d.lightchain.CurrentHeader()
  1255  					if td.Cmp(d.lightchain.GetTd(head.SetVersion(byte(d.lightchain.GetBlockVersion(head.Number))), head.Number.Uint64())) > 0 {
  1256  						return errStallingPeer
  1257  					}
  1258  				}
  1259  				// Disable any rollback and return
  1260  				rollback = nil
  1261  				return nil
  1262  			}
  1263  			// Otherwise split the chunk of headers into batches and process them
  1264  			gotHeaders = true
  1265  
  1266  			for len(headers) > 0 {
  1267  				// Terminate if something failed in between processing chunks
  1268  				select {
  1269  				case <-d.cancelCh:
  1270  					return errCancelHeaderProcessing
  1271  				default:
  1272  				}
  1273  				// Select the next chunk of headers to import
  1274  				limit := maxHeadersProcess
  1275  				if limit > len(headers) {
  1276  					limit = len(headers)
  1277  				}
  1278  				chunk := headers[:limit]
  1279  
  1280  				// In case of header only syncing, validate the chunk immediately
  1281  				//if d.mode == FastSync || d.mode == LightSync {
  1282  				if d.mode == FastSync {
  1283  					// Collect the yet unknown headers to mark them as uncertain
  1284  					unknown := make([]*types.Header, 0, len(headers))
  1285  					for _, header := range chunk { // copies
  1286  						header.SetVersion(byte(d.lightchain.GetBlockVersion(header.Number)))
  1287  						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
  1288  							unknown = append(unknown, header)
  1289  						}
  1290  					}
  1291  					// If we're importing pure headers, verify based on their recentness
  1292  					frequency := fsHeaderCheckFrequency
  1293  					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
  1294  						frequency = 1
  1295  					}
  1296  					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
  1297  						// If some headers were inserted, add them too to the rollback list
  1298  						if n > 0 {
  1299  							rollback = append(rollback, chunk[:n]...)
  1300  						}
  1301  						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "algo", chunk[n].Version, "err", err)
  1302  						return errInvalidChain
  1303  					}
  1304  					// All verifications passed, store newly found uncertain headers
  1305  					rollback = append(rollback, unknown...)
  1306  					if len(rollback) > fsHeaderSafetyNet {
  1307  						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
  1308  					}
  1309  				}
  1310  				// Unless we're doing light chains, schedule the headers for associated content retrieval
  1311  				if d.mode == FullSync || d.mode == FastSync {
  1312  					// If we've reached the allowed number of pending headers, stall a bit
  1313  					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
  1314  						select {
  1315  						case <-d.cancelCh:
  1316  							return errCancelHeaderProcessing
  1317  						case <-time.After(time.Second):
  1318  						}
  1319  					}
  1320  					// Otherwise insert the headers for content retrieval
  1321  					inserts := d.queue.Schedule(chunk, origin)
  1322  					if len(inserts) != len(chunk) {
  1323  						log.Debug("Stale headers")
  1324  						return errBadPeer
  1325  					}
  1326  				}
  1327  				headers = headers[limit:]
  1328  				origin += uint64(limit)
  1329  			}
  1330  
  1331  			// Update the highest block number we know of if a higher one is found.
  1332  			d.syncStatsLock.Lock()
  1333  			if d.syncStatsChainHeight < origin {
  1334  				d.syncStatsChainHeight = origin - 1
  1335  			}
  1336  			d.syncStatsLock.Unlock()
  1337  
  1338  			// Signal the content downloaders of the availability of new tasks
  1339  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1340  				select {
  1341  				case ch <- true:
  1342  				default:
  1343  				}
  1344  			}
  1345  		}
  1346  	}
  1347  }
  1348  
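// Illustrative sketch, not part of the original file: processHeaders above
// caps the rollback list at fsHeaderSafetyNet entries by appending the tail
// of the slice back into its zeroed front. keepLastN is a hypothetical helper
// showing that same idiom in isolation; keepLastN(s, 3) on a 5-entry slice
// retains only the final 3 headers while reusing the existing backing array.
func keepLastN(headers []*types.Header, n int) []*types.Header {
	if len(headers) > n {
		// Copy the trailing n entries over the front of the existing
		// backing array, then reslice to length n.
		headers = append(headers[:0], headers[len(headers)-n:]...)
	}
	return headers
}
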
  1349  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
  1350  func (d *Downloader) processFullSyncContent() error {
  1351  	for {
  1352  		results := d.queue.Results(true)
  1353  		if len(results) == 0 {
  1354  			return nil
  1355  		}
  1356  		if d.chainInsertHook != nil {
  1357  			d.chainInsertHook(results)
  1358  		}
  1359  		if err := d.importBlockResults(results); err != nil {
  1360  			return err
  1361  		}
  1362  	}
  1363  }
  1364  
  1365  func (d *Downloader) importBlockResults(results []*fetchResult) error {
  1366  	// Check for any early termination requests
  1367  	if len(results) == 0 {
  1368  		return nil
  1369  	}
  1370  	select {
  1371  	case <-d.quitCh:
  1372  		return errCancelContentProcessing
  1373  	default:
  1374  	}
  1375  	// Retrieve a batch of results to import
  1376  	first, last := results[0].Header, results[len(results)-1].Header
  1377  	log.Debug("Inserting downloaded chain", "items", len(results),
  1378  		"firstnum", first.Number, "firsthash", first.Hash(), "algo", first.Version,
  1379  		"lastnum", last.Number, "lasthash", last.Hash(), "algo", last.Version,
  1380  	)
  1381  	blocks := make([]*types.Block, len(results))
  1382  	for i, result := range results {
  1383  		result.Header.Version = d.blockchain.GetBlockVersion(result.Header.Number)
  1384  		for i := range result.Uncles {
  1385  			result.Uncles[i].Version = d.blockchain.GetBlockVersion(result.Uncles[i].Number)
  1386  		}
  1387  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1388  	}
  1389  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
  1390  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "err", err)
  1391  		return errInvalidChain
  1392  	}
  1393  	return nil
  1394  }
  1395  
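// Hypothetical sketch, not in the original file: the reassembly step that
// importBlockResults performs above, pulled out on its own. A downloaded
// header and its separately fetched body parts are recombined into a full
// block before being handed to InsertChain.
func assembleBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header) *types.Block {
	// NewBlockWithHeader copies the header; WithBody attaches the body parts.
	return types.NewBlockWithHeader(header).WithBody(txs, uncles)
}
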
  1396  // processFastSyncContent takes fetch results from the queue and writes them to the
  1397  // database. It also controls the synchronisation of state nodes of the pivot block.
  1398  func (d *Downloader) processFastSyncContent(latest *types.Header) error {
  1399  	// Start syncing state of the reported head block. This should get us most of
  1400  	// the state of the pivot block.
  1401  	stateSync := d.syncState(latest.Root)
  1402  	defer stateSync.Cancel()
  1403  	go func() {
  1404  		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
  1405  			d.queue.Close() // wake up WaitResults
  1406  		}
  1407  	}()
  1408  	// Figure out the ideal pivot block. Note that this goalpost may move if the
  1409  	// sync takes long enough for the chain head to move significantly.
  1410  	pivot := uint64(0)
  1411  	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
  1412  		pivot = height - uint64(fsMinFullBlocks)
  1413  	}
  1414  	// To cater for moving pivot points, track the pivot block and subsequently
  1415  	// accumulated download results separately.
  1416  	var (
  1417  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
  1418  		oldTail  []*fetchResult // Downloaded content after the pivot
  1419  	)
  1420  	for {
  1421  		// Wait for the next batch of downloaded data to be available, and if the pivot
  1422  		// block became stale, move the goalpost
  1423  		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
  1424  		if len(results) == 0 {
  1425  			// If pivot sync is done, stop
  1426  			if oldPivot == nil {
  1427  				return stateSync.Cancel()
  1428  			}
  1429  			// If sync failed, stop
  1430  			select {
  1431  			case <-d.cancelCh:
  1432  				return stateSync.Cancel()
  1433  			default:
  1434  			}
  1435  		}
  1436  		if d.chainInsertHook != nil {
  1437  			d.chainInsertHook(results)
  1438  		}
  1439  		if oldPivot != nil {
  1440  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
  1441  		}
  1442  		// Split around the pivot block and process the two sides via fast/full sync
  1443  		if atomic.LoadInt32(&d.committed) == 0 {
  1444  			latest = results[len(results)-1].Header
  1445  			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
  1446  				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
  1447  				pivot = height - uint64(fsMinFullBlocks)
  1448  			}
  1449  		}
  1450  		P, beforeP, afterP := splitAroundPivot(pivot, results)
  1451  		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
  1452  			return err
  1453  		}
  1454  		if P != nil {
  1455  			// If new pivot block found, cancel old state retrieval and restart
  1456  			if oldPivot != P {
  1457  				stateSync.Cancel()
  1458  
  1459  				stateSync = d.syncState(P.Header.Root)
  1460  				defer stateSync.Cancel()
  1461  				go func() {
  1462  					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
  1463  						d.queue.Close() // wake up WaitResults
  1464  					}
  1465  				}()
  1466  				oldPivot = P
  1467  			}
  1468  			// Wait for completion, occasionally checking for pivot staleness
  1469  			select {
  1470  			case <-stateSync.done:
  1471  				if stateSync.err != nil {
  1472  					return stateSync.err
  1473  				}
  1474  				if err := d.commitPivotBlock(P); err != nil {
  1475  					return err
  1476  				}
  1477  				oldPivot = nil
  1478  
  1479  			case <-time.After(time.Second):
  1480  				oldTail = afterP
  1481  				continue
  1482  			}
  1483  		}
  1484  		// Fast sync done, pivot commit done, full import
  1485  		if err := d.importBlockResults(afterP); err != nil {
  1486  			return err
  1487  		}
  1488  	}
  1489  }
  1490  
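// stalePivot is a hypothetical, self-contained sketch (not in the original
// file) of the staleness rule processFastSyncContent applies above: once the
// observed head is more than 2*fsMinFullBlocks past the pivot, the pivot is
// moved up to height-fsMinFullBlocks. For instance, with fsMinFullBlocks=64,
// a pivot of 1000 goes stale at height 1200 (1200 > 1000+128) and is moved
// to 1136.
func stalePivot(pivot, height uint64) (newPivot uint64, moved bool) {
	if height > pivot+2*uint64(fsMinFullBlocks) {
		return height - uint64(fsMinFullBlocks), true
	}
	return pivot, false
}
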
  1491  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1492  	for _, result := range results {
  1493  		num := result.Header.Number.Uint64()
  1494  		switch {
  1495  		case num < pivot:
  1496  			before = append(before, result)
  1497  		case num == pivot:
  1498  			p = result
  1499  		default:
  1500  			after = append(after, result)
  1501  		}
  1502  	}
  1503  	return p, before, after
  1504  }
  1505  
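// A quick illustration (hypothetical, not in the original file) of the
// three-way partition splitAroundPivot performs above, using bare block
// numbers instead of fetch results: with pivot 3, the numbers {1,2,3,4,5}
// split into before={1,2}, p=3 and after={4,5}. The pivot block itself never
// lands in either of the surrounding slices.
func splitAroundPivotNums(pivot uint64, nums []uint64) (p uint64, before, after []uint64) {
	for _, num := range nums {
		switch {
		case num < pivot:
			before = append(before, num)
		case num == pivot:
			p = num
		default:
			after = append(after, num)
		}
	}
	return p, before, after
}
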
  1506  func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
  1507  	// Check for any early termination requests
  1508  	if len(results) == 0 {
  1509  		return nil
  1510  	}
  1511  	select {
  1512  	case <-d.quitCh:
  1513  		return errCancelContentProcessing
  1514  	case <-stateSync.done:
  1515  		if err := stateSync.Wait(); err != nil {
  1516  			return err
  1517  		}
  1518  	default:
  1519  	}
  1520  	// Retrieve a batch of results to import
  1521  	first, last := results[0].Header, results[len(results)-1].Header
  1522  	log.Debug("Inserting fast-sync blocks", "items", len(results),
  1523  		"firstnum", first.Number, "firsthash", first.Hash(),
  1524  		"lastnum", last.Number, "lasthash", last.Hash(),
  1525  	)
  1526  	blocks := make([]*types.Block, len(results))
  1527  	receipts := make([]types.Receipts, len(results))
  1528  	for i, result := range results {
  1529  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1530  		receipts[i] = result.Receipts
  1531  	}
  1532  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
  1533  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1534  		return errInvalidChain
  1535  	}
  1536  	return nil
  1537  }
  1538  
  1539  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1540  	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1541  	log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1542  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
  1543  		return err
  1544  	}
  1545  	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
  1546  		return err
  1547  	}
  1548  	atomic.StoreInt32(&d.committed, 1)
  1549  	return nil
  1550  }
  1551  
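// The committed flag stored above acts as a one-shot latch between
// goroutines: commitPivotBlock flips it to 1 exactly once, and
// processFastSyncContent polls it to decide whether the pivot may still
// move. exampleCommitLatch is a minimal, hypothetical sketch of that
// pattern, not part of the original file.
func exampleCommitLatch() {
	var committed int32 // 0 = pivot not yet committed
	// Writer side: flip the latch after the pivot block is durably in place.
	atomic.StoreInt32(&committed, 1)
	// Reader side: a zero value would mean the pivot is still movable.
	if atomic.LoadInt32(&committed) == 0 {
		// ... recompute the pivot against the latest head ...
	}
}
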
  1552  // DeliverHeaders injects a new batch of block headers received from a remote
  1553  // node into the download schedule.
  1554  func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
  1555  	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
  1556  }
  1557  
  1558  // DeliverBodies injects a new batch of block bodies received from a remote node.
  1559  func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
  1560  	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
  1561  }
  1562  
  1563  // DeliverReceipts injects a new batch of receipts received from a remote node.
  1564  func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
  1565  	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
  1566  }
  1567  
  1568  // DeliverNodeData injects a new batch of node state data received from a remote node.
  1569  func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
  1570  	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
  1571  }
  1572  
  1573  // deliver injects a new batch of data received from a remote node.
  1574  func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
  1575  	// Update the delivery metrics for both good and failed deliveries
  1576  	inMeter.Mark(int64(packet.Items()))
  1577  	defer func() {
  1578  		if err != nil {
  1579  			dropMeter.Mark(int64(packet.Items()))
  1580  		}
  1581  	}()
  1582  	// Deliver or abort if the sync is canceled while queuing
  1583  	d.cancelLock.RLock()
  1584  	cancel := d.cancelCh
  1585  	d.cancelLock.RUnlock()
  1586  	if cancel == nil {
  1587  		return errNoSyncActive
  1588  	}
  1589  	select {
  1590  	case destCh <- packet:
  1591  		return nil
  1592  	case <-cancel:
  1593  		return errNoSyncActive
  1594  	}
  1595  }
  1596  
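// deliverOrCancel is a hypothetical distillation (not in the original file)
// of the pattern deliver uses above, assuming the cancel channel carries
// struct{} values as elsewhere in this package: snapshot the channel under
// the lock, then race the send against cancellation so a late delivery can
// never block forever once the sync it belonged to is torn down.
func deliverOrCancel(dest chan<- dataPack, packet dataPack, cancel <-chan struct{}) error {
	if cancel == nil {
		// A nil channel would block the select forever; treat it as no sync.
		return errNoSyncActive
	}
	select {
	case dest <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}
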
  1597  // qosTuner is the quality of service tuning loop that occasionally gathers the
  1598  // peer latency statistics and updates the estimated request round trip time.
  1599  func (d *Downloader) qosTuner() {
  1600  	for {
  1601  		// Retrieve the current median RTT and integrate into the previous target RTT
  1602  		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
  1603  		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
  1604  
  1605  		// A new RTT cycle passed, increase our confidence in the estimated RTT
  1606  		conf := atomic.LoadUint64(&d.rttConfidence)
  1607  		conf = conf + (1000000-conf)/2
  1608  		atomic.StoreUint64(&d.rttConfidence, conf)
  1609  
  1610  		// Log the new QoS values and sleep until the next RTT
  1611  		log.Trace("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1612  		select {
  1613  		case <-d.quitCh:
  1614  			return
  1615  		case <-time.After(rtt):
  1616  		}
  1617  	}
  1618  }
  1619  
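// ewma is a hypothetical sketch (not part of the original file) of the
// smoothing step qosTuner applies above: the new RTT estimate blends the
// previous value with the live median, weighted by qosTuningImpact. With
// qosTuningImpact = 0.25, an old estimate of 4s and a measured median of 2s
// yield 0.75*4s + 0.25*2s = 3.5s, so individual spikes only nudge the target.
func ewma(old, sample time.Duration) time.Duration {
	return time.Duration((1-qosTuningImpact)*float64(old) + qosTuningImpact*float64(sample))
}
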
  1620  // qosReduceConfidence is meant to be called when a new peer joins the downloader's
  1621  // peer set, needing to reduce the confidence we have in our QoS estimates.
  1622  func (d *Downloader) qosReduceConfidence() {
  1623  	// If we have a single peer, confidence is always 1
  1624  	peers := uint64(d.peers.Len())
  1625  	if peers == 0 {
  1626  		// Ensure peer connectivity races don't catch us off guard
  1627  		return
  1628  	}
  1629  	if peers == 1 {
  1630  		atomic.StoreUint64(&d.rttConfidence, 1000000)
  1631  		return
  1632  	}
  1633  	// If we have a ton of peers, don't drop confidence
  1634  	if peers >= uint64(qosConfidenceCap) {
  1635  		return
  1636  	}
  1637  	// Otherwise drop the confidence factor
  1638  	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
  1639  	if float64(conf)/1000000 < rttMinConfidence {
  1640  		conf = uint64(rttMinConfidence * 1000000)
  1641  	}
  1642  	atomic.StoreUint64(&d.rttConfidence, conf)
  1643  
  1644  	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1645  	log.Trace("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1646  }
  1647  
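// scaleConfidence is a hypothetical sketch (not in the original file) of the
// reduction qosReduceConfidence applies above. Confidence is fixed-point
// with 1000000 = 1.0; each join scales it by (peers-1)/peers and floors it
// at rttMinConfidence. E.g. going from 4 to 5 peers turns 800000 (0.80) into
// 640000 (0.64), and with rttMinConfidence = 0.1 the value never drops
// below 100000.
func scaleConfidence(conf, peers uint64) uint64 {
	conf = conf * (peers - 1) / peers
	if float64(conf)/1000000 < rttMinConfidence {
		conf = uint64(rttMinConfidence * 1000000)
	}
	return conf
}
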
  1648  // requestRTT returns the current target round trip time for a download request
  1649  // to complete in.
  1650  //
  1651  // Note: the returned RTT is 0.9 of the actually estimated RTT. The reason is that
  1652  // the downloader tries to adapt queries to the RTT, so multiple RTT values can
  1653  // be adapted to, but smaller ones are preferred (stabler download stream).
  1654  func (d *Downloader) requestRTT() time.Duration {
  1655  	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
  1656  }
  1657  
  1658  // requestTTL returns the current timeout allowance for a single download request
  1659  // to finish under.
  1660  func (d *Downloader) requestTTL() time.Duration {
  1661  	var (
  1662  		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1663  		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
  1664  	)
  1665  	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
  1666  	if ttl > ttlLimit {
  1667  		ttl = ttlLimit
  1668  	}
  1669  	return ttl
  1670  }
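
// Worked example (hypothetical, not part of the original file) of the TTL
// computation above: ttl = ttlScaling * rtt / confidence, capped at
// ttlLimit. With the package defaults ttlScaling = 3 and ttlLimit = 1m, an
// estimated RTT of 2s at confidence 0.5 gives 3 * 2s / 0.5 = 12s, while at
// confidence 0.05 the raw 120s is clamped to the one-minute limit.
func exampleTTL(rtt time.Duration, conf float64) time.Duration {
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}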