github.com/neatlab/neatio@v1.7.3-0.20220425043230-d903e92fcc75/neatptc/downloader/downloader.go (about)

     1  package downloader
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"math/big"
     7  	"sync"
     8  	"sync/atomic"
     9  	"time"
    10  
    11  	"github.com/neatlab/neatio"
    12  	"github.com/neatlab/neatio/chain/core/rawdb"
    13  	"github.com/neatlab/neatio/chain/core/types"
    14  	"github.com/neatlab/neatio/chain/log"
    15  	"github.com/neatlab/neatio/neatdb"
    16  	"github.com/neatlab/neatio/params"
    17  	"github.com/neatlab/neatio/utilities/common"
    18  	"github.com/neatlab/neatio/utilities/event"
    19  	"github.com/neatlab/neatio/utilities/metrics"
    20  )
    21  
// Download tuning knobs. These are variables (not constants) so tests can
// lower them; they bound request sizes, RTT/TTL estimation and fast-sync
// pivot handling.
var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request (also the skeleton gap, see fetchHeaders)
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum reorg depth tolerated when searching for a common ancestor (findAncestor floor)
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests (initial estimate, see New)
	rttMinConfidence = 0.1                      // Worst confidence factor in the estimated RTT value
	ttlScaling       = 3                        // Scaling factor for the RTT -> TTL conversion
	ttlLimit         = time.Minute              // Hard cap on the request TTL

	qosTuningPeers   = 5    // Number of peers considered by the QoS tuner (see qosTuner, outside this excerpt)
	qosConfidenceCap = 10   // Peer count above which registering a peer no longer reduces RTT confidence
	qosTuningImpact  = 0.25 // Impact a new tuning target has on the previous estimate

	maxQueuedHeaders  = 32 * 1024 // Maximum number of headers to queue for import (DOS protection; used by the header processor)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	fsHeaderCheckFrequency = 100             // Verification frequency of downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048            // Number of headers to discard when a chain violation is detected
	fsHeaderForceVerify    = 24              // Number of headers to verify around the pivot before accepting it
	fsHeaderContCheck      = 3 * time.Second // Retry interval while waiting for the pivot commit (see fetchHeaders)
	fsMinFullBlocks        = 64              // Number of blocks below the head retrieved fully even in fast sync (pivot offset)
)
    52  
// Sentinel errors returned by the various download stages. The subset listed
// in Synchronise additionally causes the offending peer to be dropped.
var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)
    77  
// Downloader orchestrates chain synchronisation against remote peers: it
// schedules header/body/receipt retrievals through an internal queue, drives
// the fast-sync state fetcher, and tracks progress and per-peer RTT quality
// of service.
type Downloader struct {
	mode SyncMode       // Synchronisation mode (full/fast); written by synchronise()
	mux  *event.TypeMux // Event multiplexer announcing Start/Done/Failed events (see syncWithPeer)

	queue   *queue          // Scheduler selecting the hashes/headers to download
	peers   *peerSet        // Set of active peers downloads can proceed from
	stateDB neatdb.Database // Database the fast-sync trie progress is read from (see New)

	rttEstimate   uint64 // Round-trip time estimate; initialised to rttMaxEstimate
	rttConfidence uint64 // Confidence in the RTT estimate, scaled by 1e6 (see New)

	syncStatsChainOrigin uint64 // Block number where the current sync started
	syncStatsChainHeight uint64 // Highest remote block number known for the current sync
	syncStatsState       stateSyncStats
	syncStatsLock        sync.RWMutex // Guards the sync stats fields above (see Progress)

	lightchain LightChain // Header-only chain view; defaults to blockchain when nil in New
	blockchain BlockChain // Full chain used for block/receipt queries and insertion

	dropPeer peerDropFn // Callback to drop a misbehaving peer; may be nil (only warned about)

	synchroniseMock func(id string, hash common.Hash) error // Test hook short-circuiting synchronise()
	synchronising   int32                                   // Atomic flag: a sync is currently running
	notified        int32                                   // Atomic flag: the "sync started" log was emitted once
	committed       int32                                   // Atomic flag: fast-sync pivot committed (read in fetchHeaders)

	headerCh      chan dataPack // Inbound block headers
	bodyCh        chan dataPack // Inbound block bodies
	receiptCh     chan dataPack // Inbound receipts
	bodyWakeCh    chan bool     // Signals the body fetcher of new tasks; false means terminate
	receiptWakeCh chan bool     // Signals the receipt fetcher of new tasks; false means terminate
	headerProcCh  chan []*types.Header // Feeds fetched headers to the processor; nil batch means done

	stateSyncStart chan *stateSync // Hands new state sync runs to the stateFetcher goroutine
	trackStateReq  chan *stateReq  // Tracks in-flight state requests (used outside this excerpt)
	stateCh        chan dataPack   // Inbound node state data

	cancelPeer string        // Identifier of the master peer; its unregistration cancels the sync
	cancelCh   chan struct{} // Closed to abort an in-flight sync (see Cancel)
	cancelLock sync.RWMutex  // Guards cancelCh and cancelPeer

	quitCh   chan struct{} // Closed by Terminate to signal permanent shutdown
	quitLock sync.RWMutex  // Guards quitCh against double close

	syncInitHook     func(uint64, uint64)  // Test hook invoked when a sync starts (origin, height)
	bodyFetchHook    func([]*types.Header) // Test hook invoked before a body fetch
	receiptFetchHook func([]*types.Header) // Test hook invoked before a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Test hook — presumably invoked on chain insertion; not referenced in this excerpt

	logger log.Logger // Contextual logger for downloader messages
}
   129  
// LightChain is the minimal header-chain interface the downloader needs; the
// full BlockChain interface embeds it. All lookups are keyed by hash (and
// number where ambiguity is possible).
type LightChain interface {
	// HasHeader reports whether the header with the given hash and number is known locally.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header by hash (nil if unknown).
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader returns the head header of the local header chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of the block with the given hash and number.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers, verifying every N-th one;
	// it returns the index of the first failing header and the error.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// Rollback removes the given chain of headers from the local chain.
	Rollback([]common.Hash)
}
   143  
// BlockChain extends LightChain with the full-block operations needed for
// full and fast synchronisation.
type BlockChain interface {
	LightChain

	// HasBlock reports whether the block with the given hash and number is known locally.
	HasBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block by hash (nil if unknown).
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock returns the head of the full block chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock returns the head of the fast-sync chain (headers + bodies + receipts).
	CurrentFastBlock() *types.Block

	// FastSyncCommitHead atomically sets the fast-sync head to the given hash.
	FastSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of fully validated blocks.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of blocks together with their receipts.
	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}
   161  
   162  func New(mode SyncMode, stateDb neatdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, logger log.Logger) *Downloader {
   163  	if lightchain == nil {
   164  		lightchain = chain
   165  	}
   166  
   167  	dl := &Downloader{
   168  		mode:           mode,
   169  		stateDB:        stateDb,
   170  		mux:            mux,
   171  		queue:          newQueue(),
   172  		peers:          newPeerSet(),
   173  		rttEstimate:    uint64(rttMaxEstimate),
   174  		rttConfidence:  uint64(1000000),
   175  		blockchain:     chain,
   176  		lightchain:     lightchain,
   177  		dropPeer:       dropPeer,
   178  		headerCh:       make(chan dataPack, 1),
   179  		bodyCh:         make(chan dataPack, 1),
   180  		receiptCh:      make(chan dataPack, 1),
   181  		bodyWakeCh:     make(chan bool, 1),
   182  		receiptWakeCh:  make(chan bool, 1),
   183  		headerProcCh:   make(chan []*types.Header, 1),
   184  		quitCh:         make(chan struct{}),
   185  		stateCh:        make(chan dataPack),
   186  		stateSyncStart: make(chan *stateSync),
   187  		syncStatsState: stateSyncStats{
   188  			processed: rawdb.ReadFastTrieProgress(stateDb),
   189  		},
   190  		trackStateReq: make(chan *stateReq),
   191  
   192  		logger: logger,
   193  	}
   194  	go dl.qosTuner()
   195  	go dl.stateFetcher()
   196  	return dl
   197  }
   198  
   199  func (d *Downloader) Progress() neatio.SyncProgress {
   200  
   201  	d.syncStatsLock.RLock()
   202  	defer d.syncStatsLock.RUnlock()
   203  
   204  	current := uint64(0)
   205  	switch d.mode {
   206  	case FullSync:
   207  		current = d.blockchain.CurrentBlock().NumberU64()
   208  	case FastSync:
   209  		current = d.blockchain.CurrentFastBlock().NumberU64()
   210  	}
   211  	return neatio.SyncProgress{
   212  		StartingBlock: d.syncStatsChainOrigin,
   213  		CurrentBlock:  current,
   214  		HighestBlock:  d.syncStatsChainHeight,
   215  		PulledStates:  d.syncStatsState.processed,
   216  		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
   217  	}
   218  }
   219  
   220  func (d *Downloader) Synchronising() bool {
   221  	return atomic.LoadInt32(&d.synchronising) > 0
   222  }
   223  
   224  func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
   225  	logger := d.logger.New("peer", id)
   226  	logger.Trace("Registering sync peer")
   227  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   228  		logger.Error("Failed to register sync peer", "err", err)
   229  		return err
   230  	}
   231  	d.qosReduceConfidence()
   232  
   233  	return nil
   234  }
   235  
// RegisterLightPeer injects a light client peer, wrapping it in a
// lightPeerWrapper so it satisfies the full Peer interface.
func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}
   239  
   240  func (d *Downloader) UnregisterPeer(id string) error {
   241  
   242  	logger := d.logger.New("peer", id)
   243  	logger.Trace("Unregistering sync peer")
   244  	if err := d.peers.Unregister(id); err != nil {
   245  		logger.Error("Failed to unregister sync peer", "err", err)
   246  		return err
   247  	}
   248  	d.queue.Revoke(id)
   249  
   250  	d.cancelLock.RLock()
   251  	master := id == d.cancelPeer
   252  	d.cancelLock.RUnlock()
   253  
   254  	if master {
   255  		d.Cancel()
   256  	}
   257  	return nil
   258  }
   259  
   260  func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
   261  	err := d.synchronise(id, head, td, mode)
   262  	switch err {
   263  	case nil:
   264  	case errBusy:
   265  
   266  	case errTimeout, errBadPeer, errStallingPeer,
   267  		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
   268  		errInvalidAncestor, errInvalidChain:
   269  		d.logger.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   270  		if d.dropPeer == nil {
   271  
   272  			d.logger.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   273  		} else {
   274  			d.dropPeer(id)
   275  		}
   276  	default:
   277  		d.logger.Warn("Synchronisation failed, retrying", "err", err)
   278  	}
   279  	return err
   280  }
   281  
// synchronise is the inner sync driver: it ensures only one sync runs at a
// time, drains all leftover state from a previous run, arms the cancellation
// machinery and hands off to syncWithPeer. Returns errBusy if a sync is
// already in progress.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Test hook replacing the real sync, if set.
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once.
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Log the start of synchronisation exactly once per process.
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		d.logger.Info("Block synchronisation started")
	}
	// Reset the queue and peer set to clean any internal leftover state.
	d.queue.Reset()
	d.peers.Reset()

	// Drain stale wake signals left over from a previous sync.
	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	// Drain any pending data packets from a previous sync.
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	// Drain any queued header batches awaiting processing.
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create a fresh cancel channel and remember the master peer.
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // make sure the cancel channel never stays open

	// Adopt the requested sync mode for this run.
	d.mode = mode

	// Retrieve the origin peer and initiate the downloading process.
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}
   338  
// syncWithPeer starts a block synchronisation based on the hash chain from
// the specified peer and head hash, posting Start/Done/Failed events on the
// mux around the run.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// Report the outcome to event listeners on the way out.
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	d.logger.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		d.logger.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())

	// Look up the sync boundaries: the remote head and the common ancestor.
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, height)
	if err != nil {
		return err
	}
	// Record the sync boundaries for progress reporting.
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// In fast sync, make sure the origin lies below the pivot point.
	pivot := uint64(0)
	if d.mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			// Chain shorter than the full-block window: sync everything fully.
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	// Mark the pivot as uncommitted until its state has been downloaded.
	d.committed = 1
	if d.mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm.
	d.queue.Prepare(origin+1, d.mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },
		func() error { return d.fetchReceipts(origin + 1) },
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	// The content processor is appended last: spawnSync relies on this ordering.
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}
   409  
   410  func (d *Downloader) spawnSync(fetchers []func() error) error {
   411  	var wg sync.WaitGroup
   412  	errc := make(chan error, len(fetchers))
   413  	wg.Add(len(fetchers))
   414  	for _, fn := range fetchers {
   415  		fn := fn
   416  		go func() { defer wg.Done(); errc <- fn() }()
   417  	}
   418  
   419  	var err error
   420  	for i := 0; i < len(fetchers); i++ {
   421  		if i == len(fetchers)-1 {
   422  
   423  			d.queue.Close()
   424  		}
   425  		if err = <-errc; err != nil {
   426  			break
   427  		}
   428  	}
   429  	d.queue.Close()
   430  	d.Cancel()
   431  	wg.Wait()
   432  	return err
   433  }
   434  
   435  func (d *Downloader) Cancel() {
   436  
   437  	d.cancelLock.Lock()
   438  	if d.cancelCh != nil {
   439  		select {
   440  		case <-d.cancelCh:
   441  
   442  		default:
   443  			close(d.cancelCh)
   444  		}
   445  	}
   446  	d.cancelLock.Unlock()
   447  }
   448  
// Terminate shuts the downloader down for good: it closes the quit channel
// exactly once (guarded by quitLock) and cancels any sync in progress.
func (d *Downloader) Terminate() {
	// Close the termination channel, protecting against double close.
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Abort any ongoing sync as well.
	d.Cancel()
}
   461  
// fetchHeight retrieves the head header of the remote peer, used to size the
// pending synchronisation. It requests exactly one header by the peer's
// advertised head hash and waits for it, bounded by the request TTL.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block.
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not originating from our target peer.
			if packet.PeerId() != p.id {
				d.logger.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Exactly one header was requested; anything else is a bad peer.
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds deliveries: drain and ignore.
		}
	}
}
   501  
// findAncestor locates the common ancestor of the local chain and the remote
// peer's chain. It first samples headers at fixed 16-block intervals below
// the head (the usual in-sync case); if no known header is found it falls
// back to a binary search over block numbers. The accepted ancestor is
// bounded by MaxForkAncestry below the local head to resist deep-reorg
// attacks.
func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks.
	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()

	if d.mode == FullSync {
		ceil = d.blockchain.CurrentBlock().NumberU64()
	} else if d.mode == FastSync {
		ceil = d.blockchain.CurrentFastBlock().NumberU64()
	}
	if ceil >= MaxForkAncestry {
		floor = int64(ceil - MaxForkAncestry)
	}
	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)

	// Request the topmost blocks to short circuit the binary ancestor lookup.
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(MaxHeaderFetch)
	if from < 0 {
		from = 0
	}
	// Sample every 16th block (skip 15), capped so at most `limit` headers span
	// up to and past the local head (to catch peers lying about their head).
	limit := 2 * MaxHeaderFetch / 16
	count := 1 + int((int64(ceil)-from)/16)
	if count > limit {
		count = limit
	}
	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)

	// Wait for the remote response to the sampled-header fetch.
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not originating from our target peer.
			if packet.PeerId() != p.id {
				d.logger.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid.
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Every returned header must sit exactly on the 16-block grid requested.
			for i := 0; i < len(headers); i++ {
				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
					return 0, errInvalidChain
				}
			}
			// Scan from the newest sample downwards for a locally known header.
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that fall outside our requested [from, ceil] window.
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
					continue
				}
				// Check whether we already know this header/block locally.
				if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
					number, hash = headers[i].Number.Uint64(), headers[i].Hash()

					// If even the last future sample is known, the peer lied about its head.
					if number > height && i == limit-1 {
						p.log.Warn("Lied about chain head", "reported", height, "found", number)
						return 0, errStallingPeer
					}
					break
				}
			}

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds deliveries: drain and ignore.
		}
	}
	// If the sampled fetch already found an ancestor, validate and return it.
	if !common.EmptyHash(hash) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found: binary search over the allowed number range.
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		// Split the interval in two and request the midpoint header.
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(check, 1, 0, false)

		// Wait until a reply arrives to this request.
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not originating from our target peer.
				if packer.PeerId() != p.id {
					d.logger.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
					break
				}
				// Exactly one header was requested; anything else is a bad peer.
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Debug("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Narrow the search interval based on whether we know the header.
				if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(headers[0].Hash())
				if header.Number.Uint64() != check {
					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check

			case <-timeout:
				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds deliveries: drain and ignore.
			}
		}
	}
	// Ensure the found ancestor is within the allowed range and return it.
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
   660  
// fetchHeaders keeps retrieving headers from the given origin until no more
// are returned. To allow concurrent downloads while still protecting against
// bad headers, a header-chain skeleton is first fetched from the origin peer
// (every MaxHeaderFetch-th header) and the gaps are filled in by other peers
// via fillHeaderSkeleton; when the skeleton runs dry, the remaining head
// headers are pulled directly from the origin peer.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
	p.log.Debug("Directing header downloads", "origin", from)
	defer p.log.Debug("Header download terminated")

	// Request state plus a reusable timeout timer.
	skeleton := true            // skeleton assembly phase or direct finishing up
	request := time.Now()       // time of the last fetch request (for metrics)
	timeout := time.NewTimer(0) // delivery timeout timer
	<-timeout.C                 // the timeout channel must start out empty
	defer timeout.Stop()

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			// Request every MaxHeaderFetch-th header to form the skeleton.
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
		}
	}
	// Start pulling the header chain skeleton until all is done.
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Only the origin peer may deliver skeleton headers.
			if packet.PeerId() != p.id {
				d.logger.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// Empty skeleton response: switch to direct head-header fetching.
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
			// No more headers at all are inbound.
			if packet.Items() == 0 {
				// Don't abort header fetches while the fast-sync pivot is still
				// being committed; poll again after fsHeaderContCheck.
				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
					p.log.Debug("No headers, waiting for pivot commit")
					select {
					case <-time.After(fsHeaderContCheck):
						getHeaders(from)
						continue
					case <-d.cancelCh:
						return errCancelHeaderFetch
					}
				}
				// Signal the processor that header retrieval is complete.
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

			// For a skeleton batch, resolve the gaps concurrently via other peers.
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Debug("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			}
			// Hand the new headers to the processor and fetch the next batch.
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
			}
			getHeaders(from)

		case <-timeout.C:
			if d.dropPeer == nil {
				// No drop function configured; warn and keep waiting.
				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
				break
			}
			// Header retrieval timed out: consider the peer bad and drop it.
			p.log.Debug("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Wind the sync down gracefully instead of dumping gathered data:
			// terminate the content fetchers and the header processor.
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
   778  
   779  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
   780  	d.logger.Debug("Filling up skeleton", "from", from)
   781  	d.queue.ScheduleSkeleton(from, skeleton)
   782  
   783  	var (
   784  		deliver = func(packet dataPack) (int, error) {
   785  			pack := packet.(*headerPack)
   786  			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
   787  		}
   788  		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
   789  		throttle = func() bool { return false }
   790  		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
   791  			return d.queue.ReserveHeaders(p, count), false, nil
   792  		}
   793  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
   794  		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
   795  		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
   796  	)
   797  	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
   798  		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
   799  		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
   800  
   801  	d.logger.Debug("Skeleton fill terminated", "err", err)
   802  
   803  	filled, proced := d.queue.RetrieveHeaders()
   804  	return filled, proced, err
   805  }
   806  
   807  func (d *Downloader) fetchBodies(from uint64) error {
   808  	d.logger.Debug("Downloading block bodies", "origin", from)
   809  
   810  	var (
   811  		deliver = func(packet dataPack) (int, error) {
   812  			pack := packet.(*bodyPack)
   813  			return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
   814  		}
   815  		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
   816  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
   817  		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
   818  		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
   819  	)
   820  	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
   821  		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
   822  		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
   823  
   824  	d.logger.Debug("Block body download terminated", "err", err)
   825  	return err
   826  }
   827  
   828  func (d *Downloader) fetchReceipts(from uint64) error {
   829  	d.logger.Debug("Downloading transaction receipts", "origin", from)
   830  
   831  	var (
   832  		deliver = func(packet dataPack) (int, error) {
   833  			pack := packet.(*receiptPack)
   834  			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
   835  		}
   836  		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
   837  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
   838  		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
   839  		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
   840  	)
   841  	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
   842  		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
   843  		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
   844  
   845  	d.logger.Debug("Transaction receipt download terminated", "err", err)
   846  	return err
   847  }
   848  
// fetchParts is the generic retrieval loop shared by the header, body and
// receipt fetchers. On each pass it delivers inbound packets into the queue,
// expires timed-out requests (idling or dropping the responsible peers), and
// assigns fresh requests to idle peers up to their capacity. The many
// function parameters are thin adapters over the type-specific queue/peer
// methods (see fillHeaderSkeleton, fetchBodies, fetchReceipts). It returns
// errCancel on cancellation and nil once all pending work is done and the
// wake channel has signalled completion.
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

	// Ticker guaranteeing periodic expiry checks even with no other events.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

	finished := false // set once the wake channel signals no more work upstream
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
			// Ignore deliveries from peers no longer in the active set.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				// Deliver the received chunk and bail on chain validity errors.
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
				// Re-idle the peer unless the delivery was stale (in which case
				// it should already have been idled by the expiry path).
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
				// Trace the outcome for operators.
				switch {
				case err == nil && packet.Items() == 0:
					peer.log.Trace("Requested data not delivered", "type", kind)
				case err == nil:
					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
				default:
					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
				}
			}
			// Nudge the assignment logic (non-blocking, coalesced).
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
			// The upstream fetcher sent a continuation flag; false means done.
			if !cont {
				finished = true
			}
			// Nudge the assignment logic.
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Periodic sanity nudge so expiries are always processed.
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers.
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Handle request timeouts: demote repeat offenders, drop stallers.
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
					// Many expired items suggests an overestimated capacity:
					// reset the peer to minimal throughput rather than dropping.
					if fails > 2 {
						peer.log.Trace("Data delivery timed out", "type", kind)
						setIdle(peer, 0)
					} else {
						peer.log.Debug("Stalling delivery, dropping", "type", kind)
						if d.dropPeer == nil {
							// No drop function configured; only warn.
							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
						} else {
							d.dropPeer(pid)
						}
					}
				}
			}
			// Nothing pending: terminate if upstream finished and nothing is in flight.
			if pending() == 0 {
				if !inFlight() && finished {
					d.logger.Debug("Data fetching completed", "type", kind)
					return nil
				}
				break
			}
			// Assign download requests to idle peers until throttled or drained.
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
				// Stop assigning once throttling kicks in.
				if throttle() {
					throttled = true
					break
				}
				// Stop once there are no more tasks available.
				if pending() == 0 {
					break
				}
				// Reserve a chunk of fetches for the peer; a nil request can mean
				// either nothing reservable or the peer is known to lack the data.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				if request.From > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
				} else {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
				}
				// Fire the fetch; a failure here means a double-assigned task,
				// i.e. corrupted internal state, hence the panic.
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {

					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
				}
				running = true
			}
			// If every idle peer was tried and none made progress, give up.
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
   988  }
   989  
// processHeaders takes batches of retrieved headers from an input channel,
// verifies and schedules them into the header chain and the download queue
// until the stream ends (signalled by an empty batch) or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {

	// Track fast-sync headers that were inserted but not yet confirmed by
	// block content, so they can be rolled back if the chain turns invalid.
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {

			// Flatten the headers and roll them back on the header chain, then
			// report how far each chain view receded.
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			lastFastBlock = d.blockchain.CurrentFastBlock().Number()
			lastBlock = d.blockchain.CurrentBlock().Number()
			d.lightchain.Rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			curFastBlock = d.blockchain.CurrentFastBlock().Number()
			curBlock = d.blockchain.CurrentBlock().Number()
			d.logger.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
		}
	}()

	// gotHeaders flags whether any non-empty header batch arrived at all.
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:

			// An empty batch means the header stream has ended.
			if len(headers) == 0 {

				// Notify the content fetchers that no more headers are coming.
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}

				// If the peer advertised a higher TD than what we imported
				// while delivering nothing, it is withholding data: stall it.
				head := d.blockchain.CurrentBlock()
				if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
					return errStallingPeer
				}

				// In fast sync, apply the same stall check against the header chain.
				if d.mode == FastSync {
					head := d.lightchain.CurrentHeader()
					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
						return errStallingPeer
					}
				}

				// Clean termination: disarm the deferred rollback on the way out.
				rollback = nil
				return nil
			}

			gotHeaders = true

			// Process the batch in chunks of at most maxHeadersProcess headers.
			for len(headers) > 0 {

				// Abort between chunks if the sync was cancelled meanwhile.
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}

				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

				if d.mode == FastSync {

					// Collect headers not yet known to the header chain; only
					// those become rollback candidates after insertion.
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
							unknown = append(unknown, header)
						}
					}

					// Verify every header near the pivot; further away only
					// every fsHeaderCheckFrequency-th one is fully verified.
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {

						// Partial insert: mark what did go in for rollback.
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						d.logger.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
						return errInvalidChain
					}

					// Cap the rollback set to the last fsHeaderSafetyNet headers.
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}

				if d.mode == FullSync || d.mode == FastSync {

					// Throttle while the queue is saturated with pending content.
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}

					// Schedule the chunk for body/receipt retrieval; a shortfall
					// means the peer delivered headers we did not request.
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						d.logger.Debug("Stale headers")
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}

			// Update the externally visible sync progress high-water mark.
			d.syncStatsLock.Lock()
			if d.syncStatsChainHeight < origin {
				d.syncStatsChainHeight = origin - 1
			}
			d.syncStatsLock.Unlock()

			// Signal the content fetchers that new work is available.
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}
  1127  
  1128  func (d *Downloader) processFullSyncContent() error {
  1129  	for {
  1130  		results := d.queue.Results(true)
  1131  		if len(results) == 0 {
  1132  			return nil
  1133  		}
  1134  		if d.chainInsertHook != nil {
  1135  			d.chainInsertHook(results)
  1136  		}
  1137  		if err := d.importBlockResults(results); err != nil {
  1138  			return err
  1139  		}
  1140  	}
  1141  }
  1142  
  1143  func (d *Downloader) importBlockResults(results []*fetchResult) error {
  1144  
  1145  	if len(results) == 0 {
  1146  		return nil
  1147  	}
  1148  	select {
  1149  	case <-d.quitCh:
  1150  		return errCancelContentProcessing
  1151  	default:
  1152  	}
  1153  
  1154  	first, last := results[0].Header, results[len(results)-1].Header
  1155  	d.logger.Debug("Inserting downloaded chain", "items", len(results),
  1156  		"firstnum", first.Number, "firsthash", first.Hash(),
  1157  		"lastnum", last.Number, "lasthash", last.Hash(),
  1158  	)
  1159  	blocks := make([]*types.Block, len(results))
  1160  	for i, result := range results {
  1161  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1162  	}
  1163  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
  1164  		d.logger.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1165  		return errInvalidChain
  1166  	}
  1167  	return nil
  1168  }
  1169  
// processFastSyncContent takes fetch results from the queue and writes them
// to the database, while also driving the state-trie synchronisation of the
// pivot block and committing the pivot once its state is complete.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {

	// Start syncing state of the reported head block; close the queue on a
	// genuine state-sync failure so blocked consumers wake up.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	go func() {
		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
			d.queue.Close()
		}
	}()

	// Choose the pivot: keep the last fsMinFullBlocks as full blocks, fast-sync
	// everything older.
	pivot := uint64(0)
	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
		pivot = height - uint64(fsMinFullBlocks)
	}

	var (
		oldPivot *fetchResult // pivot whose commit is still pending
		oldTail  []*fetchResult // results after the pending pivot, held back
	)
	for {

		// Block for results only while no pivot commit is pending; otherwise
		// poll so the pending pivot can be re-checked every iteration.
		results := d.queue.Results(oldPivot == nil)
		if len(results) == 0 {

			// No results and no pending pivot: the sync cycle is complete.
			if oldPivot == nil {
				return stateSync.Cancel()
			}

			select {
			case <-d.cancelCh:
				return stateSync.Cancel()
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// Re-prepend the pending pivot and its held-back tail to the batch.
		if oldPivot != nil {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}

		// While the pivot is not committed yet, move it forward if it became
		// stale relative to the freshly downloaded chain head.
		if atomic.LoadInt32(&d.committed) == 0 {
			latest = results[len(results)-1].Header
			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
				d.logger.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
				pivot = height - uint64(fsMinFullBlocks)
			}
		}
		// Split the batch around the pivot and fast-sync everything below it.
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {

			// If the pivot moved, restart state sync on the new pivot's root.
			if oldPivot != P {
				stateSync.Cancel()

				stateSync = d.syncState(P.Header.Root)
				// NOTE(review): this defer fires at function return, not at loop
				// iteration end, so repeated pivot moves stack up Cancel calls.
				// Cancel looks safe to invoke multiple times — confirm.
				defer stateSync.Cancel()
				go func() {
					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
						d.queue.Close()
					}
				}()
				oldPivot = P
			}

			// Give state sync up to a second to finish before committing the
			// pivot; otherwise hold back the tail and retry next iteration.
			select {
			case <-stateSync.done:
				if stateSync.err != nil {
					return stateSync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}

		// Pivot committed (or none in this batch): import the rest as full blocks.
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}
  1258  
  1259  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1260  	for _, result := range results {
  1261  		num := result.Header.Number.Uint64()
  1262  		switch {
  1263  		case num < pivot:
  1264  			before = append(before, result)
  1265  		case num == pivot:
  1266  			p = result
  1267  		default:
  1268  			after = append(after, result)
  1269  		}
  1270  	}
  1271  	return p, before, after
  1272  }
  1273  
  1274  func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
  1275  
  1276  	if len(results) == 0 {
  1277  		return nil
  1278  	}
  1279  	select {
  1280  	case <-d.quitCh:
  1281  		return errCancelContentProcessing
  1282  	case <-stateSync.done:
  1283  		if err := stateSync.Wait(); err != nil {
  1284  			return err
  1285  		}
  1286  	default:
  1287  	}
  1288  
  1289  	first, last := results[0].Header, results[len(results)-1].Header
  1290  	d.logger.Debug("Inserting fast-sync blocks", "items", len(results),
  1291  		"firstnum", first.Number, "firsthash", first.Hash(),
  1292  		"lastnumn", last.Number, "lasthash", last.Hash(),
  1293  	)
  1294  	blocks := make([]*types.Block, len(results))
  1295  	receipts := make([]types.Receipts, len(results))
  1296  	for i, result := range results {
  1297  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1298  		receipts[i] = result.Receipts
  1299  	}
  1300  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
  1301  		d.logger.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1302  		return errInvalidChain
  1303  	}
  1304  	return nil
  1305  }
  1306  
  1307  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1308  	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1309  	d.logger.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1310  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
  1311  		return err
  1312  	}
  1313  	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
  1314  		return err
  1315  	}
  1316  	atomic.StoreInt32(&d.committed, 1)
  1317  	return nil
  1318  }
  1319  
  1320  func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
  1321  	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
  1322  }
  1323  
  1324  func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
  1325  	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
  1326  }
  1327  
  1328  func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
  1329  	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
  1330  }
  1331  
  1332  func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
  1333  	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
  1334  }
  1335  
  1336  func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
  1337  
  1338  	inMeter.Mark(int64(packet.Items()))
  1339  	defer func() {
  1340  		if err != nil {
  1341  			dropMeter.Mark(int64(packet.Items()))
  1342  		}
  1343  	}()
  1344  
  1345  	d.cancelLock.RLock()
  1346  	cancel := d.cancelCh
  1347  	d.cancelLock.RUnlock()
  1348  	if cancel == nil {
  1349  		return errNoSyncActive
  1350  	}
  1351  	select {
  1352  	case destCh <- packet:
  1353  		return nil
  1354  	case <-cancel:
  1355  		return errNoSyncActive
  1356  	}
  1357  }
  1358  
  1359  func (d *Downloader) qosTuner() {
  1360  	for {
  1361  
  1362  		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
  1363  		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
  1364  
  1365  		conf := atomic.LoadUint64(&d.rttConfidence)
  1366  		conf = conf + (1000000-conf)/2
  1367  		atomic.StoreUint64(&d.rttConfidence, conf)
  1368  
  1369  		d.logger.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1370  		select {
  1371  		case <-d.quitCh:
  1372  			return
  1373  		case <-time.After(rtt):
  1374  		}
  1375  	}
  1376  }
  1377  
  1378  func (d *Downloader) qosReduceConfidence() {
  1379  
  1380  	peers := uint64(d.peers.Len())
  1381  	if peers == 0 {
  1382  
  1383  		return
  1384  	}
  1385  	if peers == 1 {
  1386  		atomic.StoreUint64(&d.rttConfidence, 1000000)
  1387  		return
  1388  	}
  1389  
  1390  	if peers >= uint64(qosConfidenceCap) {
  1391  		return
  1392  	}
  1393  
  1394  	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
  1395  	if float64(conf)/1000000 < rttMinConfidence {
  1396  		conf = uint64(rttMinConfidence * 1000000)
  1397  	}
  1398  	atomic.StoreUint64(&d.rttConfidence, conf)
  1399  
  1400  	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1401  	d.logger.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1402  }
  1403  
  1404  func (d *Downloader) requestRTT() time.Duration {
  1405  	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
  1406  }
  1407  
  1408  func (d *Downloader) requestTTL() time.Duration {
  1409  	var (
  1410  		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1411  		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
  1412  	)
  1413  	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
  1414  	if ttl > ttlLimit {
  1415  		ttl = ttlLimit
  1416  	}
  1417  	return ttl
  1418  }