github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/eth/downloader/downloader.go (about)

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 19:16:37</date>
    10  //</624450087904350208>
    11  
    12  
    13  //Package downloader contains the manual full chain synchronisation.
    14  package downloader
    15  
    16  import (
    17  	"errors"
    18  	"fmt"
    19  	"math/big"
    20  	"sync"
    21  	"sync/atomic"
    22  	"time"
    23  
    24  	ethereum "github.com/ethereum/go-ethereum"
    25  	"github.com/ethereum/go-ethereum/common"
    26  	"github.com/ethereum/go-ethereum/core/rawdb"
    27  	"github.com/ethereum/go-ethereum/core/types"
    28  	"github.com/ethereum/go-ethereum/ethdb"
    29  	"github.com/ethereum/go-ethereum/event"
    30  	"github.com/ethereum/go-ethereum/log"
    31  	"github.com/ethereum/go-ethereum/metrics"
    32  	"github.com/ethereum/go-ethereum/params"
    33  )
    34  
var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation depth tolerated
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1                      // Worst confidence factor applied to an estimated RTT value
	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute              // Maximum TTL allowance, preventing reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (the best ones)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	reorgProtThreshold   = 48 // Threshold number of recent blocks below which mini-reorg protection is disabled
	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs

	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048            // Number of headers to discard when a chain violation is detected
	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
)
    68  
// Sentinel errors classifying the ways a synchronisation attempt can fail.
// Several of these (see Synchronise) are considered severe enough that the
// offending peer is dropped from the peer set.
var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)
    93  
// Downloader coordinates chain synchronisation with remote peers, scheduling
// header/body/receipt retrievals over the active peer set and feeding the
// results into the local chain.
type Downloader struct {
	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *event.TypeMux // Event multiplexer to announce sync operation events

	genesis uint64   // Genesis block number to limit sync to (e.g. light client CHT)
	queue   *queue   // Scheduler for selecting the hashes to download
	peers   *peerSet // Set of active peers from which download can proceed
	stateDB ethdb.Database

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths, to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64 // Origin block number where syncing started at
	syncStatsChainHeight uint64 // Highest block number known when syncing started
	syncStatsState       stateSyncStats
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	lightchain LightChain
	blockchain BlockChain

	// Callbacks
	dropPeer peerDropFn // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32
	notified        int32
	committed       int32

	// Channels
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	// For stateFetcher
	stateSyncStart chan *stateSync
	trackStateReq  chan *stateReq
	stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data

	// Cancellation and termination
	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex   // Lock to protect the cancel channel and master peer in delivers
	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.RWMutex  // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}
   152  
// LightChain encapsulates the functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of a local block.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// Rollback removes a few recently added elements from the local chain.
	Rollback([]common.Hash)
}
   173  
// BlockChain encapsulates the functions required to synchronise a (full or
// fast) blockchain. It extends LightChain with block- and receipt-level access.
type BlockChain interface {
	LightChain

	// HasBlock verifies a block's presence in the local chain.
	HasBlock(common.Hash, uint64) bool

	// HasFastBlock verifies a fast block's presence in the local chain.
	HasFastBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock retrieves the head fast block from the local chain.
	CurrentFastBlock() *types.Block

	// FastSyncCommitHead directly commits the head block to a certain entity.
	FastSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}
   202  
// New creates a new downloader to fetch hashes and blocks from remote peers.
// If no light chain is supplied, the full blockchain is used for header access
// too, so the rest of the code only ever deals with a single handle.
func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}

	dl := &Downloader{
		mode:           mode,
		stateDB:        stateDb,
		mux:            mux,
		queue:          newQueue(),
		peers:          newPeerSet(),
		rttEstimate:    uint64(rttMaxEstimate),
		rttConfidence:  uint64(1000000),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerCh:       make(chan dataPack, 1),
		bodyCh:         make(chan dataPack, 1),
		receiptCh:      make(chan dataPack, 1),
		bodyWakeCh:     make(chan bool, 1),
		receiptWakeCh:  make(chan bool, 1),
		headerProcCh:   make(chan []*types.Header, 1),
		quitCh:         make(chan struct{}),
		stateCh:        make(chan dataPack),
		stateSyncStart: make(chan *stateSync),
		syncStatsState: stateSyncStats{
			processed: rawdb.ReadFastTrieProgress(stateDb),
		},
		trackStateReq: make(chan *stateReq),
	}
	go dl.qosTuner()     // Background loop adjusting the RTT/QoS estimates
	go dl.stateFetcher() // Background loop scheduling state-sync requests
	return dl
}
   238  
   239  //进程检索同步边界,特别是起源。
   240  //同步开始于的块(可能已失败/暂停);块
   241  //或头同步当前位于;以及同步目标的最新已知块。
   242  //
   243  //此外,在快速同步的状态下载阶段,
   244  //同时返回已处理状态和已知状态总数。否则
   245  //这些都是零。
   246  func (d *Downloader) Progress() ethereum.SyncProgress {
   247  //锁定当前状态并返回进度
   248  	d.syncStatsLock.RLock()
   249  	defer d.syncStatsLock.RUnlock()
   250  
   251  	current := uint64(0)
   252  	switch d.mode {
   253  	case FullSync:
   254  		current = d.blockchain.CurrentBlock().NumberU64()
   255  	case FastSync:
   256  		current = d.blockchain.CurrentFastBlock().NumberU64()
   257  	case LightSync:
   258  		current = d.lightchain.CurrentHeader().Number.Uint64()
   259  	}
   260  	return ethereum.SyncProgress{
   261  		StartingBlock: d.syncStatsChainOrigin,
   262  		CurrentBlock:  current,
   263  		HighestBlock:  d.syncStatsChainHeight,
   264  		PulledStates:  d.syncStatsState.processed,
   265  		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
   266  	}
   267  }
   268  
   269  //同步返回下载程序当前是否正在检索块。
   270  func (d *Downloader) Synchronising() bool {
   271  	return atomic.LoadInt32(&d.synchronising) > 0
   272  }
   273  
   274  //registerpeer将一个新的下载对等注入到要
   275  //用于从获取哈希和块。
   276  func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
   277  	logger := log.New("peer", id)
   278  	logger.Trace("Registering sync peer")
   279  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   280  		logger.Error("Failed to register sync peer", "err", err)
   281  		return err
   282  	}
   283  	d.qosReduceConfidence()
   284  
   285  	return nil
   286  }
   287  
   288  //Regiterlightpeer注入一个轻量级客户端对等端,将其包装起来,使其看起来像一个普通对等端。
   289  func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
   290  	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
   291  }
   292  
   293  //注销对等机从已知列表中删除对等机,以阻止
   294  //指定的对等机。还将努力将任何挂起的回迁返回到
   295  //排队。
   296  func (d *Downloader) UnregisterPeer(id string) error {
   297  //从活动对等机集中注销对等机并撤消任何获取任务
   298  	logger := log.New("peer", id)
   299  	logger.Trace("Unregistering sync peer")
   300  	if err := d.peers.Unregister(id); err != nil {
   301  		logger.Error("Failed to unregister sync peer", "err", err)
   302  		return err
   303  	}
   304  	d.queue.Revoke(id)
   305  
   306  //如果此对等是主对等,则立即中止同步
   307  	d.cancelLock.RLock()
   308  	master := id == d.cancelPeer
   309  	d.cancelLock.RUnlock()
   310  
   311  	if master {
   312  		d.cancel()
   313  	}
   314  	return nil
   315  }
   316  
   317  //Synchronise尝试将本地区块链与远程对等机同步,两者都是
   318  //添加各种健全性检查,并用各种日志条目包装它。
   319  func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
   320  	err := d.synchronise(id, head, td, mode)
   321  	switch err {
   322  	case nil:
   323  	case errBusy:
   324  
   325  	case errTimeout, errBadPeer, errStallingPeer,
   326  		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
   327  		errInvalidAncestor, errInvalidChain:
   328  		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   329  		if d.dropPeer == nil {
   330  //当对本地副本使用“--copydb”时,droppeer方法为nil。
   331  //如果压缩在错误的时间命中,则可能发生超时,并且可以忽略。
   332  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   333  		} else {
   334  			d.dropPeer(id)
   335  		}
   336  	default:
   337  		log.Warn("Synchronisation failed, retrying", "err", err)
   338  	}
   339  	return err
   340  }
   341  
// synchronise will select the peer and use it for synchronising. If an empty
// string is given it will use the best peer possible and synchronise if its TD
// is higher than our own. If any of the checks fail an error will be returned.
// This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clear any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	// Drain stale wake signals (non-blocking receive empties the 1-buffered channels)
	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	// Drain any data packets left over from a previous (aborted) sync
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	// Drain any headers still queued for processing
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.mode = mode

	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}
   404  
// syncWithPeer starts a block synchronisation based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// Report the sync outcome on the event mux (err is the named return)
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, latest)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any fast sync pivot point
	pivot := uint64(0)
	if d.mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	// Mark the pivot as uncommitted while a fast sync still has one pending
	d.committed = 1
	if d.mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, d.mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}
   479  
// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	d.cancelWg.Add(len(fetchers))
	for _, fn := range fetchers {
		fn := fn // capture the loop variable for the goroutine closure
		go func() { defer d.cancelWg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers); i++ {
		if i == len(fetchers)-1 {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	d.queue.Close()
	d.Cancel()
	return err
}
   506  
   507  //取消中止所有操作并重置队列。但是,取消是
   508  //not wait for the running download goroutines to finish. This method should be
   509  //从下载程序内部取消下载时使用。
   510  func (d *Downloader) cancel() {
   511  //关闭当前取消频道
   512  	d.cancelLock.Lock()
   513  	if d.cancelCh != nil {
   514  		select {
   515  		case <-d.cancelCh:
   516  //频道已关闭
   517  		default:
   518  			close(d.cancelCh)
   519  		}
   520  	}
   521  	d.cancelLock.Unlock()
   522  }
   523  
// Cancel aborts all of the operations and waits for all download goroutines to
// finish before returning.
func (d *Downloader) Cancel() {
	d.cancel()
	d.cancelWg.Wait()
}
   530  
// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double closing is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
		// Already closed by an earlier Terminate call
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.Cancel()
}
   546  
// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block and wait for the response
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}
   589  
   590  //CalculateRequestSpan计算在试图确定
   591  //共同祖先。
   592  //返回peer.requestHeadersByNumber要使用的参数:
   593  //起始块编号
   594  //Count-要请求的头数
   595  //skip-要跳过的头数
   596  //并返回“max”,即远程对等方预期返回的最后一个块,
   597  //给定(从、计数、跳过)
   598  func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
   599  	var (
   600  		from     int
   601  		count    int
   602  		MaxCount = MaxHeaderFetch / 16
   603  	)
   604  //请求头是我们将请求的最高块。如果请求头没有偏移,
   605  //我们将要到达的最高街区是距离头部16个街区,这意味着我们
   606  //在高度差的情况下,将不必要地获取14或15个块。
   607  //我们和同龄人之间是1-2个街区,这是最常见的
   608  	requestHead := int(remoteHeight) - 1
   609  	if requestHead < 0 {
   610  		requestHead = 0
   611  	}
   612  //RequestBottom是我们希望在查询中包含的最低块
   613  //理想情况下,我们希望包括在自己的头脑下面
   614  	requestBottom := int(localHeight - 1)
   615  	if requestBottom < 0 {
   616  		requestBottom = 0
   617  	}
   618  	totalSpan := requestHead - requestBottom
   619  	span := 1 + totalSpan/MaxCount
   620  	if span < 2 {
   621  		span = 2
   622  	}
   623  	if span > 16 {
   624  		span = 16
   625  	}
   626  
   627  	count = 1 + totalSpan/span
   628  	if count > MaxCount {
   629  		count = MaxCount
   630  	}
   631  	if count < 2 {
   632  		count = 2
   633  	}
   634  	from = requestHead - (count-1)*span
   635  	if from < 0 {
   636  		from = 0
   637  	}
   638  	max := from + (count-1)*span
   639  	return int64(from), count, span - 1, uint64(max)
   640  }
   641  
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	var (
		floor        = int64(-1)
		localHeight  uint64
		remoteHeight = remoteHeader.Number.Uint64()
	)
	switch d.mode {
	case FullSync:
		localHeight = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
	default:
		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
	}
	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
	if localHeight >= MaxForkAncestry {
		// We're above the max reorg threshold, find the earliest fork point
		floor = int64(localHeight - MaxForkAncestry)

		// If we're doing a light sync, ensure the floor doesn't go below the CHT,
		// as all headers before that point will be missing.
		if d.mode == LightSync {
			// If we don't know the current CHT position, find it
			if d.genesis == 0 {
				header := d.lightchain.CurrentHeader()
				for header != nil {
					d.genesis = header.Number.Uint64()
					if floor >= int64(d.genesis)-1 {
						break
					}
					header = d.lightchain.GetHeaderByHash(header.ParentHash)
				}
			}
			// We already know the "genesis" block number, cap floor to that
			if floor < int64(d.genesis)-1 {
				floor = int64(d.genesis) - 1
			}
		}
	}
	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)

	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request
			for i, header := range headers {
				expectNumber := from + int64(i)*int64((skip+1))
				if number := header.Number.Int64(); number != expectNumber {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
					continue
				}
				// Otherwise check if we already know the header or not
				h := headers[i].Hash()
				n := headers[i].Number.Uint64()

				var known bool
				switch d.mode {
				case FullSync:
					known = d.blockchain.HasBlock(h, n)
				case FastSync:
					known = d.blockchain.HasFastBlock(h, n)
				default:
					known = d.lightchain.HasHeader(h, n)
				}
				if known {
					number, hash = n, h
					break
				}
			}

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if hash != (common.Hash{}) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), remoteHeight
	if floor > 0 {
		start = uint64(floor)
	}
	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)

	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(check, 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not from the origin peer
				if packer.PeerId() != p.id {
					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Debug("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				h := headers[0].Hash()
				n := headers[0].Number.Uint64()

				var known bool
				switch d.mode {
				case FullSync:
					known = d.blockchain.HasBlock(h, n)
				case FastSync:
					known = d.blockchain.HasFastBlock(h, n)
				default:
					known = d.lightchain.HasHeader(h, n)
				}
				if !known {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, the header surely exists
				if header.Number.Uint64() != check {
					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check
				hash = h

			case <-timeout:
				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
   846  
   847  //FetchHeaders始终从数字中同时检索头
   848  //请求,直到不再返回,可能会在途中限制。到
   849  //方便并发,但仍能防止恶意节点发送错误
   850  //headers,我们使用“origin”对等体构造一个header链骨架。
   851  //正在与同步,并使用其他人填写丢失的邮件头。报头
   852  //只有当其他对等点干净地映射到骨架时,才接受它们。如果没有人
   853  //可以填充骨架-甚至不是源节点-它被假定为无效和
   854  //原点被删除。
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
	p.log.Debug("Directing header downloads", "origin", from)
	defer p.log.Debug("Header download terminated")

//Create a timeout timer, and the associated header fetcher
skeleton := true            //skeleton assembly phase or finishing up
request := time.Now()       //time of the last skeleton fetch request
timeout := time.NewTimer(0) //timer to dump a non-responsive active peer
<-timeout.C                 //timeout channel should be initially empty
	defer timeout.Stop()

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
		}
	}
//Start pulling the header chain skeleton until all is done
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
//Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

//If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
//If no more headers are inbound, notify the content fetchers and return
			if packet.Items() == 0 {
//Don't abort header fetches while the pivot is downloading
				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
					p.log.Debug("No headers, waiting for pivot commit")
					select {
					case <-time.After(fsHeaderContCheck):
						getHeaders(from)
						continue
					case <-d.cancelCh:
						return errCancelHeaderFetch
					}
				}
//Pivot done (or not in fast sync) and no more headers, terminate the process
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

//If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Debug("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			} else {
//If we're closing in on the chain head, but haven't yet reached it, delay
//the last few headers so mini reorgs on the head don't cause invalid hash
//chain errors
				if n := len(headers); n > 0 {
//Retrieve the current head we're at
					head := uint64(0)
					if d.mode == LightSync {
						head = d.lightchain.CurrentHeader().Number.Uint64()
					} else {
						head = d.blockchain.CurrentFastBlock().NumberU64()
						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
							head = full
						}
					}
//If the head is way older than this batch, delay the last few headers
					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
						delay := reorgProtHeaderDelay
						if delay > n {
							delay = n
						}
						headers = headers[:n-delay]
					}
				}
			}
//Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
				getHeaders(from)
			} else {
//No headers delivered, or all of them being delayed, sleep a bit and retry
				p.log.Trace("All headers delayed, waiting")
				select {
				case <-time.After(fsHeaderContCheck):
					getHeaders(from)
					continue
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}

		case <-timeout.C:
			if d.dropPeer == nil {
//The dropPeer method is nil when `--copydb` is used for a local copy.
//Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
				break
			}
//Header retrieval timed out, consider the peer bad and drop
			p.log.Debug("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

//Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
  1011  
  1012  //FillHeaderskeleton同时从所有可用的对等端检索头
  1013  //并将它们映射到提供的骨架头链。
  1014  //
  1015  //从骨架开始的任何部分结果(如果可能)都将被转发
  1016  //立即发送到头处理器,以保持管道的其余部分保持平衡
  1017  //如果收割台失速。
  1018  //
  1019  //该方法返回整个填充骨架以及头的数量。
  1020  //已转发进行处理。
  1021  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
  1022  	log.Debug("Filling up skeleton", "from", from)
  1023  	d.queue.ScheduleSkeleton(from, skeleton)
  1024  
  1025  	var (
  1026  		deliver = func(packet dataPack) (int, error) {
  1027  			pack := packet.(*headerPack)
  1028  			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
  1029  		}
  1030  		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
  1031  		throttle = func() bool { return false }
  1032  		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
  1033  			return d.queue.ReserveHeaders(p, count), false, nil
  1034  		}
  1035  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
  1036  		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
  1037  		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
  1038  	)
  1039  	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
  1040  		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
  1041  		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
  1042  
  1043  	log.Debug("Skeleton fill terminated", "err", err)
  1044  
  1045  	filled, proced := d.queue.RetrieveHeaders()
  1046  	return filled, proced, err
  1047  }
  1048  
  1049  //fetchbodies迭代下载计划的块体,获取
  1050  //可用对等机,为每个对等机保留一大块数据块,等待传递
  1051  //并定期检查超时情况。
  1052  func (d *Downloader) fetchBodies(from uint64) error {
  1053  	log.Debug("Downloading block bodies", "origin", from)
  1054  
  1055  	var (
  1056  		deliver = func(packet dataPack) (int, error) {
  1057  			pack := packet.(*bodyPack)
  1058  			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
  1059  		}
  1060  		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
  1061  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
  1062  		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
  1063  		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
  1064  	)
  1065  	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
  1066  		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
  1067  		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
  1068  
  1069  	log.Debug("Block body download terminated", "err", err)
  1070  	return err
  1071  }
  1072  
  1073  //fetchreceipts迭代地下载计划的块接收,获取
  1074  //可用的对等方,为每个对等方保留一大块收据,等待传递
  1075  //并定期检查超时情况。
  1076  func (d *Downloader) fetchReceipts(from uint64) error {
  1077  	log.Debug("Downloading transaction receipts", "origin", from)
  1078  
  1079  	var (
  1080  		deliver = func(packet dataPack) (int, error) {
  1081  			pack := packet.(*receiptPack)
  1082  			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
  1083  		}
  1084  		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
  1085  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
  1086  		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
  1087  		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
  1088  	)
  1089  	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
  1090  		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
  1091  		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
  1092  
  1093  	log.Debug("Transaction receipt download terminated", "err", err)
  1094  	return err
  1095  }
  1096  
//fetchParts iteratively downloads scheduled block parts, taking any available
//peers, reserving a chunk of fetch requests for each, waiting for delivery and
//also periodically checking for timeouts.
//
//As the scheduling/timeout logic mostly is the same for all downloaded data
//types, this method is used by each for data gathering and is instrumented with
//various callbacks to handle the slight differences between processing them.
//
//The instrumentation parameters:
//- errCancel:  error type to return if the fetch operation is cancelled (mostly makes logging nicer)
//- deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
//- deliver:    processing callback to deliver data packets into type specific download queues (usually within `queue`)
//- wakeCh:     notification channel for waking the fetcher when new tasks are available (or sync completed)
//- expire:     task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
//- pending:    task callback for the number of requests still needing download (detect completion/non-completability)
//- inFlight:   task callback for the number of in-progress requests (wait for all active downloads to finish)
//- throttle:   task callback to check if the processing queue is full and activate throttling (bound memory use)
//- reserve:    task callback to reserve new download tasks to a particular peer (also signals partial completions)
//- fetchHook:  tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
//- fetch:      network callback to actually send a particular download request to a physical remote peer
//- cancel:     task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
//- capacity:   network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
//- idle:       network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
//- setIdle:    network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
//- kind:       textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

//Create a ticker to detect expired retrieval tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

//Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
//If the peer was previously banned and failed to deliver its pack
//in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
//Deliver the received chunk of data and check chain validity
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
//Unless a peer delivered something completely else than requested (usually
//caused by a timed out request which came through in the end), set it to
//idle. If the delivery's stale, the peer should have already been idled.
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
//Issue a log to the user to see what's going on
				switch {
				case err == nil && packet.Items() == 0:
					peer.log.Trace("Requested data not delivered", "type", kind)
				case err == nil:
					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
				default:
					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
				}
			}
//Blocks assembled, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
//The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
//Headers arrived, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
//Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
//Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
//Check for fetch request timeouts and demote the responsible peers
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
//If a lot of retrieval elements expired, we might have overestimated the remote
//peer or perhaps ourselves. Only reset to minimal throughput but don't drop just
//yet. If even the minimal times out, the peer needs to go.
//
//The reason the minimum threshold is 2 is because the downloader tries to estimate
//the bandwidth and latency of a peer separately, which requires pushing the measured
//capacity a bit and seeing how response times react; so it always requests one more
//than the minimum (i.e. min 2).
					if fails > 2 {
						peer.log.Trace("Data delivery timed out", "type", kind)
						setIdle(peer, 0)
					} else {
						peer.log.Debug("Stalling delivery, dropping", "type", kind)
						if d.dropPeer == nil {
//The dropPeer method is nil when `--copydb` is used for a local copy.
//Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
						} else {
							d.dropPeer(pid)
						}
					}
				}
			}
//If there's nothing more to fetch, wait or terminate
			if pending() == 0 {
				if !inFlight() && finished {
					log.Debug("Data fetching completed", "type", kind)
					return nil
				}
				break
			}
//Send a download request to all idle peers, until throttled
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
//Short circuit if throttling activated
				if throttle() {
					throttled = true
					break
				}
//Short circuit if there is no more available task.
				if pending() == 0 {
					break
				}
//Reserve a chunk of fetches for a peer. A nil can mean either that
//no more headers are available, or that the peer is known not to
//have them.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				if request.From > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
				} else {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
				}
//Fetch the chunk and make sure any errors return the hashes to the queue
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {
//Although we could try and make an attempt to fix this, this error really
//means that we've double allocated a fetch task to a peer. If that is the
//case, the internal state of the downloader and the queue is very wrong so
//better hard crash and note the error instead of silently accumulating into
//a much bigger issue.
					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
				}
				running = true
			}
//Make sure that we have peers available for fetching. If all peers have been
//tried and all failed, throw an error
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
}
  1281  
//processHeaders takes batches of retrieved headers from an input channel and
//keeps processing and scheduling them into the header chain and downloader's
//queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
//Keep a count of uncertain headers to roll back
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
//Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if d.mode != LightSync {
				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
				lastBlock = d.blockchain.CurrentBlock().Number()
			}
			d.lightchain.Rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.mode != LightSync {
				curFastBlock = d.blockchain.CurrentFastBlock().Number()
				curBlock = d.blockchain.CurrentBlock().Number()
			}
			log.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
		}
	}()

//Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
//Terminate header processing if we synced up
			if len(headers) == 0 {
//Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
//If no headers were retrieved at all, the peer violated its TD promise that it had a
//better chain compared to ours. The only exception is if its promised blocks were
//already imported by other means (e.g. fetcher):
//
//R <remote peer>, L <local node>: both at block 10
//R: mine block 11, and propagate it to L
//L: queue block 11 for import
//L: notice that R's head and TD increased compared to ours, start sync
//L: import of block 11 finishes
//L: sync begins, and finds common ancestor at 11
//L: request new headers up from 11 (R's TD was higher, it must have something)
//R: nothing to give
				if d.mode != LightSync {
					head := d.blockchain.CurrentBlock()
					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
						return errStallingPeer
					}
				}
//If fast or light syncing, ensure promised headers were indeed delivered. This is
//needed to detect scenarios where an attacker feeds a bad pivot and then bails out
//of delivering the post-pivot blocks that would flag the invalid content.
//
//This check cannot be executed "as is" for full imports, since blocks may still be
//queued for processing when the header download completes. However, as long as the
//peer gave us something useful, we're already happy/progressed (above check).
				if d.mode == FastSync || d.mode == LightSync {
					head := d.lightchain.CurrentHeader()
					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
						return errStallingPeer
					}
				}
//Disable any rollback and return
				rollback = nil
				return nil
			}
//Otherwise split the chunk of headers into batches and process them
			gotHeaders = true

			for len(headers) > 0 {
//Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}
//Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

//In case of header only syncing, validate the chunk immediately
				if d.mode == FastSync || d.mode == LightSync {
//Collect the yet unknown headers to mark them as uncertain
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
							unknown = append(unknown, header)
						}
					}
//If we're importing pure headers, verify based on their recentness
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
//If some headers were inserted, add them too to the rollback list
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
						return errInvalidChain
					}
//All verifications passed, store newly found uncertain headers
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}
//Unless we're doing light chains, schedule the headers for associated content retrieval
				if d.mode == FullSync || d.mode == FastSync {
//If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}
//Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						log.Debug("Stale headers")
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}

//Update the highest block number we know, if a higher one is found
			d.syncStatsLock.Lock()
			if d.syncStatsChainHeight < origin {
				d.syncStatsChainHeight = origin - 1
			}
			d.syncStatsLock.Unlock()

//Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}
  1449  
  1450  //processfullsyncContent从队列中获取结果并将其导入到链中。
  1451  func (d *Downloader) processFullSyncContent() error {
  1452  	for {
  1453  		results := d.queue.Results(true)
  1454  		if len(results) == 0 {
  1455  			return nil
  1456  		}
  1457  		if d.chainInsertHook != nil {
  1458  			d.chainInsertHook(results)
  1459  		}
  1460  		if err := d.importBlockResults(results); err != nil {
  1461  			return err
  1462  		}
  1463  	}
  1464  }
  1465  
  1466  func (d *Downloader) importBlockResults(results []*fetchResult) error {
  1467  //检查是否有任何提前终止请求
  1468  	if len(results) == 0 {
  1469  		return nil
  1470  	}
  1471  	select {
  1472  	case <-d.quitCh:
  1473  		return errCancelContentProcessing
  1474  	default:
  1475  	}
  1476  //检索要导入的一批结果
  1477  	first, last := results[0].Header, results[len(results)-1].Header
  1478  	log.Debug("Inserting downloaded chain", "items", len(results),
  1479  		"firstnum", first.Number, "firsthash", first.Hash(),
  1480  		"lastnum", last.Number, "lasthash", last.Hash(),
  1481  	)
  1482  	blocks := make([]*types.Block, len(results))
  1483  	for i, result := range results {
  1484  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1485  	}
  1486  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
  1487  		if index < len(results) {
  1488  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1489  		} else {
  1490  //区块链.go中的insertchain方法有时会返回一个越界索引,
  1491  //当需要预处理块以导入侧链时。
  1492  //导入程序将汇总一个新的要导入的块列表,这是一个超集
  1493  //从下载器发送的块中,索引将关闭。
  1494  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
  1495  		}
  1496  		return errInvalidChain
  1497  	}
  1498  	return nil
  1499  }
  1500  
//processFastSyncContent takes fetch results from the queue and writes them to the
//database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
//Start syncing state of the reported head block. This should get us most of
//the state of the pivot block.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	go func() {
		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
d.queue.Close() //wake up Results
		}
	}()
//Figure out the ideal pivot block. Note, that this goalpost may move if the
//sync takes long enough for the chain head to move significantly.
	pivot := uint64(0)
	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
		pivot = height - uint64(fsMinFullBlocks)
	}
//To cater for moving pivot points, track the pivot block and subsequently
//accumulated download results separately.
	var (
oldPivot *fetchResult   //locked in pivot block, might change eventually
oldTail  []*fetchResult //downloaded content after the pivot
	)
	for {
//Wait for the next batch of downloaded data to be available, and if the pivot
//block became stale, move the goalpost
results := d.queue.Results(oldPivot == nil) //block if we're not monitoring pivot staleness
		if len(results) == 0 {
//If pivot sync is done, stop
			if oldPivot == nil {
				return stateSync.Cancel()
			}
//If sync failed, stop
			select {
			case <-d.cancelCh:
				return stateSync.Cancel()
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
//Prepend any previously tracked pivot and post-pivot results to the new batch
		if oldPivot != nil {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
//Split around the pivot block and process the two sides via fast/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest = results[len(results)-1].Header
			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
				pivot = height - uint64(fsMinFullBlocks)
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {
//If a new pivot block was found, cancel the old state retrieval and restart.
//NOTE(review): this defer accumulates once per pivot move and only runs when
//the method returns - confirm this is acceptable for long-running syncs.
			if oldPivot != P {
				stateSync.Cancel()

				stateSync = d.syncState(P.Header.Root)
				defer stateSync.Cancel()
				go func() {
					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
d.queue.Close() //wake up Results
					}
				}()
				oldPivot = P
			}
//Wait for completion, occasionally checking for pivot staleness
			select {
			case <-stateSync.done:
				if stateSync.err != nil {
					return stateSync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
//Fast sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}
  1595  
  1596  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1597  	for _, result := range results {
  1598  		num := result.Header.Number.Uint64()
  1599  		switch {
  1600  		case num < pivot:
  1601  			before = append(before, result)
  1602  		case num == pivot:
  1603  			p = result
  1604  		default:
  1605  			after = append(after, result)
  1606  		}
  1607  	}
  1608  	return p, before, after
  1609  }
  1610  
  1611  func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
  1612  //检查是否有任何提前终止请求
  1613  	if len(results) == 0 {
  1614  		return nil
  1615  	}
  1616  	select {
  1617  	case <-d.quitCh:
  1618  		return errCancelContentProcessing
  1619  	case <-stateSync.done:
  1620  		if err := stateSync.Wait(); err != nil {
  1621  			return err
  1622  		}
  1623  	default:
  1624  	}
  1625  //检索要导入的一批结果
  1626  	first, last := results[0].Header, results[len(results)-1].Header
  1627  	log.Debug("Inserting fast-sync blocks", "items", len(results),
  1628  		"firstnum", first.Number, "firsthash", first.Hash(),
  1629  		"lastnumn", last.Number, "lasthash", last.Hash(),
  1630  	)
  1631  	blocks := make([]*types.Block, len(results))
  1632  	receipts := make([]types.Receipts, len(results))
  1633  	for i, result := range results {
  1634  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1635  		receipts[i] = result.Receipts
  1636  	}
  1637  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
  1638  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1639  		return errInvalidChain
  1640  	}
  1641  	return nil
  1642  }
  1643  
  1644  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1645  	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1646  	log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1647  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
  1648  		return err
  1649  	}
  1650  	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
  1651  		return err
  1652  	}
  1653  	atomic.StoreInt32(&d.committed, 1)
  1654  	return nil
  1655  }
  1656  
//DeliverHeaders injects a new batch of block headers received from a remote
//node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
}
  1662  
//DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
}
  1667  
//DeliverReceipts injects a new batch of receipts received from a remote node.
func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
}
  1672  
//DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}
  1677  
  1678  //deliver注入从远程节点接收的新批数据。
  1679  func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
  1680  //更新好交付和失败交付的交付指标
  1681  	inMeter.Mark(int64(packet.Items()))
  1682  	defer func() {
  1683  		if err != nil {
  1684  			dropMeter.Mark(int64(packet.Items()))
  1685  		}
  1686  	}()
  1687  //如果在排队时取消同步,则传递或中止
  1688  	d.cancelLock.RLock()
  1689  	cancel := d.cancelCh
  1690  	d.cancelLock.RUnlock()
  1691  	if cancel == nil {
  1692  		return errNoSyncActive
  1693  	}
  1694  	select {
  1695  	case destCh <- packet:
  1696  		return nil
  1697  	case <-cancel:
  1698  		return errNoSyncActive
  1699  	}
  1700  }
  1701  
  1702  //Qostener是服务质量优化循环,偶尔收集
  1703  //对等延迟统计并更新估计的请求往返时间。
  1704  func (d *Downloader) qosTuner() {
  1705  	for {
  1706  //检索当前中间RTT并集成到以前的目标RTT中
  1707  		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
  1708  		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
  1709  
  1710  //通过了一个新的RTT周期,增加了我们对估计的RTT的信心。
  1711  		conf := atomic.LoadUint64(&d.rttConfidence)
  1712  		conf = conf + (1000000-conf)/2
  1713  		atomic.StoreUint64(&d.rttConfidence, conf)
  1714  
  1715  //记录新的QoS值并休眠到下一个RTT
  1716  		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1717  		select {
  1718  		case <-d.quitCh:
  1719  			return
  1720  		case <-time.After(rtt):
  1721  		}
  1722  	}
  1723  }
  1724  
  1725  //QosReduceConfidence是指当新对等加入下载程序时调用的。
  1726  //对等集,需要降低我们对QoS估计的信心。
  1727  func (d *Downloader) qosReduceConfidence() {
  1728  //如果我们只有一个同伴,那么信心总是1
  1729  	peers := uint64(d.peers.Len())
  1730  	if peers == 0 {
  1731  //确保对等连接竞赛不会让我们措手不及
  1732  		return
  1733  	}
  1734  	if peers == 1 {
  1735  		atomic.StoreUint64(&d.rttConfidence, 1000000)
  1736  		return
  1737  	}
  1738  //如果我们有很多同龄人,不要放弃信心)
  1739  	if peers >= uint64(qosConfidenceCap) {
  1740  		return
  1741  	}
  1742  //否则,降低置信系数
  1743  	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
  1744  	if float64(conf)/1000000 < rttMinConfidence {
  1745  		conf = uint64(rttMinConfidence * 1000000)
  1746  	}
  1747  	atomic.StoreUint64(&d.rttConfidence, conf)
  1748  
  1749  	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1750  	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1751  }
  1752  
  1753  //requestrtt返回下载请求的当前目标往返时间
  1754  //完成。
  1755  //
  1756  //注意,返回的RTT是实际估计RTT的.9。原因是
  1757  //下载程序尝试使查询适应RTT,因此多个RTT值可以
  1758  //适应,但较小的是首选(更稳定的下载流)。
  1759  func (d *Downloader) requestRTT() time.Duration {
  1760  	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
  1761  }
  1762  
  1763  //REQUESTTL返回单个下载请求的当前超时允许值
  1764  //在…之下完成。
  1765  func (d *Downloader) requestTTL() time.Duration {
  1766  	var (
  1767  		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1768  		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
  1769  	)
  1770  	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
  1771  	if ttl > ttlLimit {
  1772  		ttl = ttlLimit
  1773  	}
  1774  	return ttl
  1775  }
  1776