gitee.com/liu-zhao234568/cntest@v1.0.0/eth/protocols/snap/sync.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"math/rand"
    26  	"sort"
    27  	"sync"
    28  	"time"
    29  
    30  	"gitee.com/liu-zhao234568/cntest/common"
    31  	"gitee.com/liu-zhao234568/cntest/common/math"
    32  	"gitee.com/liu-zhao234568/cntest/core/rawdb"
    33  	"gitee.com/liu-zhao234568/cntest/core/state"
    34  	"gitee.com/liu-zhao234568/cntest/core/state/snapshot"
    35  	"gitee.com/liu-zhao234568/cntest/crypto"
    36  	"gitee.com/liu-zhao234568/cntest/ethdb"
    37  	"gitee.com/liu-zhao234568/cntest/event"
    38  	"gitee.com/liu-zhao234568/cntest/light"
    39  	"gitee.com/liu-zhao234568/cntest/log"
    40  	"gitee.com/liu-zhao234568/cntest/p2p/msgrate"
    41  	"gitee.com/liu-zhao234568/cntest/rlp"
    42  	"gitee.com/liu-zhao234568/cntest/trie"
    43  	"golang.org/x/crypto/sha3"
    44  )
    45  
    46  var (
    47  	// emptyRoot is the known root hash of an empty trie.
    48  	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
    49  
    50  	// emptyCode is the known hash of the empty EVM bytecode.
    51  	emptyCode = crypto.Keccak256Hash(nil)
    52  )
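
        // For reference: emptyRoot is keccak256(rlp("")), the root hash of a
        // trie with no leaves, and emptyCode is the keccak256 hash of the empty
        // byte slice (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470).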
    53  
    54  const (
    55  	// minRequestSize is the minimum number of bytes to request from a remote peer.
    56  	// This number is used as the low cap for account and storage range requests.
    57  	// Bytecode and trienode are limited inherently by item count (1).
    58  	minRequestSize = 64 * 1024
    59  
    60  	// maxRequestSize is the maximum number of bytes to request from a remote peer.
    61  	// This number is used as the high cap for account and storage range requests.
    62  	// Bytecode and trienode are limited more explicitly by the caps below.
    63  	maxRequestSize = 512 * 1024
    64  
    65  	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
    66  	// single query. If this number is too low, we're not filling responses fully
    67  	// and waste round trip times. If it's too high, we're capping responses and
    68  	// waste bandwidth.
    69  	//
    70  	// Deployed bytecodes are currently capped at 24KB, so the minimum request
    71  	// size should be maxRequestSize / 24K. Assuming that most contracts do not
    72  	// come close to that, requesting 4x should be a good approximation.
    73  	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
    74  
    75  	// maxTrieRequestCount is the maximum number of trie node blobs to request in
    76  	// a single query. If this number is too low, we're not filling responses fully
    77  	// and waste round trip times. If it's too high, we're capping responses and
    78  	// waste bandwidth.
    79  	maxTrieRequestCount = maxRequestSize / 512
    80  )
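
        // Editorial note on the arithmetic above: with maxRequestSize = 512KiB,
        // integer division gives maxCodeRequestCount = 512KiB/24KiB*4 = 21*4 = 84
        // bytecode blobs per query, and maxTrieRequestCount = 524288/512 = 1024
        // trie nodes per query.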
    81  
    82  var (
    83  	// accountConcurrency is the number of chunks to split the account trie into
    84  	// to allow concurrent retrievals.
    85  	accountConcurrency = 16
    86  
    87  	// storageConcurrency is the number of chunks to split a large contract
    88  	// storage trie into to allow concurrent retrievals.
    89  	storageConcurrency = 16
    90  )
    91  
    92  // ErrCancelled is returned from snap syncing if the operation was prematurely
    93  // terminated.
    94  var ErrCancelled = errors.New("sync cancelled")
    95  
    96  // accountRequest tracks a pending account range request to ensure responses are
    97  // to actual requests and to validate any security constraints.
    98  //
    99  // Concurrency note: account requests and responses are handled concurrently from
   100  // the main runloop to allow Merkle proof verifications on the peer's thread and
   101  // to drop on invalid response. The request struct must contain all the data to
   102  // construct the response without accessing runloop internals (i.e. task). That
   103  // is only included to allow the runloop to match a response to the task being
   104  // synced without having yet another set of maps.
   105  type accountRequest struct {
   106  	peer string    // Peer to which this request is assigned
   107  	id   uint64    // Request ID of this request
   108  	time time.Time // Timestamp when the request was sent
   109  
   110  	deliver chan *accountResponse // Channel to deliver successful response on
   111  	revert  chan *accountRequest  // Channel to deliver request failure on
   112  	cancel  chan struct{}         // Channel to track sync cancellation
   113  	timeout *time.Timer           // Timer to track delivery timeout
   114  	stale   chan struct{}         // Channel to signal the request was dropped
   115  
   116  	origin common.Hash // First account requested to allow continuation checks
   117  	limit  common.Hash // Last account requested to allow non-overlapping chunking
   118  
   119  	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
   120  }
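
        // A sketch of the delivery contract implied by the channels above (the
        // actual response handlers live elsewhere in this package): a verified
        // response is handed to the runloop unless the sync got cancelled or the
        // request was already reverted, i.e. something like
        //
        //	select {
        //	case req.deliver <- response:
        //	case <-req.cancel:
        //	case <-req.stale:
        //	}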
   121  
   122  // accountResponse is an already Merkle-verified remote response to an account
   123  // range request. It contains the subtrie for the requested account range and
   124  // the database that's going to be filled with the internal nodes on commit.
   125  type accountResponse struct {
   126  	task *accountTask // Task which this request is filling
   127  
   128  	hashes   []common.Hash    // Account hashes in the returned range
   129  	accounts []*state.Account // Expanded accounts in the returned range
   130  
   131  	cont bool // Whether the account range has a continuation
   132  }
   133  
   134  // bytecodeRequest tracks a pending bytecode request to ensure responses are to
   135  // actual requests and to validate any security constraints.
   136  //
   137  // Concurrency note: bytecode requests and responses are handled concurrently from
   138  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   139  // to drop on invalid response. The request struct must contain all the data to
   140  // construct the response without accessing runloop internals (i.e. task). That
   141  // is only included to allow the runloop to match a response to the task being
   142  // synced without having yet another set of maps.
   143  type bytecodeRequest struct {
   144  	peer string    // Peer to which this request is assigned
   145  	id   uint64    // Request ID of this request
   146  	time time.Time // Timestamp when the request was sent
   147  
   148  	deliver chan *bytecodeResponse // Channel to deliver successful response on
   149  	revert  chan *bytecodeRequest  // Channel to deliver request failure on
   150  	cancel  chan struct{}          // Channel to track sync cancellation
   151  	timeout *time.Timer            // Timer to track delivery timeout
   152  	stale   chan struct{}          // Channel to signal the request was dropped
   153  
   154  	hashes []common.Hash // Bytecode hashes to validate responses
   155  	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
   156  }
   157  
   158  // bytecodeResponse is an already verified remote response to a bytecode request.
   159  type bytecodeResponse struct {
   160  	task *accountTask // Task which this request is filling
   161  
   162  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   163  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   164  }
   165  
   166  // storageRequest tracks a pending storage ranges request to ensure responses are
   167  // to actual requests and to validate any security constraints.
   168  //
   169  // Concurrency note: storage requests and responses are handled concurrently from
   170  // the main runloop to allow Merkle proof verifications on the peer's thread and
   171  // to drop on invalid response. The request struct must contain all the data to
   172  // construct the response without accessing runloop internals (i.e. tasks). That
   173  // is only included to allow the runloop to match a response to the task being
   174  // synced without having yet another set of maps.
   175  type storageRequest struct {
   176  	peer string    // Peer to which this request is assigned
   177  	id   uint64    // Request ID of this request
   178  	time time.Time // Timestamp when the request was sent
   179  
   180  	deliver chan *storageResponse // Channel to deliver successful response on
   181  	revert  chan *storageRequest  // Channel to deliver request failure on
   182  	cancel  chan struct{}         // Channel to track sync cancellation
   183  	timeout *time.Timer           // Timer to track delivery timeout
   184  	stale   chan struct{}         // Channel to signal the request was dropped
   185  
   186  	accounts []common.Hash // Account hashes to validate responses
   187  	roots    []common.Hash // Storage roots to validate responses
   188  
   189  	origin common.Hash // First storage slot requested to allow continuation checks
   190  	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
   191  
   192  	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
   193  	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
   194  }
   195  
   196  // storageResponse is an already Merkle-verified remote response to a storage
   197  // range request. It contains the subtries for the requested storage ranges and
   198  // the databases that are going to be filled with the internal nodes on commit.
   199  type storageResponse struct {
   200  	mainTask *accountTask // Task which this response belongs to
   201  	subTask  *storageTask // Task which this response is filling
   202  
   203  	accounts []common.Hash // Account hashes requested, may be only partially filled
   204  	roots    []common.Hash // Storage roots requested, may be only partially filled
   205  
   206  	hashes [][]common.Hash // Storage slot hashes in the returned range
   207  	slots  [][][]byte      // Storage slot values in the returned range
   208  
   209  	cont bool // Whether the last storage range has a continuation
   210  }
   211  
   212  // trienodeHealRequest tracks a pending state trie request to ensure responses
   213  // are to actual requests and to validate any security constraints.
   214  //
   215  // Concurrency note: trie node requests and responses are handled concurrently from
   216  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   217  // to drop on invalid response. The request struct must contain all the data to
   218  // construct the response without accessing runloop internals (i.e. task). That
   219  // is only included to allow the runloop to match a response to the task being
   220  // synced without having yet another set of maps.
   221  type trienodeHealRequest struct {
   222  	peer string    // Peer to which this request is assigned
   223  	id   uint64    // Request ID of this request
   224  	time time.Time // Timestamp when the request was sent
   225  
   226  	deliver chan *trienodeHealResponse // Channel to deliver successful response on
   227  	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
   228  	cancel  chan struct{}              // Channel to track sync cancellation
   229  	timeout *time.Timer                // Timer to track delivery timeout
   230  	stale   chan struct{}              // Channel to signal the request was dropped
   231  
   232  	hashes []common.Hash   // Trie node hashes to validate responses
   233  	paths  []trie.SyncPath // Trie node paths requested for rescheduling
   234  
   235  	task *healTask // Task which this request is filling (only access fields through the runloop!!)
   236  }
   237  
   238  // trienodeHealResponse is an already verified remote response to a trie node request.
   239  type trienodeHealResponse struct {
   240  	task *healTask // Task which this request is filling
   241  
   242  	hashes []common.Hash   // Hashes of the trie nodes to avoid double hashing
   243  	paths  []trie.SyncPath // Trie node paths requested for rescheduling missing ones
   244  	nodes  [][]byte        // Actual trie nodes to store into the database (nil = missing)
   245  }
   246  
   247  // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
   248  // actual requests and to validate any security constraints.
   249  //
   250  // Concurrency note: bytecode requests and responses are handled concurrently from
   251  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   252  // to drop on invalid response. The request struct must contain all the data to
   253  // construct the response without accessing runloop internals (i.e. task). That
   254  // is only included to allow the runloop to match a response to the task being
   255  // synced without having yet another set of maps.
   256  type bytecodeHealRequest struct {
   257  	peer string    // Peer to which this request is assigned
   258  	id   uint64    // Request ID of this request
   259  	time time.Time // Timestamp when the request was sent
   260  
   261  	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
   262  	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
   263  	cancel  chan struct{}              // Channel to track sync cancellation
   264  	timeout *time.Timer                // Timer to track delivery timeout
   265  	stale   chan struct{}              // Channel to signal the request was dropped
   266  
   267  	hashes []common.Hash // Bytecode hashes to validate responses
   268  	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
   269  }
   270  
   271  // bytecodeHealResponse is an already verified remote response to a bytecode request.
   272  type bytecodeHealResponse struct {
   273  	task *healTask // Task which this request is filling
   274  
   275  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   276  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   277  }
   278  
   279  // accountTask represents the sync task for a chunk of the account snapshot.
   280  type accountTask struct {
   281  	// These fields get serialized to leveldb on shutdown
   282  	Next     common.Hash                    // Next account to sync in this interval
   283  	Last     common.Hash                    // Last account to sync in this interval
   284  	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
   285  
   286  	// These fields are internals used during runtime
   287  	req  *accountRequest  // Pending request to fill this task
   288  	res  *accountResponse // Validated response filling this task
   289  	pend int              // Number of pending subtasks for this round
   290  
   291  	needCode  []bool // Flags whether the filling accounts need code retrieval
   292  	needState []bool // Flags whether the filling accounts need storage retrieval
   293  	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
   294  
   295  	codeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval
   296  	stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
   297  
   298  	genBatch ethdb.Batch     // Batch used by the node generator
   299  	genTrie  *trie.StackTrie // Node generator from storage slots
   300  
   301  	done bool // Flag whether the task can be removed
   302  }
   303  
   304  // storageTask represents the sync task for a chunk of the storage snapshot.
   305  type storageTask struct {
   306  	Next common.Hash // Next storage slot to sync in this interval
   307  	Last common.Hash // Last storage slot to sync in this interval
   308  
   309  	// These fields are internals used during runtime
   310  	root common.Hash     // Storage root hash for this instance
   311  	req  *storageRequest // Pending request to fill this task
   312  
   313  	genBatch ethdb.Batch     // Batch used by the node generator
   314  	genTrie  *trie.StackTrie // Node generator from storage slots
   315  
   316  	done bool // Flag whether the task can be removed
   317  }
   318  
   319  // healTask represents the sync task for healing the snap-synced chunk boundaries.
   320  type healTask struct {
   321  	scheduler *trie.Sync // State trie sync scheduler defining the tasks
   322  
   323  	trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval
   324  	codeTasks map[common.Hash]struct{}      // Set of byte code tasks currently queued for retrieval
   325  }
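
        // Note that both task sets above are refilled on demand from
        // scheduler.Missing (see assignTrienodeHealTasks), since the trie sync
        // scheduler emits trie node and bytecode retrievals together.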
   326  
   327  // syncProgress is a database entry to allow suspending and resuming a snapshot state
   328  // sync. As opposed to full and fast sync, there is no way to restart a suspended
   329  // snap sync without prior knowledge of the suspension point.
   330  type syncProgress struct {
   331  	Tasks []*accountTask // The suspended account tasks (contract tasks within)
   332  
   333  	// Status report during syncing phase
   334  	AccountSynced  uint64             // Number of accounts downloaded
   335  	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   336  	BytecodeSynced uint64             // Number of bytecodes downloaded
   337  	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   338  	StorageSynced  uint64             // Number of storage slots downloaded
   339  	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   340  
   341  	// Status report during healing phase
   342  	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
   343  	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   344  	TrienodeHealDups   uint64             // Number of state trie nodes already processed
   345  	TrienodeHealNops   uint64             // Number of state trie nodes not requested
   346  	BytecodeHealSynced uint64             // Number of bytecodes downloaded
   347  	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   348  	BytecodeHealDups   uint64             // Number of bytecodes already processed
   349  	BytecodeHealNops   uint64             // Number of bytecodes not requested
   350  }
   351  
   352  // SyncPeer abstracts out the methods required for a peer to be synced against
   353  // with the goal of allowing the construction of mock peers without the
   354  // full-blown networking.
   355  type SyncPeer interface {
   356  	// ID retrieves the peer's unique identifier.
   357  	ID() string
   358  
   359  	// RequestAccountRange fetches a batch of accounts rooted in a specific account
   360  	// trie, starting with the origin.
   361  	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
   362  
   363  	// RequestStorageRanges fetches a batch of storage slots belonging to one or
   364  	// more accounts. If slots from only one account are requested, an origin marker
   365  	// may also be used to retrieve from there.
   366  	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
   367  
   368  	// RequestByteCodes fetches a batch of bytecodes by hash.
   369  	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
   370  
   371  	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
   372  	// a specific state trie.
   373  	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
   374  
   375  	// Log retrieves the peer's own contextual logger.
   376  	Log() log.Logger
   377  }
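
        // A minimal mock peer for tests only needs to satisfy the six methods
        // above; a sketch (the identifiers here are hypothetical, not part of
        // this file):
        //
        //	type mockPeer struct{ id string }
        //
        //	func (p *mockPeer) ID() string      { return p.id }
        //	func (p *mockPeer) Log() log.Logger { return log.New("peer", p.id) }
        //	// ...the four Request* methods can record calls or reply from fixtures.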
   378  
   379  // Syncer is an Ethereum account and storage trie syncer based on snapshots and
   380  // the snap protocol. Its purpose is to download all the accounts and storage
   381  // slots from remote peers and reassemble chunks of the state trie, on top of
   382  // which a state sync can be run to fix any gaps / overlaps.
   383  //
   384  // Every network request has a variety of failure events:
   385  //   - The peer disconnects after task assignment, failing to send the request
   386  //   - The peer disconnects after sending the request, before delivering on it
   387  //   - The peer remains connected, but does not deliver a response in time
   388  //   - The peer delivers a stale response after a previous timeout
   389  //   - The peer delivers a refusal to serve the requested state
   390  type Syncer struct {
   391  	db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
   392  
   393  	root    common.Hash    // Current state trie root being synced
   394  	tasks   []*accountTask // Current account task set being synced
   395  	snapped bool           // Flag to signal that snap phase is done
   396  	healer  *healTask      // Current state healing task being executed
   397  	update  chan struct{}  // Notification channel for possible sync progression
   398  
   399  	peers    map[string]SyncPeer // Currently active peers to download from
   400  	peerJoin *event.Feed         // Event feed to react to peers joining
   401  	peerDrop *event.Feed         // Event feed to react to peers dropping
   402  	rates    *msgrate.Trackers   // Message throughput rates for peers
   403  
   404  	// Request tracking during syncing phase
   405  	statelessPeers map[string]struct{} // Peers that failed to deliver state data
   406  	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
   407  	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   408  	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests
   409  
   410  	accountReqs  map[uint64]*accountRequest  // Account requests currently running
   411  	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
   412  	storageReqs  map[uint64]*storageRequest  // Storage requests currently running
   413  
   414  	accountSynced  uint64             // Number of accounts downloaded
   415  	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   416  	bytecodeSynced uint64             // Number of bytecodes downloaded
   417  	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   418  	storageSynced  uint64             // Number of storage slots downloaded
   419  	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   420  
   421  	// Request tracking during healing phase
   422  	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
   423  	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   424  
   425  	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
   426  	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running
   427  
   428  	trienodeHealSynced uint64             // Number of state trie nodes downloaded
   429  	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   430  	trienodeHealDups   uint64             // Number of state trie nodes already processed
   431  	trienodeHealNops   uint64             // Number of state trie nodes not requested
   432  	bytecodeHealSynced uint64             // Number of bytecodes downloaded
   433  	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   434  	bytecodeHealDups   uint64             // Number of bytecodes already processed
   435  	bytecodeHealNops   uint64             // Number of bytecodes not requested
   436  
   437  	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
   438  	accountHealed      uint64             // Number of accounts downloaded during the healing stage
   439  	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
   440  	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
   441  	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
   442  
   443  	startTime time.Time // Time instance when snapshot sync started
   444  	logTime   time.Time // Time instance when status was last reported
   445  
   446  	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
   447  	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
   448  }
   449  
   450  // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
   451  // snap protocol.
   452  func NewSyncer(db ethdb.KeyValueStore) *Syncer {
   453  	return &Syncer{
   454  		db: db,
   455  
   456  		peers:    make(map[string]SyncPeer),
   457  		peerJoin: new(event.Feed),
   458  		peerDrop: new(event.Feed),
   459  		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
   460  		update:   make(chan struct{}, 1),
   461  
   462  		accountIdlers:  make(map[string]struct{}),
   463  		storageIdlers:  make(map[string]struct{}),
   464  		bytecodeIdlers: make(map[string]struct{}),
   465  
   466  		accountReqs:  make(map[uint64]*accountRequest),
   467  		storageReqs:  make(map[uint64]*storageRequest),
   468  		bytecodeReqs: make(map[uint64]*bytecodeRequest),
   469  
   470  		trienodeHealIdlers: make(map[string]struct{}),
   471  		bytecodeHealIdlers: make(map[string]struct{}),
   472  
   473  		trienodeHealReqs: make(map[uint64]*trienodeHealRequest),
   474  		bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest),
   475  		stateWriter:      db.NewBatch(),
   476  	}
   477  }
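
        // Typical wiring (a sketch, with illustrative identifiers and error
        // handling elided): construct the syncer over the node's key-value
        // store, register peers as they connect, and run sync cycles against
        // consecutive pivot roots:
        //
        //	s := NewSyncer(db)
        //	s.Register(peer)
        //	err := s.Sync(pivotRoot, cancelCh)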
   478  
   479  // Register injects a new data source into the syncer's peerset.
   480  func (s *Syncer) Register(peer SyncPeer) error {
   481  	// Make sure the peer is not registered yet
   482  	id := peer.ID()
   483  
   484  	s.lock.Lock()
   485  	if _, ok := s.peers[id]; ok {
   486  		log.Error("Snap peer already registered", "id", id)
   487  
   488  		s.lock.Unlock()
   489  		return errors.New("already registered")
   490  	}
   491  	s.peers[id] = peer
   492  	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
   493  
   494  	// Mark the peer as idle, even if no sync is running
   495  	s.accountIdlers[id] = struct{}{}
   496  	s.storageIdlers[id] = struct{}{}
   497  	s.bytecodeIdlers[id] = struct{}{}
   498  	s.trienodeHealIdlers[id] = struct{}{}
   499  	s.bytecodeHealIdlers[id] = struct{}{}
   500  	s.lock.Unlock()
   501  
   502  	// Notify any active syncs that a new peer can be assigned data
   503  	s.peerJoin.Send(id)
   504  	return nil
   505  }
   506  
   507  // Unregister removes a data source from the syncer's peerset.
   508  func (s *Syncer) Unregister(id string) error {
   509  	// Remove all traces of the peer from the registry
   510  	s.lock.Lock()
   511  	if _, ok := s.peers[id]; !ok {
   512  		log.Error("Snap peer not registered", "id", id)
   513  
   514  		s.lock.Unlock()
   515  		return errors.New("not registered")
   516  	}
   517  	delete(s.peers, id)
   518  	s.rates.Untrack(id)
   519  
   520  	// Remove status markers, even if no sync is running
   521  	delete(s.statelessPeers, id)
   522  
   523  	delete(s.accountIdlers, id)
   524  	delete(s.storageIdlers, id)
   525  	delete(s.bytecodeIdlers, id)
   526  	delete(s.trienodeHealIdlers, id)
   527  	delete(s.bytecodeHealIdlers, id)
   528  	s.lock.Unlock()
   529  
   530  	// Notify any active syncs that pending requests need to be reverted
   531  	s.peerDrop.Send(id)
   532  	return nil
   533  }
   534  
   535  // Sync starts (or resumes a previous) sync cycle to iterate over a state trie
   536  // with the given root and reconstruct the nodes based on the snapshot leaves.
   537  // Previously downloaded segments will not be redownloaded or fixed; rather any
   538  // errors will be healed after the leaves are fully accumulated.
   539  func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
   540  	// Move the trie root from any previous value, revert stateless markers for
   541  	// any peers and initialize the syncer if it was not yet run
   542  	s.lock.Lock()
   543  	s.root = root
   544  	s.healer = &healTask{
   545  		scheduler: state.NewStateSync(root, s.db, nil, s.onHealState),
   546  		trieTasks: make(map[common.Hash]trie.SyncPath),
   547  		codeTasks: make(map[common.Hash]struct{}),
   548  	}
   549  	s.statelessPeers = make(map[string]struct{})
   550  	s.lock.Unlock()
   551  
   552  	if s.startTime == (time.Time{}) {
   553  		s.startTime = time.Now()
   554  	}
   555  	// Retrieve the previous sync status from LevelDB and abort if already synced
   556  	s.loadSyncStatus()
   557  	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   558  		log.Debug("Snapshot sync already completed")
   559  		return nil
   560  	}
   561  	defer func() { // Persist any progress, independent of failure
   562  		for _, task := range s.tasks {
   563  			s.forwardAccountTask(task)
   564  		}
   565  		s.cleanAccountTasks()
   566  		s.saveSyncStatus()
   567  	}()
   568  
   569  	log.Debug("Starting snapshot sync cycle", "root", root)
   570  
   571  	// Flush out the last committed raw states
   572  	defer func() {
   573  		if s.stateWriter.ValueSize() > 0 {
   574  			s.stateWriter.Write()
   575  			s.stateWriter.Reset()
   576  		}
   577  	}()
   578  	defer s.report(true)
   579  
   580  	// Whether sync completed or not, disregard any future packets
   581  	defer func() {
   582  		log.Debug("Terminating snapshot sync cycle", "root", root)
   583  		s.lock.Lock()
   584  		s.accountReqs = make(map[uint64]*accountRequest)
   585  		s.storageReqs = make(map[uint64]*storageRequest)
   586  		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
   587  		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
   588  		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
   589  		s.lock.Unlock()
   590  	}()
   591  	// Keep scheduling sync tasks
   592  	peerJoin := make(chan string, 16)
   593  	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
   594  	defer peerJoinSub.Unsubscribe()
   595  
   596  	peerDrop := make(chan string, 16)
   597  	peerDropSub := s.peerDrop.Subscribe(peerDrop)
   598  	defer peerDropSub.Unsubscribe()
   599  
   600  	// Create a set of unique channels for this sync cycle. We need these to be
   601  	// ephemeral so a data race doesn't accidentally deliver something stale on
   602  	// a persistent channel across syncs (yup, this happened)
   603  	var (
   604  		accountReqFails      = make(chan *accountRequest)
   605  		storageReqFails      = make(chan *storageRequest)
   606  		bytecodeReqFails     = make(chan *bytecodeRequest)
   607  		accountResps         = make(chan *accountResponse)
   608  		storageResps         = make(chan *storageResponse)
   609  		bytecodeResps        = make(chan *bytecodeResponse)
   610  		trienodeHealReqFails = make(chan *trienodeHealRequest)
   611  		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
   612  		trienodeHealResps    = make(chan *trienodeHealResponse)
   613  		bytecodeHealResps    = make(chan *bytecodeHealResponse)
   614  	)
   615  	for {
   616  		// Remove all completed tasks and terminate sync if everything's done
   617  		s.cleanStorageTasks()
   618  		s.cleanAccountTasks()
   619  		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   620  			return nil
   621  		}
   622  		// Assign all the data retrieval tasks to any free peers
   623  		s.assignAccountTasks(accountResps, accountReqFails, cancel)
   624  		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
   625  		s.assignStorageTasks(storageResps, storageReqFails, cancel)
   626  
   627  		if len(s.tasks) == 0 {
   628  			// Sync phase done, run heal phase
   629  			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
   630  			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
   631  		}
   632  		// Wait for something to happen
   633  		select {
   634  		case <-s.update:
   635  			// Something happened (new peer, delivery, timeout), recheck tasks
   636  		case <-peerJoin:
   637  			// A new peer joined, try to schedule it new tasks
   638  		case id := <-peerDrop:
   639  			s.revertRequests(id)
   640  		case <-cancel:
   641  			return ErrCancelled
   642  
   643  		case req := <-accountReqFails:
   644  			s.revertAccountRequest(req)
   645  		case req := <-bytecodeReqFails:
   646  			s.revertBytecodeRequest(req)
   647  		case req := <-storageReqFails:
   648  			s.revertStorageRequest(req)
   649  		case req := <-trienodeHealReqFails:
   650  			s.revertTrienodeHealRequest(req)
   651  		case req := <-bytecodeHealReqFails:
   652  			s.revertBytecodeHealRequest(req)
   653  
   654  		case res := <-accountResps:
   655  			s.processAccountResponse(res)
   656  		case res := <-bytecodeResps:
   657  			s.processBytecodeResponse(res)
   658  		case res := <-storageResps:
   659  			s.processStorageResponse(res)
   660  		case res := <-trienodeHealResps:
   661  			s.processTrienodeHealResponse(res)
   662  		case res := <-bytecodeHealResps:
   663  			s.processBytecodeHealResponse(res)
   664  		}
   665  		// Report stats if something meaningful happened
   666  		s.report(false)
   667  	}
   668  }
   669  
   670  // loadSyncStatus retrieves a previously aborted sync status from the database,
   671  // or generates a fresh one if none is available.
   672  func (s *Syncer) loadSyncStatus() {
   673  	var progress syncProgress
   674  
   675  	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
   676  		if err := json.Unmarshal(status, &progress); err != nil {
   677  			log.Error("Failed to decode snap sync status", "err", err)
   678  		} else {
   679  			for _, task := range progress.Tasks {
   680  				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
   681  			}
   682  			s.tasks = progress.Tasks
   683  			for _, task := range s.tasks {
   684  				task.genBatch = ethdb.HookedBatch{
   685  					Batch: s.db.NewBatch(),
   686  					OnPut: func(key []byte, value []byte) {
   687  						s.accountBytes += common.StorageSize(len(key) + len(value))
   688  					},
   689  				}
   690  				task.genTrie = trie.NewStackTrie(task.genBatch)
   691  
   692  				for _, subtasks := range task.SubTasks {
   693  					for _, subtask := range subtasks {
   694  						subtask.genBatch = ethdb.HookedBatch{
   695  							Batch: s.db.NewBatch(),
   696  							OnPut: func(key []byte, value []byte) {
   697  								s.storageBytes += common.StorageSize(len(key) + len(value))
   698  							},
   699  						}
   700  						subtask.genTrie = trie.NewStackTrie(subtask.genBatch)
   701  					}
   702  				}
   703  			}
   704  			s.snapped = len(s.tasks) == 0
   705  
   706  			s.accountSynced = progress.AccountSynced
   707  			s.accountBytes = progress.AccountBytes
   708  			s.bytecodeSynced = progress.BytecodeSynced
   709  			s.bytecodeBytes = progress.BytecodeBytes
   710  			s.storageSynced = progress.StorageSynced
   711  			s.storageBytes = progress.StorageBytes
   712  
   713  			s.trienodeHealSynced = progress.TrienodeHealSynced
   714  			s.trienodeHealBytes = progress.TrienodeHealBytes
   715  			s.bytecodeHealSynced = progress.BytecodeHealSynced
   716  			s.bytecodeHealBytes = progress.BytecodeHealBytes
   717  			return
   718  		}
   719  	}
   720  	// Either we've failed to decode the previous state, or there was none.
   721  	// Start a fresh sync by chunking up the account range and scheduling
   722  	// them for retrieval.
   723  	s.tasks = nil
   724  	s.accountSynced, s.accountBytes = 0, 0
   725  	s.bytecodeSynced, s.bytecodeBytes = 0, 0
   726  	s.storageSynced, s.storageBytes = 0, 0
   727  	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
   728  	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
   729  
   730  	var next common.Hash
   731  	step := new(big.Int).Sub(
   732  		new(big.Int).Div(
   733  			new(big.Int).Exp(common.Big2, common.Big256, nil),
   734  			big.NewInt(int64(accountConcurrency)),
   735  		), common.Big1,
   736  	)
   737  	for i := 0; i < accountConcurrency; i++ {
   738  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
   739  		if i == accountConcurrency-1 {
   740  			// Make sure we don't overflow if the step is not a proper divisor
   741  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   742  		}
   743  		batch := ethdb.HookedBatch{
   744  			Batch: s.db.NewBatch(),
   745  			OnPut: func(key []byte, value []byte) {
   746  				s.accountBytes += common.StorageSize(len(key) + len(value))
   747  			},
   748  		}
   749  		s.tasks = append(s.tasks, &accountTask{
   750  			Next:     next,
   751  			Last:     last,
   752  			SubTasks: make(map[common.Hash][]*storageTask),
   753  			genBatch: batch,
   754  			genTrie:  trie.NewStackTrie(batch),
   755  		})
   756  		log.Debug("Created account sync task", "from", next, "last", last)
   757  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
   758  	}
   759  }
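
        // Worked example of the chunking above: with accountConcurrency = 16,
        // step = 2^256/16 - 1, so the freshly generated tasks cover
        //
        //	[0x0000...0000, 0x0fff...ffff]
        //	[0x1000...0000, 0x1fff...ffff]
        //	...
        //	[0xf000...0000, 0xffff...ffff]
        //
        // with the final boundary pinned explicitly in case the step is not a
        // proper divisor of the hash space.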
   760  
   761  // saveSyncStatus marshals the remaining sync tasks into leveldb.
   762  func (s *Syncer) saveSyncStatus() {
   763  	// Serialize any partial progress to disk before spinning down
   764  	for _, task := range s.tasks {
   765  		if err := task.genBatch.Write(); err != nil {
   766  			log.Error("Failed to persist account slots", "err", err)
   767  		}
   768  		for _, subtasks := range task.SubTasks {
   769  			for _, subtask := range subtasks {
   770  				if err := subtask.genBatch.Write(); err != nil {
   771  					log.Error("Failed to persist storage slots", "err", err)
   772  				}
   773  			}
   774  		}
   775  	}
   776  	// Store the actual progress markers
   777  	progress := &syncProgress{
   778  		Tasks:              s.tasks,
   779  		AccountSynced:      s.accountSynced,
   780  		AccountBytes:       s.accountBytes,
   781  		BytecodeSynced:     s.bytecodeSynced,
   782  		BytecodeBytes:      s.bytecodeBytes,
   783  		StorageSynced:      s.storageSynced,
   784  		StorageBytes:       s.storageBytes,
   785  		TrienodeHealSynced: s.trienodeHealSynced,
   786  		TrienodeHealBytes:  s.trienodeHealBytes,
   787  		BytecodeHealSynced: s.bytecodeHealSynced,
   788  		BytecodeHealBytes:  s.bytecodeHealBytes,
   789  	}
   790  	status, err := json.Marshal(progress)
   791  	if err != nil {
   792  		panic(err) // This can only fail during implementation
   793  	}
   794  	rawdb.WriteSnapshotSyncStatus(s.db, status)
   795  }
   796  
   797  // cleanAccountTasks removes account range retrieval tasks that have already been
   798  // completed.
   799  func (s *Syncer) cleanAccountTasks() {
   800  	// If the sync was already done before, don't even bother
   801  	if len(s.tasks) == 0 {
   802  		return
   803  	}
   804  	// Sync wasn't finished previously, check for any task that can be finalized
   805  	for i := 0; i < len(s.tasks); i++ {
   806  		if s.tasks[i].done {
   807  			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
   808  			i--
   809  		}
   810  	}
   811  	// If everything was just finalized, generate the account trie and start healing
   812  	if len(s.tasks) == 0 {
   813  		s.lock.Lock()
   814  		s.snapped = true
   815  		s.lock.Unlock()
   816  
   817  		// Push the final sync report
   818  		s.reportSyncProgress(true)
   819  	}
   820  }
   821  
   822  // cleanStorageTasks iterates over all the account tasks and storage sub-tasks
   823  // within, cleaning any that have been completed.
   824  func (s *Syncer) cleanStorageTasks() {
   825  	for _, task := range s.tasks {
   826  		for account, subtasks := range task.SubTasks {
   827  			// Remove storage range retrieval tasks that completed
   828  			for j := 0; j < len(subtasks); j++ {
   829  				if subtasks[j].done {
   830  					subtasks = append(subtasks[:j], subtasks[j+1:]...)
   831  					j--
   832  				}
   833  			}
   834  			if len(subtasks) > 0 {
   835  				task.SubTasks[account] = subtasks
   836  				continue
   837  			}
   838  			// If all storage chunks are done, mark the account as done too
   839  			for j, hash := range task.res.hashes {
   840  				if hash == account {
   841  					task.needState[j] = false
   842  				}
   843  			}
   844  			delete(task.SubTasks, account)
   845  			task.pend--
   846  
   847  			// If this was the last pending task, forward the account task
   848  			if task.pend == 0 {
   849  				s.forwardAccountTask(task)
   850  			}
   851  		}
   852  	}
   853  }
   854  
   855  // assignAccountTasks attempts to match idle peers to pending account range
   856  // retrievals.
   857  func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
   858  	s.lock.Lock()
   859  	defer s.lock.Unlock()
   860  
   861  	// Sort the peers by download capacity to use faster ones if many available
   862  	idlers := &capacitySort{
   863  		ids:  make([]string, 0, len(s.accountIdlers)),
   864  		caps: make([]int, 0, len(s.accountIdlers)),
   865  	}
   866  	targetTTL := s.rates.TargetTimeout()
   867  	for id := range s.accountIdlers {
   868  		if _, ok := s.statelessPeers[id]; ok {
   869  			continue
   870  		}
   871  		idlers.ids = append(idlers.ids, id)
   872  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
   873  	}
   874  	if len(idlers.ids) == 0 {
   875  		return
   876  	}
   877  	sort.Sort(sort.Reverse(idlers))
   878  
   879  	// Iterate over all the tasks and try to find a pending one
   880  	for _, task := range s.tasks {
   881  		// Skip any tasks already filling
   882  		if task.req != nil || task.res != nil {
   883  			continue
   884  		}
   885  		// Task pending retrieval, try to find an idle peer. If no such peer
   886  		// exists, we probably assigned tasks for all (or they are stateless).
   887  		// Abort the entire assignment mechanism.
   888  		if len(idlers.ids) == 0 {
   889  			return
   890  		}
   891  		var (
   892  			idle = idlers.ids[0]
   893  			peer = s.peers[idle]
   894  			cap  = idlers.caps[0]
   895  		)
   896  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
   897  
   898  		// Matched a pending task to an idle peer, allocate a unique request id
   899  		var reqid uint64
   900  		for {
   901  			reqid = uint64(rand.Int63())
   902  			if reqid == 0 {
   903  				continue
   904  			}
   905  			if _, ok := s.accountReqs[reqid]; ok {
   906  				continue
   907  			}
   908  			break
   909  		}
   910  		// Generate the network query and send it to the peer
   911  		req := &accountRequest{
   912  			peer:    idle,
   913  			id:      reqid,
   914  			time:    time.Now(),
   915  			deliver: success,
   916  			revert:  fail,
   917  			cancel:  cancel,
   918  			stale:   make(chan struct{}),
   919  			origin:  task.Next,
   920  			limit:   task.Last,
   921  			task:    task,
   922  		}
   923  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
   924  			peer.Log().Debug("Account range request timed out", "reqid", reqid)
   925  			s.rates.Update(idle, AccountRangeMsg, 0, 0)
   926  			s.scheduleRevertAccountRequest(req)
   927  		})
   928  		s.accountReqs[reqid] = req
   929  		delete(s.accountIdlers, idle)
   930  
   931  		s.pend.Add(1)
   932  		go func(root common.Hash) {
   933  			defer s.pend.Done()
   934  
   935  			// Attempt to send the remote request and revert if it fails
   936  			if cap > maxRequestSize {
   937  				cap = maxRequestSize
   938  			}
   939  			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
   940  				cap = minRequestSize
   941  			}
   942  			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
   943  				peer.Log().Debug("Failed to request account range", "err", err)
   944  				s.scheduleRevertAccountRequest(req)
   945  			}
   946  		}(s.root)
   947  
   948  		// Inject the request into the task to block further assignments
   949  		task.req = req
   950  	}
   951  }
   952  
   953  // assignBytecodeTasks attempts to match idle peers to pending code retrievals.
   954  func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
   955  	s.lock.Lock()
   956  	defer s.lock.Unlock()
   957  
   958  	// Sort the peers by download capacity to use faster ones if many available
   959  	idlers := &capacitySort{
   960  		ids:  make([]string, 0, len(s.bytecodeIdlers)),
   961  		caps: make([]int, 0, len(s.bytecodeIdlers)),
   962  	}
   963  	targetTTL := s.rates.TargetTimeout()
   964  	for id := range s.bytecodeIdlers {
   965  		if _, ok := s.statelessPeers[id]; ok {
   966  			continue
   967  		}
   968  		idlers.ids = append(idlers.ids, id)
   969  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
   970  	}
   971  	if len(idlers.ids) == 0 {
   972  		return
   973  	}
   974  	sort.Sort(sort.Reverse(idlers))
   975  
   976  	// Iterate over all the tasks and try to find a pending one
   977  	for _, task := range s.tasks {
   978  		// Skip any tasks not in the bytecode retrieval phase
   979  		if task.res == nil {
   980  			continue
   981  		}
   982  		// Skip tasks that are already retrieving (or done with) all codes
   983  		if len(task.codeTasks) == 0 {
   984  			continue
   985  		}
   986  		// Task pending retrieval, try to find an idle peer. If no such peer
   987  		// exists, we probably assigned tasks for all (or they are stateless).
   988  		// Abort the entire assignment mechanism.
   989  		if len(idlers.ids) == 0 {
   990  			return
   991  		}
   992  		var (
   993  			idle = idlers.ids[0]
   994  			peer = s.peers[idle]
   995  			cap  = idlers.caps[0]
   996  		)
   997  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
   998  
   999  		// Matched a pending task to an idle peer, allocate a unique request id
  1000  		var reqid uint64
  1001  		for {
  1002  			reqid = uint64(rand.Int63())
  1003  			if reqid == 0 {
  1004  				continue
  1005  			}
  1006  			if _, ok := s.bytecodeReqs[reqid]; ok {
  1007  				continue
  1008  			}
  1009  			break
  1010  		}
  1011  		// Generate the network query and send it to the peer
  1012  		if cap > maxCodeRequestCount {
  1013  			cap = maxCodeRequestCount
  1014  		}
  1015  		hashes := make([]common.Hash, 0, cap)
  1016  		for hash := range task.codeTasks {
  1017  			delete(task.codeTasks, hash)
  1018  			hashes = append(hashes, hash)
  1019  			if len(hashes) >= cap {
  1020  				break
  1021  			}
  1022  		}
  1023  		req := &bytecodeRequest{
  1024  			peer:    idle,
  1025  			id:      reqid,
  1026  			time:    time.Now(),
  1027  			deliver: success,
  1028  			revert:  fail,
  1029  			cancel:  cancel,
  1030  			stale:   make(chan struct{}),
  1031  			hashes:  hashes,
  1032  			task:    task,
  1033  		}
  1034  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1035  			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
  1036  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1037  			s.scheduleRevertBytecodeRequest(req)
  1038  		})
  1039  		s.bytecodeReqs[reqid] = req
  1040  		delete(s.bytecodeIdlers, idle)
  1041  
  1042  		s.pend.Add(1)
  1043  		go func() {
  1044  			defer s.pend.Done()
  1045  
  1046  			// Attempt to send the remote request and revert if it fails
  1047  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1048  				log.Debug("Failed to request bytecodes", "err", err)
  1049  				s.scheduleRevertBytecodeRequest(req)
  1050  			}
  1051  		}()
  1052  	}
  1053  }
  1054  
  1055  // assignStorageTasks attempts to match idle peers to pending storage range
  1056  // retrievals.
  1057  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1058  	s.lock.Lock()
  1059  	defer s.lock.Unlock()
  1060  
  1061  	// Sort the peers by download capacity to use faster ones if many available
  1062  	idlers := &capacitySort{
  1063  		ids:  make([]string, 0, len(s.storageIdlers)),
  1064  		caps: make([]int, 0, len(s.storageIdlers)),
  1065  	}
  1066  	targetTTL := s.rates.TargetTimeout()
  1067  	for id := range s.storageIdlers {
  1068  		if _, ok := s.statelessPeers[id]; ok {
  1069  			continue
  1070  		}
  1071  		idlers.ids = append(idlers.ids, id)
  1072  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1073  	}
  1074  	if len(idlers.ids) == 0 {
  1075  		return
  1076  	}
  1077  	sort.Sort(sort.Reverse(idlers))
  1078  
  1079  	// Iterate over all the tasks and try to find a pending one
  1080  	for _, task := range s.tasks {
  1081  		// Skip any tasks not in the storage retrieval phase
  1082  		if task.res == nil {
  1083  			continue
  1084  		}
  1085  		// Skip tasks that are already retrieving (or done with) all small states
  1086  		if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
  1087  			continue
  1088  		}
  1089  		// Task pending retrieval, try to find an idle peer. If no such peer
  1090  		// exists, we probably assigned tasks for all (or they are stateless).
  1091  		// Abort the entire assignment mechanism.
  1092  		if len(idlers.ids) == 0 {
  1093  			return
  1094  		}
  1095  		var (
  1096  			idle = idlers.ids[0]
  1097  			peer = s.peers[idle]
  1098  			cap  = idlers.caps[0]
  1099  		)
  1100  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1101  
  1102  		// Matched a pending task to an idle peer, allocate a unique request id
  1103  		var reqid uint64
  1104  		for {
  1105  			reqid = uint64(rand.Int63())
  1106  			if reqid == 0 {
  1107  				continue
  1108  			}
  1109  			if _, ok := s.storageReqs[reqid]; ok {
  1110  				continue
  1111  			}
  1112  			break
  1113  		}
  1114  		// Generate the network query and send it to the peer. If there are
  1115  		// large contract tasks pending, complete those before diving into
  1116  		// even more new contracts.
  1117  		if cap > maxRequestSize {
  1118  			cap = maxRequestSize
  1119  		}
  1120  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1121  			cap = minRequestSize
  1122  		}
  1123  		storageSets := cap / 1024
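        		// The divisor assumes roughly 1KB of response data per requested
        		// account's storage; with cap clamped above, this yields between
        		// 64 and 512 accounts per query.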
  1124  
  1125  		var (
  1126  			accounts = make([]common.Hash, 0, storageSets)
  1127  			roots    = make([]common.Hash, 0, storageSets)
  1128  			subtask  *storageTask
  1129  		)
  1130  		for account, subtasks := range task.SubTasks {
  1131  			for _, st := range subtasks {
  1132  				// Skip any subtasks already filling
  1133  				if st.req != nil {
  1134  					continue
  1135  				}
  1136  				// Found an incomplete storage chunk, schedule it
  1137  				accounts = append(accounts, account)
  1138  				roots = append(roots, st.root)
  1139  				subtask = st
  1140  				break // Large contract chunks are downloaded individually
  1141  			}
  1142  			if subtask != nil {
  1143  				break // Large contract chunks are downloaded individually
  1144  			}
  1145  		}
  1146  		if subtask == nil {
  1147  			// No large contract requires retrieval, but small ones are available
  1148  			for account, root := range task.stateTasks {
  1149  				delete(task.stateTasks, account)
  1150  
  1151  				accounts = append(accounts, account)
  1152  				roots = append(roots, root)
  1153  
  1154  				if len(accounts) >= storageSets {
  1155  					break
  1156  				}
  1157  			}
  1158  		}
  1159  		// If nothing was found, it means this task is actually already fully
  1160  		// retrieving, but large contracts are hard to detect. Skip to the next.
  1161  		if len(accounts) == 0 {
  1162  			continue
  1163  		}
  1164  		req := &storageRequest{
  1165  			peer:     idle,
  1166  			id:       reqid,
  1167  			time:     time.Now(),
  1168  			deliver:  success,
  1169  			revert:   fail,
  1170  			cancel:   cancel,
  1171  			stale:    make(chan struct{}),
  1172  			accounts: accounts,
  1173  			roots:    roots,
  1174  			mainTask: task,
  1175  			subTask:  subtask,
  1176  		}
  1177  		if subtask != nil {
  1178  			req.origin = subtask.Next
  1179  			req.limit = subtask.Last
  1180  		}
  1181  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1182  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1183  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1184  			s.scheduleRevertStorageRequest(req)
  1185  		})
  1186  		s.storageReqs[reqid] = req
  1187  		delete(s.storageIdlers, idle)
  1188  
  1189  		s.pend.Add(1)
  1190  		go func(root common.Hash) {
  1191  			defer s.pend.Done()
  1192  
  1193  			// Attempt to send the remote request and revert if it fails
  1194  			var origin, limit []byte
  1195  			if subtask != nil {
  1196  				origin, limit = req.origin[:], req.limit[:]
  1197  			}
  1198  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1199  				log.Debug("Failed to request storage", "err", err)
  1200  				s.scheduleRevertStorageRequest(req)
  1201  			}
  1202  		}(s.root)
  1203  
  1204  		// Inject the request into the subtask to block further assignments
  1205  		if subtask != nil {
  1206  			subtask.req = req
  1207  		}
  1208  	}
  1209  }
  1210  
  1211  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1212  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1213  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1214  	s.lock.Lock()
  1215  	defer s.lock.Unlock()
  1216  
  1217  	// Sort the peers by download capacity to use faster ones if many available
  1218  	idlers := &capacitySort{
  1219  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1220  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1221  	}
  1222  	targetTTL := s.rates.TargetTimeout()
  1223  	for id := range s.trienodeHealIdlers {
  1224  		if _, ok := s.statelessPeers[id]; ok {
  1225  			continue
  1226  		}
  1227  		idlers.ids = append(idlers.ids, id)
  1228  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1229  	}
  1230  	if len(idlers.ids) == 0 {
  1231  		return
  1232  	}
  1233  	sort.Sort(sort.Reverse(idlers))
  1234  
  1235  	// Iterate over pending tasks and try to find a peer to retrieve with
  1236  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1237  		// If there are not enough trie tasks queued to fully assign, fill the
  1238  		// queue from the state sync scheduler. The trie sync scheduler emits these
  1239  		// together with bytecodes, so we need to queue them combined.
  1240  		var (
  1241  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1242  			want = maxTrieRequestCount + maxCodeRequestCount
  1243  		)
  1244  		if have < want {
  1245  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1246  			for i, hash := range nodes {
  1247  				s.healer.trieTasks[hash] = paths[i]
  1248  			}
  1249  			for _, hash := range codes {
  1250  				s.healer.codeTasks[hash] = struct{}{}
  1251  			}
  1252  		}
  1253  		// If all the heal tasks are bytecodes or already downloading, bail
  1254  		if len(s.healer.trieTasks) == 0 {
  1255  			return
  1256  		}
  1257  		// Tasks are pending retrieval, try to find an idle peer. If no such peer
  1258  		// exists, we probably already assigned tasks to all of them (or they are
  1259  		// stateless). Abort the entire assignment mechanism.
  1260  		if len(idlers.ids) == 0 {
  1261  			return
  1262  		}
  1263  		var (
  1264  			idle = idlers.ids[0]
  1265  			peer = s.peers[idle]
  1266  			cap  = idlers.caps[0]
  1267  		)
  1268  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1269  
  1270  		// Matched a pending task to an idle peer, allocate a unique request id
  1271  		var reqid uint64
  1272  		for {
  1273  			reqid = uint64(rand.Int63())
  1274  			if reqid == 0 {
  1275  				continue
  1276  			}
  1277  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1278  				continue
  1279  			}
  1280  			break
  1281  		}
  1282  		// Generate the network query and send it to the peer
  1283  		if cap > maxTrieRequestCount {
  1284  			cap = maxTrieRequestCount
  1285  		}
  1286  		var (
  1287  			hashes   = make([]common.Hash, 0, cap)
  1288  			paths    = make([]trie.SyncPath, 0, cap)
  1289  			pathsets = make([]TrieNodePathSet, 0, cap)
  1290  		)
  1291  		for hash, pathset := range s.healer.trieTasks {
  1292  			delete(s.healer.trieTasks, hash)
  1293  
  1294  			hashes = append(hashes, hash)
  1295  			paths = append(paths, pathset)
  1296  			pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash
  1297  
  1298  			if len(hashes) >= cap {
  1299  				break
  1300  			}
  1301  		}
  1302  		req := &trienodeHealRequest{
  1303  			peer:    idle,
  1304  			id:      reqid,
  1305  			time:    time.Now(),
  1306  			deliver: success,
  1307  			revert:  fail,
  1308  			cancel:  cancel,
  1309  			stale:   make(chan struct{}),
  1310  			hashes:  hashes,
  1311  			paths:   paths,
  1312  			task:    s.healer,
  1313  		}
  1314  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1315  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1316  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1317  			s.scheduleRevertTrienodeHealRequest(req)
  1318  		})
  1319  		s.trienodeHealReqs[reqid] = req
  1320  		delete(s.trienodeHealIdlers, idle)
  1321  
  1322  		s.pend.Add(1)
  1323  		go func(root common.Hash) {
  1324  			defer s.pend.Done()
  1325  
  1326  			// Attempt to send the remote request and revert if it fails
  1327  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1328  				log.Debug("Failed to request trienode healers", "err", err)
  1329  				s.scheduleRevertTrienodeHealRequest(req)
  1330  			}
  1331  		}(s.root)
  1332  	}
  1333  }
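
        // capacitySort (defined elsewhere in this file) keeps the peer ids and
        // their capacities in two parallel slices and implements sort.Interface
        // over both, so sort.Sort(sort.Reverse(...)) yields the highest-capacity
        // peer first. A stand-alone sketch of the same pattern, with a
        // hypothetical byCap type:
        //
        //	type byCap struct {
        //		ids  []string
        //		caps []int
        //	}
        //
        //	func (s *byCap) Len() int           { return len(s.ids) }
        //	func (s *byCap) Less(i, j int) bool { return s.caps[i] < s.caps[j] }
        //	func (s *byCap) Swap(i, j int) {
        //		s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
        //		s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
        //	}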
  1334  
  1335  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1336  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1337  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1338  	s.lock.Lock()
  1339  	defer s.lock.Unlock()
  1340  
  1341  	// Sort the peers by download capacity to use faster ones if many are available
  1342  	idlers := &capacitySort{
  1343  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1344  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1345  	}
  1346  	targetTTL := s.rates.TargetTimeout()
  1347  	for id := range s.bytecodeHealIdlers {
  1348  		if _, ok := s.statelessPeers[id]; ok {
  1349  			continue
  1350  		}
  1351  		idlers.ids = append(idlers.ids, id)
  1352  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1353  	}
  1354  	if len(idlers.ids) == 0 {
  1355  		return
  1356  	}
  1357  	sort.Sort(sort.Reverse(idlers))
  1358  
  1359  	// Iterate over pending tasks and try to find a peer to retrieve with
  1360  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1361  		// If there are not enough bytecode tasks queued to fully assign, fill the
  1362  		// queue from the state sync scheduler. The trie syncer schedules these
  1363  		// together with trie nodes, so we need to queue them combined.
  1364  		var (
  1365  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1366  			want = maxTrieRequestCount + maxCodeRequestCount
  1367  		)
  1368  		if have < want {
  1369  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1370  			for i, hash := range nodes {
  1371  				s.healer.trieTasks[hash] = paths[i]
  1372  			}
  1373  			for _, hash := range codes {
  1374  				s.healer.codeTasks[hash] = struct{}{}
  1375  			}
  1376  		}
  1377  		// If all the heal tasks are trienodes or already downloading, bail
  1378  		if len(s.healer.codeTasks) == 0 {
  1379  			return
  1380  		}
  1381  		// Tasks are pending retrieval, try to find an idle peer. If no such peer
  1382  		// exists, we probably already assigned tasks to all of them (or they are
  1383  		// stateless). Abort the entire assignment mechanism.
  1384  		if len(idlers.ids) == 0 {
  1385  			return
  1386  		}
  1387  		var (
  1388  			idle = idlers.ids[0]
  1389  			peer = s.peers[idle]
  1390  			cap  = idlers.caps[0]
  1391  		)
  1392  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1393  
  1394  		// Matched a pending task to an idle peer, allocate a unique request id
  1395  		var reqid uint64
  1396  		for {
  1397  			reqid = uint64(rand.Int63())
  1398  			if reqid == 0 {
  1399  				continue
  1400  			}
  1401  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1402  				continue
  1403  			}
  1404  			break
  1405  		}
  1406  		// Generate the network query and send it to the peer
  1407  		if cap > maxCodeRequestCount {
  1408  			cap = maxCodeRequestCount
  1409  		}
  1410  		hashes := make([]common.Hash, 0, cap)
  1411  		for hash := range s.healer.codeTasks {
  1412  			delete(s.healer.codeTasks, hash)
  1413  
  1414  			hashes = append(hashes, hash)
  1415  			if len(hashes) >= cap {
  1416  				break
  1417  			}
  1418  		}
  1419  		req := &bytecodeHealRequest{
  1420  			peer:    idle,
  1421  			id:      reqid,
  1422  			time:    time.Now(),
  1423  			deliver: success,
  1424  			revert:  fail,
  1425  			cancel:  cancel,
  1426  			stale:   make(chan struct{}),
  1427  			hashes:  hashes,
  1428  			task:    s.healer,
  1429  		}
  1430  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1431  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1432  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1433  			s.scheduleRevertBytecodeHealRequest(req)
  1434  		})
  1435  		s.bytecodeHealReqs[reqid] = req
  1436  		delete(s.bytecodeHealIdlers, idle)
  1437  
  1438  		s.pend.Add(1)
  1439  		go func() {
  1440  			defer s.pend.Done()
  1441  
  1442  			// Attempt to send the remote request and revert if it fails
  1443  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1444  				log.Debug("Failed to request bytecode healers", "err", err)
  1445  				s.scheduleRevertBytecodeHealRequest(req)
  1446  			}
  1447  		}()
  1448  	}
  1449  }
  1450  
  1451  // revertRequests locates all the currently pending requests from a particular
  1452  // peer and reverts them, rescheduling them for others to fulfill.
  1453  func (s *Syncer) revertRequests(peer string) {
  1454  	// Gather the requests first; reversions need the lock too
  1455  	s.lock.Lock()
  1456  	var accountReqs []*accountRequest
  1457  	for _, req := range s.accountReqs {
  1458  		if req.peer == peer {
  1459  			accountReqs = append(accountReqs, req)
  1460  		}
  1461  	}
  1462  	var bytecodeReqs []*bytecodeRequest
  1463  	for _, req := range s.bytecodeReqs {
  1464  		if req.peer == peer {
  1465  			bytecodeReqs = append(bytecodeReqs, req)
  1466  		}
  1467  	}
  1468  	var storageReqs []*storageRequest
  1469  	for _, req := range s.storageReqs {
  1470  		if req.peer == peer {
  1471  			storageReqs = append(storageReqs, req)
  1472  		}
  1473  	}
  1474  	var trienodeHealReqs []*trienodeHealRequest
  1475  	for _, req := range s.trienodeHealReqs {
  1476  		if req.peer == peer {
  1477  			trienodeHealReqs = append(trienodeHealReqs, req)
  1478  		}
  1479  	}
  1480  	var bytecodeHealReqs []*bytecodeHealRequest
  1481  	for _, req := range s.bytecodeHealReqs {
  1482  		if req.peer == peer {
  1483  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1484  		}
  1485  	}
  1486  	s.lock.Unlock()
  1487  
  1488  	// Revert all the requests matching the peer
  1489  	for _, req := range accountReqs {
  1490  		s.revertAccountRequest(req)
  1491  	}
  1492  	for _, req := range bytecodeReqs {
  1493  		s.revertBytecodeRequest(req)
  1494  	}
  1495  	for _, req := range storageReqs {
  1496  		s.revertStorageRequest(req)
  1497  	}
  1498  	for _, req := range trienodeHealReqs {
  1499  		s.revertTrienodeHealRequest(req)
  1500  	}
  1501  	for _, req := range bytecodeHealReqs {
  1502  		s.revertBytecodeHealRequest(req)
  1503  	}
  1504  }
  1505  
  1506  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1507  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1508  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
  1509  	select {
  1510  	case req.revert <- req:
  1511  		// Sync event loop notified
  1512  	case <-req.cancel:
  1513  		// Sync cycle got cancelled
  1514  	case <-req.stale:
  1515  		// Request already reverted
  1516  	}
  1517  }
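
        // All the scheduleRevert* helpers share this three-way select: the
        // request is handed to the event loop unless the sync cycle has been
        // torn down or the request already went stale, so the caller can never
        // block forever. A minimal sketch of the idiom with hypothetical
        // channels:
        //
        //	select {
        //	case revert <- req: // event loop takes ownership
        //	case <-cancel:      // cycle cancelled, nobody will ever read
        //	case <-stale:       // request already reverted elsewhere
        //	}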
  1518  
  1519  // revertAccountRequest cleans up an account range request and returns all failed
  1520  // retrieval tasks to the scheduler for reassignment.
  1521  //
  1522  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1523  // On peer threads, use scheduleRevertAccountRequest.
  1524  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1525  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1526  	select {
  1527  	case <-req.stale:
  1528  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1529  		return
  1530  	default:
  1531  	}
  1532  	close(req.stale)
  1533  
  1534  	// Remove the request from the tracked set
  1535  	s.lock.Lock()
  1536  	delete(s.accountReqs, req.id)
  1537  	s.lock.Unlock()
  1538  
  1539  	// If there's a timeout timer still running, abort it and mark the account
  1540  	// task as not-pending, ready for rescheduling
  1541  	req.timeout.Stop()
  1542  	if req.task.req == req {
  1543  		req.task.req = nil
  1544  	}
  1545  }
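
        // The revert* methods all use the same staleness idiom: a non-blocking
        // receive detects an already-closed channel, and close(req.stale) then
        // broadcasts the reversion to every waiter. The check-then-close is only
        // race free because, as noted above, all reverts run on the single event
        // loop thread. A minimal sketch, assuming a hypothetical 'done' channel:
        //
        //	select {
        //	case <-done:
        //		return // already closed, nothing to do
        //	default:
        //	}
        //	close(done) // all current and future receives now unblock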
  1546  
  1547  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1548  // and return all failed retrieval tasks to the scheduler for reassignment.
  1549  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1550  	select {
  1551  	case req.revert <- req:
  1552  		// Sync event loop notified
  1553  	case <-req.cancel:
  1554  		// Sync cycle got cancelled
  1555  	case <-req.stale:
  1556  		// Request already reverted
  1557  	}
  1558  }
  1559  
  1560  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1561  // retrieval tasks to the scheduler for reassignment.
  1562  //
  1563  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1564  // On peer threads, use scheduleRevertBytecodeRequest.
  1565  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1566  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1567  	select {
  1568  	case <-req.stale:
  1569  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1570  		return
  1571  	default:
  1572  	}
  1573  	close(req.stale)
  1574  
  1575  	// Remove the request from the tracked set
  1576  	s.lock.Lock()
  1577  	delete(s.bytecodeReqs, req.id)
  1578  	s.lock.Unlock()
  1579  
  1580  	// If there's a timeout timer still running, abort it and mark the code
  1581  	// retrievals as not-pending, ready for rescheduling
  1582  	req.timeout.Stop()
  1583  	for _, hash := range req.hashes {
  1584  		req.task.codeTasks[hash] = struct{}{}
  1585  	}
  1586  }
  1587  
  1588  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1589  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1590  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1591  	select {
  1592  	case req.revert <- req:
  1593  		// Sync event loop notified
  1594  	case <-req.cancel:
  1595  		// Sync cycle got cancelled
  1596  	case <-req.stale:
  1597  		// Request already reverted
  1598  	}
  1599  }
  1600  
  1601  // revertStorageRequest cleans up a storage range request and returns all failed
  1602  // retrieval tasks to the scheduler for reassignment.
  1603  //
  1604  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1605  // On peer threads, use scheduleRevertStorageRequest.
  1606  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1607  	log.Debug("Reverting storage request", "peer", req.peer)
  1608  	select {
  1609  	case <-req.stale:
  1610  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1611  		return
  1612  	default:
  1613  	}
  1614  	close(req.stale)
  1615  
  1616  	// Remove the request from the tracked set
  1617  	s.lock.Lock()
  1618  	delete(s.storageReqs, req.id)
  1619  	s.lock.Unlock()
  1620  
  1621  	// If there's a timeout timer still running, abort it and mark the storage
  1622  	// task as not-pending, ready for rescheduling
  1623  	req.timeout.Stop()
  1624  	if req.subTask != nil {
  1625  		req.subTask.req = nil
  1626  	} else {
  1627  		for i, account := range req.accounts {
  1628  			req.mainTask.stateTasks[account] = req.roots[i]
  1629  		}
  1630  	}
  1631  }
  1632  
  1633  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1634  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1635  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1636  	select {
  1637  	case req.revert <- req:
  1638  		// Sync event loop notified
  1639  	case <-req.cancel:
  1640  		// Sync cycle got cancelled
  1641  	case <-req.stale:
  1642  		// Request already reverted
  1643  	}
  1644  }
  1645  
  1646  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1647  // failed retrieval tasks to the scheduler for reassignment.
  1648  //
  1649  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1650  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1651  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1652  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1653  	select {
  1654  	case <-req.stale:
  1655  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1656  		return
  1657  	default:
  1658  	}
  1659  	close(req.stale)
  1660  
  1661  	// Remove the request from the tracked set
  1662  	s.lock.Lock()
  1663  	delete(s.trienodeHealReqs, req.id)
  1664  	s.lock.Unlock()
  1665  
  1666  	// If there's a timeout timer still running, abort it and mark the trie node
  1667  	// retrievals as not-pending, ready for rescheduling
  1668  	req.timeout.Stop()
  1669  	for i, hash := range req.hashes {
  1670  		req.task.trieTasks[hash] = req.paths[i]
  1671  	}
  1672  }
  1673  
  1674  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1675  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1676  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1677  	select {
  1678  	case req.revert <- req:
  1679  		// Sync event loop notified
  1680  	case <-req.cancel:
  1681  		// Sync cycle got cancelled
  1682  	case <-req.stale:
  1683  		// Request already reverted
  1684  	}
  1685  }
  1686  
  1687  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1688  // failed retrieval tasks to the scheduler for reassignment.
  1689  //
  1690  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1691  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1692  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1693  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1694  	select {
  1695  	case <-req.stale:
  1696  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1697  		return
  1698  	default:
  1699  	}
  1700  	close(req.stale)
  1701  
  1702  	// Remove the request from the tracked set
  1703  	s.lock.Lock()
  1704  	delete(s.bytecodeHealReqs, req.id)
  1705  	s.lock.Unlock()
  1706  
  1707  	// If there's a timeout timer still running, abort it and mark the code
  1708  	// retrievals as not-pending, ready for rescheduling
  1709  	req.timeout.Stop()
  1710  	for _, hash := range req.hashes {
  1711  		req.task.codeTasks[hash] = struct{}{}
  1712  	}
  1713  }
  1714  
  1715  // processAccountResponse integrates an already validated account range response
  1716  // into the account tasks.
  1717  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1718  	// Switch the task from pending to filling
  1719  	res.task.req = nil
  1720  	res.task.res = res
  1721  
  1722  	// Ensure that the response doesn't overflow into the subsequent task
  1723  	last := res.task.Last.Big()
  1724  	for i, hash := range res.hashes {
  1725  		// Mark the range complete if the last hash is already included.
  1726  		// Keep iterating to delete any extra states beyond it.
  1727  		cmp := hash.Big().Cmp(last)
  1728  		if cmp == 0 {
  1729  			res.cont = false
  1730  			continue
  1731  		}
  1732  		if cmp > 0 {
  1733  			// Chunk overflown, cut off excess
  1734  			res.hashes = res.hashes[:i]
  1735  			res.accounts = res.accounts[:i]
  1736  			res.cont = false // Mark range completed
  1737  			break
  1738  		}
  1739  	}
  1740  	// Iterate over all the accounts and assemble which ones need further sub-
  1741  	// filling before the entire account range can be persisted.
  1742  	res.task.needCode = make([]bool, len(res.accounts))
  1743  	res.task.needState = make([]bool, len(res.accounts))
  1744  	res.task.needHeal = make([]bool, len(res.accounts))
  1745  
  1746  	res.task.codeTasks = make(map[common.Hash]struct{})
  1747  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1748  
  1749  	resumed := make(map[common.Hash]struct{})
  1750  
  1751  	res.task.pend = 0
  1752  	for i, account := range res.accounts {
  1753  		// Check if the account is a contract with an unknown code
  1754  		if !bytes.Equal(account.CodeHash, emptyCode[:]) {
  1755  			if code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)); code == nil {
  1756  				res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
  1757  				res.task.needCode[i] = true
  1758  				res.task.pend++
  1759  			}
  1760  		}
  1761  		// Check if the account is a contract with an unknown storage trie
  1762  		if account.Root != emptyRoot {
  1763  			if node, err := s.db.Get(account.Root[:]); err != nil || node == nil {
  1764  				// If there was a previous large state retrieval in progress,
  1765  				// don't restart it from scratch. This happens if a sync cycle
  1766  				// is interrupted and resumed later. However, *do* update the
  1767  				// previous root hash.
  1768  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1769  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1770  					for _, subtask := range subtasks {
  1771  						subtask.root = account.Root
  1772  					}
  1773  					res.task.needHeal[i] = true
  1774  					resumed[res.hashes[i]] = struct{}{}
  1775  				} else {
  1776  					res.task.stateTasks[res.hashes[i]] = account.Root
  1777  				}
  1778  				res.task.needState[i] = true
  1779  				res.task.pend++
  1780  			}
  1781  		}
  1782  	}
  1783  	// Delete any subtasks that have been aborted but not resumed. This may undo
  1784  	// some progress if a new peer gives us fewer accounts than an old one, but for
  1785  	// now we have to live with that.
  1786  	for hash := range res.task.SubTasks {
  1787  		if _, ok := resumed[hash]; !ok {
  1788  			log.Debug("Aborting suspended storage retrieval", "account", hash)
  1789  			delete(res.task.SubTasks, hash)
  1790  		}
  1791  	}
  1792  	// If the account range contained no contracts, or all have been fully filled
  1793  	// beforehand, short circuit storage filling and forward to the next task
  1794  	if res.task.pend == 0 {
  1795  		s.forwardAccountTask(res.task)
  1796  		return
  1797  	}
  1798  	// Some accounts are incomplete, leave as is for the storage and contract
  1799  	// task assigners to pick up and fill.
  1800  }
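
        // The overflow trimming above compares hashes as 256-bit integers via
        // hash.Big(). A self-contained sketch of the same boundary cut, with
        // hypothetical 'hashes' and 'taskLast' values:
        //
        //	last := new(big.Int).SetBytes(taskLast[:])
        //	for i, h := range hashes {
        //		if new(big.Int).SetBytes(h[:]).Cmp(last) > 0 {
        //			hashes = hashes[:i] // drop everything past the task boundary
        //			break
        //		}
        //	}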
  1801  
  1802  // processBytecodeResponse integrates an already validated bytecode response
  1803  // into the account tasks.
  1804  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1805  	batch := s.db.NewBatch()
  1806  
  1807  	var (
  1808  		codes uint64
  1809  	)
  1810  	for i, hash := range res.hashes {
  1811  		code := res.codes[i]
  1812  
  1813  		// If the bytecode was not delivered, reschedule it
  1814  		if code == nil {
  1815  			res.task.codeTasks[hash] = struct{}{}
  1816  			continue
  1817  		}
  1818  		// Code was delivered, mark it not needed any more
  1819  		for j, account := range res.task.res.accounts {
  1820  			if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
  1821  				res.task.needCode[j] = false
  1822  				res.task.pend--
  1823  			}
  1824  		}
  1825  		// Push the bytecode into a database batch
  1826  		codes++
  1827  		rawdb.WriteCode(batch, hash, code)
  1828  	}
  1829  	bytes := common.StorageSize(batch.ValueSize())
  1830  	if err := batch.Write(); err != nil {
  1831  		log.Crit("Failed to persist bytecodes", "err", err)
  1832  	}
  1833  	s.bytecodeSynced += codes
  1834  	s.bytecodeBytes += bytes
  1835  
  1836  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes)
  1837  
  1838  	// If this delivery completed the last pending task, forward the account task
  1839  	// to the next chunk
  1840  	if res.task.pend == 0 {
  1841  		s.forwardAccountTask(res.task)
  1842  		return
  1843  	}
  1844  	// Some accounts are still incomplete, leave as is for the storage and contract
  1845  	// task assigners to pick up and fill.
  1846  }
  1847  
  1848  // processStorageResponse integrates an already validated storage response
  1849  // into the account tasks.
  1850  func (s *Syncer) processStorageResponse(res *storageResponse) {
  1851  	// Switch the subtask from pending to idle
  1852  	if res.subTask != nil {
  1853  		res.subTask.req = nil
  1854  	}
  1855  	batch := ethdb.HookedBatch{
  1856  		Batch: s.db.NewBatch(),
  1857  		OnPut: func(key []byte, value []byte) {
  1858  			s.storageBytes += common.StorageSize(len(key) + len(value))
  1859  		},
  1860  	}
  1861  	var (
  1862  		slots           int
  1863  		oldStorageBytes = s.storageBytes
  1864  	)
  1865  	// Iterate over all the accounts and reconstruct their storage tries from the
  1866  	// delivered slots
  1867  	for i, account := range res.accounts {
  1868  		// If the account was not delivered, reschedule it
  1869  		if i >= len(res.hashes) {
  1870  			res.mainTask.stateTasks[account] = res.roots[i]
  1871  			continue
  1872  		}
  1873  		// State was delivered: if complete, mark it as not needed any more;
  1874  		// otherwise mark the account as needing healing
  1875  		for j, hash := range res.mainTask.res.hashes {
  1876  			if account != hash {
  1877  				continue
  1878  			}
  1879  			acc := res.mainTask.res.accounts[j]
  1880  
  1881  			// If the packet contains multiple contract storage slots, all
  1882  			// but the last are surely complete. The last contract may be
  1883  			// chunked, so check its continuation flag.
  1884  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  1885  				res.mainTask.needState[j] = false
  1886  				res.mainTask.pend--
  1887  			}
  1888  			// If the last contract was chunked, mark it as needing healing
  1889  			// to avoid writing it out to disk prematurely.
  1890  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  1891  				res.mainTask.needHeal[j] = true
  1892  			}
  1893  			// If the last contract was chunked, we need to switch to large
  1894  			// contract handling mode
  1895  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  1896  				// If we haven't yet started a large-contract retrieval, create
  1897  				// the subtasks for it within the main account task
  1898  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  1899  					var (
  1900  						keys    = res.hashes[i]
  1901  						chunks  = uint64(storageConcurrency)
  1902  						lastKey common.Hash
  1903  					)
  1904  					if len(keys) > 0 {
  1905  						lastKey = keys[len(keys)-1]
  1906  					}
  1907  					// If the number of slots remaining is low, decrease the
  1908  					// number of chunks. Somewhere on the order of 10-15K slots
  1909  					// fit into a packet of 512KB. A key/slot pair is maximum 64
  1910  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  1911  					//
  1912  					// Chunk so that at least 2 packets are needed to fill a task.
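        					//
        					// As a worked example with the constants above: maxRequestSize/64
        					// = 512KiB/64 = 8192 pairs per packet, so one chunk should hold
        					// about 2*8192 = 16384 slots. An estimate of 100000 remaining
        					// slots would give n = 100000/16384 = 6 below, capping the split
        					// at n+1 = 7 chunks instead of the default storageConcurrency.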
  1913  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  1914  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  1915  							chunks = n + 1
  1916  						}
  1917  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  1918  					} else {
  1919  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  1920  					}
  1921  					r := newHashRange(lastKey, chunks)
  1922  
  1923  					// Our first task is the one that was just filled by this response.
  1924  					batch := ethdb.HookedBatch{
  1925  						Batch: s.db.NewBatch(),
  1926  						OnPut: func(key []byte, value []byte) {
  1927  							s.storageBytes += common.StorageSize(len(key) + len(value))
  1928  						},
  1929  					}
  1930  					tasks = append(tasks, &storageTask{
  1931  						Next:     common.Hash{},
  1932  						Last:     r.End(),
  1933  						root:     acc.Root,
  1934  						genBatch: batch,
  1935  						genTrie:  trie.NewStackTrie(batch),
  1936  					})
  1937  					for r.Next() {
  1938  						batch := ethdb.HookedBatch{
  1939  							Batch: s.db.NewBatch(),
  1940  							OnPut: func(key []byte, value []byte) {
  1941  								s.storageBytes += common.StorageSize(len(key) + len(value))
  1942  							},
  1943  						}
  1944  						tasks = append(tasks, &storageTask{
  1945  							Next:     r.Start(),
  1946  							Last:     r.End(),
  1947  							root:     acc.Root,
  1948  							genBatch: batch,
  1949  							genTrie:  trie.NewStackTrie(batch),
  1950  						})
  1951  					}
  1952  					for _, task := range tasks {
  1953  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  1954  					}
  1955  					res.mainTask.SubTasks[account] = tasks
  1956  
  1957  					// Since we've just created the sub-tasks, this response
  1958  					// is surely for the first one (zero origin)
  1959  					res.subTask = tasks[0]
  1960  				}
  1961  			}
  1962  			// If we're in large contract delivery mode, forward the subtask
  1963  			if res.subTask != nil {
  1964  				// Ensure the response doesn't overflow into the subsequent task
  1965  				last := res.subTask.Last.Big()
  1966  				// Find the first overflowing key. While at it, mark the response as
  1967  				// complete if the range includes or passes the 'last' key
  1968  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  1969  					cmp := res.hashes[i][k].Big().Cmp(last)
  1970  					if cmp >= 0 {
  1971  						res.cont = false
  1972  					}
  1973  					return cmp > 0
  1974  				})
  1975  				if index >= 0 {
  1976  					// cut off excess
  1977  					res.hashes[i] = res.hashes[i][:index]
  1978  					res.slots[i] = res.slots[i][:index]
  1979  				}
  1980  				// Forward the relevant storage chunk (even if created just now)
  1981  				if res.cont {
  1982  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  1983  				} else {
  1984  					res.subTask.done = true
  1985  				}
  1986  			}
  1987  		}
  1988  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  1989  		// push them to disk. If the contract is chunked, the trie nodes will be
  1990  		// reconstructed later.
  1991  		slots += len(res.hashes[i])
  1992  
  1993  		if i < len(res.hashes)-1 || res.subTask == nil {
  1994  			tr := trie.NewStackTrie(batch)
  1995  			for j := 0; j < len(res.hashes[i]); j++ {
  1996  				tr.Update(res.hashes[i][j][:], res.slots[i][j])
  1997  			}
  1998  			tr.Commit()
  1999  		}
  2000  		// Persist the received storage segments. This flat state may be
  2001  		// outdated during the sync, but it can be fixed later during
  2002  		// snapshot generation.
  2003  		for j := 0; j < len(res.hashes[i]); j++ {
  2004  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2005  
  2006  			// If we're storing large contracts, generate the trie nodes
  2007  			// on the fly to not trash the gluing points
  2008  			if i == len(res.hashes)-1 && res.subTask != nil {
  2009  				res.subTask.genTrie.Update(res.hashes[i][j][:], res.slots[i][j])
  2010  			}
  2011  		}
  2012  	}
  2013  	// Large contracts could have generated new trie nodes, flush them to disk
  2014  	if res.subTask != nil {
  2015  		if res.subTask.done {
  2016  			if root, err := res.subTask.genTrie.Commit(); err != nil {
  2017  				log.Error("Failed to commit stack slots", "err", err)
  2018  			} else if root == res.subTask.root {
  2019  				// If the chunk's root matches, the overflown delivery was actually complete; clear the heal request
  2020  				for i, account := range res.mainTask.res.hashes {
  2021  					if account == res.accounts[len(res.accounts)-1] {
  2022  						res.mainTask.needHeal[i] = false
  2023  					}
  2024  				}
  2025  			}
  2026  		}
  2027  		if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
  2028  			if err := res.subTask.genBatch.Write(); err != nil {
  2029  				log.Error("Failed to persist stack slots", "err", err)
  2030  			}
  2031  			res.subTask.genBatch.Reset()
  2032  		}
  2033  	}
  2034  	// Flush anything written just now and update the stats
  2035  	if err := batch.Write(); err != nil {
  2036  		log.Crit("Failed to persist storage slots", "err", err)
  2037  	}
  2038  	s.storageSynced += uint64(slots)
  2039  
  2040  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2041  
  2042  	// If this delivery completed the last pending task, forward the account task
  2043  	// to the next chunk
  2044  	if res.mainTask.pend == 0 {
  2045  		s.forwardAccountTask(res.mainTask)
  2046  		return
  2047  	}
  2048  	// Some accounts are still incomplete, leave as is for the storage and contract
  2049  	// task assigners to pick up and fill.
  2050  }
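
        // sort.Search above returns the smallest index for which the predicate
        // is true (or the slice length if there is none), so it locates the
        // first key strictly beyond the subtask boundary; it never returns a
        // negative value, which is why the 'index >= 0' guard is always taken.
        // A tiny sketch of the semantics on plain integers:
        //
        //	keys := []int{1, 3, 5, 7}
        //	limit := 5
        //	idx := sort.Search(len(keys), func(k int) bool { return keys[k] > limit })
        //	// idx == 3, so keys[:idx] == {1, 3, 5} keeps everything <= limit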
  2051  
  2052  // processTrienodeHealResponse integrates an already validated trienode response
  2053  // into the healer tasks.
  2054  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2055  	for i, hash := range res.hashes {
  2056  		node := res.nodes[i]
  2057  
  2058  		// If the trie node was not delivered, reschedule it
  2059  		if node == nil {
  2060  			res.task.trieTasks[hash] = res.paths[i]
  2061  			continue
  2062  		}
  2063  		// Push the trie node into the state syncer
  2064  		s.trienodeHealSynced++
  2065  		s.trienodeHealBytes += common.StorageSize(len(node))
  2066  
  2067  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2068  		switch err {
  2069  		case nil:
  2070  		case trie.ErrAlreadyProcessed:
  2071  			s.trienodeHealDups++
  2072  		case trie.ErrNotRequested:
  2073  			s.trienodeHealNops++
  2074  		default:
  2075  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2076  		}
  2077  	}
  2078  	batch := s.db.NewBatch()
  2079  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2080  		log.Error("Failed to commit healing data", "err", err)
  2081  	}
  2082  	if err := batch.Write(); err != nil {
  2083  		log.Crit("Failed to persist healing data", "err", err)
  2084  	}
  2085  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2086  }
  2087  
  2088  // processBytecodeHealResponse integrates an already validated bytecode response
  2089  // into the healer tasks.
  2090  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2091  	for i, hash := range res.hashes {
  2092  		node := res.codes[i]
  2093  
  2094  		// If the trie node was not delivered, reschedule it
  2095  		if node == nil {
  2096  			res.task.codeTasks[hash] = struct{}{}
  2097  			continue
  2098  		}
  2099  		// Push the trie node into the state syncer
  2100  		s.bytecodeHealSynced++
  2101  		s.bytecodeHealBytes += common.StorageSize(len(node))
  2102  
  2103  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2104  		switch err {
  2105  		case nil:
  2106  		case trie.ErrAlreadyProcessed:
  2107  			s.bytecodeHealDups++
  2108  		case trie.ErrNotRequested:
  2109  			s.bytecodeHealNops++
  2110  		default:
  2111  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2112  		}
  2113  	}
  2114  	batch := s.db.NewBatch()
  2115  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2116  		log.Error("Failed to commit healing data", "err", err)
  2117  	}
  2118  	if err := batch.Write(); err != nil {
  2119  		log.Crit("Failed to persist healing data", "err", err)
  2120  	}
  2121  	log.Debug("Persisted set of healing data", "type", "bytecode", "bytes", common.StorageSize(batch.ValueSize()))
  2122  }
  2123  
  2124  // forwardAccountTask takes a filled account task and persists anything available
  2125  // into the database, after which it forwards the next account marker so that the
  2126  // task's next chunk may be filled.
  2127  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2128  	// Remove any pending delivery
  2129  	res := task.res
  2130  	if res == nil {
  2131  		return // nothing to forward
  2132  	}
  2133  	task.res = nil
  2134  
  2135  	// Persist the received account segments. This flat state may be
  2136  	// outdated during the sync, but it can be fixed later during
  2137  	// snapshot generation.
  2138  	oldAccountBytes := s.accountBytes
  2139  
  2140  	batch := ethdb.HookedBatch{
  2141  		Batch: s.db.NewBatch(),
  2142  		OnPut: func(key []byte, value []byte) {
  2143  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2144  		},
  2145  	}
  2146  	for i, hash := range res.hashes {
  2147  		if task.needCode[i] || task.needState[i] {
  2148  			break
  2149  		}
  2150  		slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].CodeHash)
  2151  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2152  
  2153  		// If the task is complete, drop it into the stack trie to generate
  2154  		// account trie nodes for it
  2155  		if !task.needHeal[i] {
  2156  			full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2157  			if err != nil {
  2158  				panic(err) // Really shouldn't ever happen
  2159  			}
  2160  			task.genTrie.Update(hash[:], full)
  2161  		}
  2162  	}
  2163  	// Flush anything written just now and update the stats
  2164  	if err := batch.Write(); err != nil {
  2165  		log.Crit("Failed to persist accounts", "err", err)
  2166  	}
  2167  	s.accountSynced += uint64(len(res.accounts))
  2168  
  2169  	// Task filling persisted, push the chunk marker forward to the first
  2170  	// account still missing data.
  2171  	for i, hash := range res.hashes {
  2172  		if task.needCode[i] || task.needState[i] {
  2173  			return
  2174  		}
  2175  		task.Next = incHash(hash)
  2176  	}
  2177  	// All accounts marked as complete, track if the entire task is done
  2178  	task.done = !res.cont
  2179  
  2180  	// Stack trie could have generated trie nodes, push them to disk (we need to
  2181  	// flush after finalizing task.done). It's fine even if we crash and lose this
  2182  	// write, as it will only cause more data to be downloaded during healing.
  2183  	if task.done {
  2184  		if _, err := task.genTrie.Commit(); err != nil {
  2185  			log.Error("Failed to commit stack account", "err", err)
  2186  		}
  2187  	}
  2188  	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
  2189  		if err := task.genBatch.Write(); err != nil {
  2190  			log.Error("Failed to persist stack account", "err", err)
  2191  		}
  2192  		task.genBatch.Reset()
  2193  	}
  2194  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2195  }
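
        // genBatch above follows the usual accumulate-and-flush idiom: writes
        // pile up in the batch and only hit disk once it outgrows
        // ethdb.IdealBatchSize or the task finishes. A minimal sketch with a
        // hypothetical 'done' flag:
        //
        //	if batch.ValueSize() > ethdb.IdealBatchSize || done {
        //		if err := batch.Write(); err != nil {
        //			log.Error("Failed to persist batch", "err", err)
        //		}
        //		batch.Reset() // reuse the batch for the next accumulation
        //	}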
  2196  
  2197  // OnAccounts is a callback method to invoke when a range of accounts is
  2198  // received from a remote peer.
  2199  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2200  	size := common.StorageSize(len(hashes) * common.HashLength)
  2201  	for _, account := range accounts {
  2202  		size += common.StorageSize(len(account))
  2203  	}
  2204  	for _, node := range proof {
  2205  		size += common.StorageSize(len(node))
  2206  	}
  2207  	logger := peer.Log().New("reqid", id)
  2208  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2209  
  2210  	// Whether or not the response is valid, we can mark the peer as idle and
  2211  	// notify the scheduler to assign a new task. If the response is invalid,
  2212  	// we'll drop the peer in a bit.
  2213  	s.lock.Lock()
  2214  	if _, ok := s.peers[peer.ID()]; ok {
  2215  		s.accountIdlers[peer.ID()] = struct{}{}
  2216  	}
  2217  	select {
  2218  	case s.update <- struct{}{}:
  2219  	default:
  2220  	}
  2221  	// Ensure the response is for a valid request
  2222  	req, ok := s.accountReqs[id]
  2223  	if !ok {
  2224  		// Request stale, perhaps the peer timed out but came through in the end
  2225  		logger.Warn("Unexpected account range packet")
  2226  		s.lock.Unlock()
  2227  		return nil
  2228  	}
  2229  	delete(s.accountReqs, id)
  2230  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2231  
  2232  	// Clean up the request timeout timer, we'll see how to proceed further based
  2233  	// on the actual delivered content
  2234  	if !req.timeout.Stop() {
  2235  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2236  		s.lock.Unlock()
  2237  		return nil
  2238  	}
  2239  	// Response is valid, but check if peer is signalling that it does not have
  2240  	// the requested data. For account range queries that means the state being
  2241  	// retrieved was either already pruned remotely, or the peer is not yet
  2242  	// synced to our head.
  2243  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2244  		logger.Debug("Peer rejected account range request", "root", s.root)
  2245  		s.statelessPeers[peer.ID()] = struct{}{}
  2246  		s.lock.Unlock()
  2247  
  2248  		// Signal this request as failed, and ready for rescheduling
  2249  		s.scheduleRevertAccountRequest(req)
  2250  		return nil
  2251  	}
  2252  	root := s.root
  2253  	s.lock.Unlock()
  2254  
  2255  	// Reconstruct a partial trie from the response and verify it
  2256  	keys := make([][]byte, len(hashes))
  2257  	for i, key := range hashes {
  2258  		keys[i] = common.CopyBytes(key[:])
  2259  	}
  2260  	nodes := make(light.NodeList, len(proof))
  2261  	for i, node := range proof {
  2262  		nodes[i] = node
  2263  	}
  2264  	proofdb := nodes.NodeSet()
  2265  
  2266  	var end []byte
  2267  	if len(keys) > 0 {
  2268  		end = keys[len(keys)-1]
  2269  	}
  2270  	cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
  2271  	if err != nil {
  2272  		logger.Warn("Account range failed proof", "err", err)
  2273  		// Signal this request as failed, and ready for rescheduling
  2274  		s.scheduleRevertAccountRequest(req)
  2275  		return err
  2276  	}
  2277  	accs := make([]*state.Account, len(accounts))
  2278  	for i, account := range accounts {
  2279  		acc := new(state.Account)
  2280  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2281  			panic(err) // We created these blobs, we must be able to decode them
  2282  		}
  2283  		accs[i] = acc
  2284  	}
  2285  	response := &accountResponse{
  2286  		task:     req.task,
  2287  		hashes:   hashes,
  2288  		accounts: accs,
  2289  		cont:     cont,
  2290  	}
  2291  	select {
  2292  	case req.deliver <- response:
  2293  	case <-req.cancel:
  2294  	case <-req.stale:
  2295  	}
  2296  	return nil
  2297  }
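
        // The boolean returned by trie.VerifyRangeProof (bound to 'cont' above)
        // reports whether more entries exist in the trie beyond the proven
        // range. A sketch of how a caller interprets it, reusing the call shape
        // from above with hypothetical arguments:
        //
        //	cont, err := trie.VerifyRangeProof(root, origin, end, keys, values, proofdb)
        //	if err != nil {
        //		// proof failed: the response cannot be trusted, reject it
        //	} else if cont {
        //		// more data remains after 'end': schedule the next chunk
        //	}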
  2298  
  2299  // OnByteCodes is a callback method to invoke when a batch of contract
  2300  // bytecodes is received from a remote peer.
  2301  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2302  	s.lock.RLock()
  2303  	syncing := !s.snapped
  2304  	s.lock.RUnlock()
  2305  
  2306  	if syncing {
  2307  		return s.onByteCodes(peer, id, bytecodes)
  2308  	}
  2309  	return s.onHealByteCodes(peer, id, bytecodes)
  2310  }
  2311  
  2312  // onByteCodes is a callback method to invoke when a batch of contract
  2313  // bytecodes is received from a remote peer in the syncing phase.
  2314  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2315  	var size common.StorageSize
  2316  	for _, code := range bytecodes {
  2317  		size += common.StorageSize(len(code))
  2318  	}
  2319  	logger := peer.Log().New("reqid", id)
  2320  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2321  
  2322  	// Whether or not the response is valid, we can mark the peer as idle and
  2323  	// notify the scheduler to assign a new task. If the response is invalid,
  2324  	// we'll drop the peer in a bit.
  2325  	s.lock.Lock()
  2326  	if _, ok := s.peers[peer.ID()]; ok {
  2327  		s.bytecodeIdlers[peer.ID()] = struct{}{}
  2328  	}
  2329  	select {
  2330  	case s.update <- struct{}{}:
  2331  	default:
  2332  	}
  2333  	// Ensure the response is for a valid request
  2334  	req, ok := s.bytecodeReqs[id]
  2335  	if !ok {
  2336  		// Request stale, perhaps the peer timed out but came through in the end
  2337  		logger.Warn("Unexpected bytecode packet")
  2338  		s.lock.Unlock()
  2339  		return nil
  2340  	}
  2341  	delete(s.bytecodeReqs, id)
  2342  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2343  
  2344  	// Clean up the request timeout timer, we'll see how to proceed further based
  2345  	// on the actual delivered content
  2346  	if !req.timeout.Stop() {
  2347  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2348  		s.lock.Unlock()
  2349  		return nil
  2350  	}
  2351  
  2352  	// Response is valid, but check if peer is signalling that it does not have
  2353  	// the requested data. For bytecode range queries that means the peer is not
  2354  	// yet synced.
  2355  	if len(bytecodes) == 0 {
  2356  		logger.Debug("Peer rejected bytecode request")
  2357  		s.statelessPeers[peer.ID()] = struct{}{}
  2358  		s.lock.Unlock()
  2359  
  2360  		// Signal this request as failed, and ready for rescheduling
  2361  		s.scheduleRevertBytecodeRequest(req)
  2362  		return nil
  2363  	}
  2364  	s.lock.Unlock()
  2365  
  2366  	// Cross reference the requested bytecodes with the response to find gaps
  2367  	// that the serving node is missing
  2368  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2369  	hash := make([]byte, 32)
  2370  
  2371  	codes := make([][]byte, len(req.hashes))
  2372  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2373  		// Find the next hash that we've been served, leaving misses with nils
  2374  		hasher.Reset()
  2375  		hasher.Write(bytecodes[i])
  2376  		hasher.Read(hash)
  2377  
  2378  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2379  			j++
  2380  		}
  2381  		if j < len(req.hashes) {
  2382  			codes[j] = bytecodes[i]
  2383  			j++
  2384  			continue
  2385  		}
  2386  		// We've either run out of hashes, or got unrequested data
  2387  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2388  		// Signal this request as failed, and ready for rescheduling
  2389  		s.scheduleRevertBytecodeRequest(req)
  2390  		return errors.New("unexpected bytecode")
  2391  	}
  2392  	// Response validated, send it to the scheduler for filling
  2393  	response := &bytecodeResponse{
  2394  		task:   req.task,
  2395  		hashes: req.hashes,
  2396  		codes:  codes,
  2397  	}
  2398  	select {
  2399  	case req.deliver <- response:
  2400  	case <-req.cancel:
  2401  	case <-req.stale:
  2402  	}
  2403  	return nil
  2404  }
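
        // The delivery loop above aligns the served blobs with the requested
        // hashes using two monotonically advancing indices, leaving nil gaps
        // for items the peer skipped. The same two-pointer matching on plain
        // strings (hypothetical data):
        //
        //	want := []string{"a", "b", "c", "d"}
        //	got := []string{"b", "d"} // peer skipped "a" and "c"
        //	out := make([]string, len(want))
        //	for i, j := 0, 0; i < len(got); i++ {
        //		for j < len(want) && want[j] != got[i] {
        //			j++ // leave a zero-value gap for each miss
        //		}
        //		if j == len(want) {
        //			break // unrequested or out-of-order data, reject
        //		}
        //		out[j] = got[i]
        //		j++
        //	}
        //	// out == ["", "b", "", "d"]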
  2405  
  2406  // OnStorage is a callback method to invoke when ranges of storage slots
  2407  // are received from a remote peer.
  2408  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2409  	// Gather some trace stats to aid in debugging issues
  2410  	var (
  2411  		hashCount int
  2412  		slotCount int
  2413  		size      common.StorageSize
  2414  	)
  2415  	for _, hashset := range hashes {
  2416  		size += common.StorageSize(common.HashLength * len(hashset))
  2417  		hashCount += len(hashset)
  2418  	}
  2419  	for _, slotset := range slots {
  2420  		for _, slot := range slotset {
  2421  			size += common.StorageSize(len(slot))
  2422  		}
  2423  		slotCount += len(slotset)
  2424  	}
  2425  	for _, node := range proof {
  2426  		size += common.StorageSize(len(node))
  2427  	}
  2428  	logger := peer.Log().New("reqid", id)
  2429  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2430  
  2431  	// Whether or not the response is valid, we can mark the peer as idle and
  2432  	// notify the scheduler to assign a new task. If the response is invalid,
  2433  	// we'll drop the peer in a bit.
  2434  	s.lock.Lock()
  2435  	if _, ok := s.peers[peer.ID()]; ok {
  2436  		s.storageIdlers[peer.ID()] = struct{}{}
  2437  	}
  2438  	select {
  2439  	case s.update <- struct{}{}:
  2440  	default:
  2441  	}
  2442  	// Ensure the response is for a valid request
  2443  	req, ok := s.storageReqs[id]
  2444  	if !ok {
  2445  		// Request stale, perhaps the peer timed out but came through in the end
  2446  		logger.Warn("Unexpected storage ranges packet")
  2447  		s.lock.Unlock()
  2448  		return nil
  2449  	}
  2450  	delete(s.storageReqs, id)
  2451  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2452  
  2453  	// Clean up the request timeout timer, we'll see how to proceed further based
  2454  	// on the actual delivered content
  2455  	if !req.timeout.Stop() {
  2456  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2457  		s.lock.Unlock()
  2458  		return nil
  2459  	}
  2460  
  2461  	// Reject the response if the hash sets and slot sets don't match, or if the
  2462  	// peer sent more data than requested.
  2463  	if len(hashes) != len(slots) {
  2464  		s.lock.Unlock()
  2465  		s.scheduleRevertStorageRequest(req) // reschedule request
  2466  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2467  		return errors.New("hash and slot set size mismatch")
  2468  	}
  2469  	if len(hashes) > len(req.accounts) {
  2470  		s.lock.Unlock()
  2471  		s.scheduleRevertStorageRequest(req) // reschedule request
  2472  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2473  		return errors.New("hash set larger than requested")
  2474  	}
  2475  	// Response is valid, but check if peer is signalling that it does not have
  2476  	// the requested data. For storage range queries that means the state being
  2477  	// retrieved was either already pruned remotely, or the peer is not yet
  2478  	// synced to our head.
  2479  	if len(hashes) == 0 {
  2480  		logger.Debug("Peer rejected storage request")
  2481  		s.statelessPeers[peer.ID()] = struct{}{}
  2482  		s.lock.Unlock()
  2483  		s.scheduleRevertStorageRequest(req) // reschedule request
  2484  		return nil
  2485  	}
  2486  	s.lock.Unlock()
  2487  
  2488  	// Reconstruct the partial tries from the response and verify them
  2489  	var cont bool
  2490  
  2491  	for i := 0; i < len(hashes); i++ {
  2492  		// Convert the keys and proofs into an internal format
  2493  		keys := make([][]byte, len(hashes[i]))
  2494  		for j, key := range hashes[i] {
  2495  			keys[j] = common.CopyBytes(key[:])
  2496  		}
  2497  		nodes := make(light.NodeList, 0, len(proof))
  2498  		if i == len(hashes)-1 {
  2499  			for _, node := range proof {
  2500  				nodes = append(nodes, node)
  2501  			}
  2502  		}
  2503  		var err error
  2504  		if len(nodes) == 0 {
  2505  			// No proof has been attached, the response must cover the entire key
  2506  			// space and hash to the origin root.
  2507  			_, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
  2508  			if err != nil {
  2509  				s.scheduleRevertStorageRequest(req) // reschedule request
  2510  				logger.Warn("Storage slots failed proof", "err", err)
  2511  				return err
  2512  			}
  2513  		} else {
  2514  			// A proof was attached, the response is only partial, check that the
  2515  			// returned data is indeed part of the storage trie
  2516  			proofdb := nodes.NodeSet()
  2517  
  2518  			var end []byte
  2519  			if len(keys) > 0 {
  2520  				end = keys[len(keys)-1]
  2521  			}
  2522  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
  2523  			if err != nil {
  2524  				s.scheduleRevertStorageRequest(req) // reschedule request
  2525  				logger.Warn("Storage range failed proof", "err", err)
  2526  				return err
  2527  			}
  2528  		}
  2529  	}
  2530  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2531  	response := &storageResponse{
  2532  		mainTask: req.mainTask,
  2533  		subTask:  req.subTask,
  2534  		accounts: req.accounts,
  2535  		roots:    req.roots,
  2536  		hashes:   hashes,
  2537  		slots:    slots,
  2538  		cont:     cont,
  2539  	}
  2540  	select {
  2541  	case req.deliver <- response:
  2542  	case <-req.cancel:
  2543  	case <-req.stale:
  2544  	}
  2545  	return nil
  2546  }
  2547  
  2548  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2549  // is received from a remote peer.
  2550  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2551  	var size common.StorageSize
  2552  	for _, node := range trienodes {
  2553  		size += common.StorageSize(len(node))
  2554  	}
  2555  	logger := peer.Log().New("reqid", id)
  2556  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2557  
  2558  	// Whether or not the response is valid, we can mark the peer as idle and
  2559  	// notify the scheduler to assign a new task. If the response is invalid,
  2560  	// we'll drop the peer in a bit.
  2561  	s.lock.Lock()
  2562  	if _, ok := s.peers[peer.ID()]; ok {
  2563  		s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2564  	}
  2565  	select {
  2566  	case s.update <- struct{}{}:
  2567  	default:
  2568  	}
  2569  	// Ensure the response is for a valid request
  2570  	req, ok := s.trienodeHealReqs[id]
  2571  	if !ok {
  2572  		// Request stale, perhaps the peer timed out but came through in the end
  2573  		logger.Warn("Unexpected trienode heal packet")
  2574  		s.lock.Unlock()
  2575  		return nil
  2576  	}
  2577  	delete(s.trienodeHealReqs, id)
  2578  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2579  
  2580  	// Clean up the request timeout timer, we'll see how to proceed further based
  2581  	// on the actual delivered content
  2582  	if !req.timeout.Stop() {
  2583  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2584  		s.lock.Unlock()
  2585  		return nil
  2586  	}
  2587  
  2588  	// Response is valid, but check if peer is signalling that it does not have
  2589  	// the requested data. For trienode heal queries that means the peer is not
  2590  	// yet synced.
  2591  	if len(trienodes) == 0 {
  2592  		logger.Debug("Peer rejected trienode heal request")
  2593  		s.statelessPeers[peer.ID()] = struct{}{}
  2594  		s.lock.Unlock()
  2595  
  2596  		// Signal this request as failed, and ready for rescheduling
  2597  		s.scheduleRevertTrienodeHealRequest(req)
  2598  		return nil
  2599  	}
  2600  	s.lock.Unlock()
  2601  
  2602  	// Cross reference the requested trienodes with the response to find gaps
  2603  	// that the serving node is missing
  2604  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2605  	hash := make([]byte, 32)
  2606  
  2607  	nodes := make([][]byte, len(req.hashes))
  2608  	for i, j := 0, 0; i < len(trienodes); i++ {
  2609  		// Find the next hash that we've been served, leaving misses with nils
  2610  		hasher.Reset()
  2611  		hasher.Write(trienodes[i])
  2612  		hasher.Read(hash)
  2613  
  2614  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2615  			j++
  2616  		}
  2617  		if j < len(req.hashes) {
  2618  			nodes[j] = trienodes[i]
  2619  			j++
  2620  			continue
  2621  		}
  2622  		// We've either run out of hashes, or got unrequested data
  2623  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2624  		// Signal this request as failed, and ready for rescheduling
  2625  		s.scheduleRevertTrienodeHealRequest(req)
  2626  		return errors.New("unexpected healing trienode")
  2627  	}
  2628  	// Response validated, send it to the scheduler for filling
  2629  	response := &trienodeHealResponse{
  2630  		task:   req.task,
  2631  		hashes: req.hashes,
  2632  		paths:  req.paths,
  2633  		nodes:  nodes,
  2634  	}
  2635  	select {
  2636  	case req.deliver <- response:
  2637  	case <-req.cancel:
  2638  	case <-req.stale:
  2639  	}
  2640  	return nil
  2641  }
  2642  
  2643  // onHealByteCodes is a callback method to invoke when a batch of contract
  2644  // bytecodes is received from a remote peer in the healing phase.
  2645  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2646  	var size common.StorageSize
  2647  	for _, code := range bytecodes {
  2648  		size += common.StorageSize(len(code))
  2649  	}
  2650  	logger := peer.Log().New("reqid", id)
  2651  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2652  
  2653  	// Whether or not the response is valid, we can mark the peer as idle and
  2654  	// notify the scheduler to assign a new task. If the response is invalid,
  2655  	// we'll drop the peer in a bit.
  2656  	s.lock.Lock()
  2657  	if _, ok := s.peers[peer.ID()]; ok {
  2658  		s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2659  	}
  2660  	select {
  2661  	case s.update <- struct{}{}:
  2662  	default:
  2663  	}
  2664  	// Ensure the response is for a valid request
  2665  	req, ok := s.bytecodeHealReqs[id]
  2666  	if !ok {
  2667  		// Request stale, perhaps the peer timed out but came through in the end
  2668  		logger.Warn("Unexpected bytecode heal packet")
  2669  		s.lock.Unlock()
  2670  		return nil
  2671  	}
  2672  	delete(s.bytecodeHealReqs, id)
  2673  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2674  
  2675  	// Clean up the request timeout timer; we'll decide how to proceed further
  2676  	// based on the actual delivered content
  2677  	if !req.timeout.Stop() {
  2678  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2679  		s.lock.Unlock()
  2680  		return nil
  2681  	}
  2682  
  2683  	// Response is valid, but check if peer is signalling that it does not have
  2684  	// the requested data. For bytecode heal requests that means the peer is not
  2685  	// yet synced.
  2686  	if len(bytecodes) == 0 {
  2687  		logger.Debug("Peer rejected bytecode heal request")
  2688  		s.statelessPeers[peer.ID()] = struct{}{}
  2689  		s.lock.Unlock()
  2690  
  2691  		// Signal this request as failed, and ready for rescheduling
  2692  		s.scheduleRevertBytecodeHealRequest(req)
  2693  		return nil
  2694  	}
  2695  	s.lock.Unlock()
  2696  
  2697  	// Cross reference the requested bytecodes with the response, to find any
  2698  	// gaps where the serving node is missing data
  2699  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2700  	hash := make([]byte, 32)
  2701  
  2702  	codes := make([][]byte, len(req.hashes))
  2703  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2704  		// Find the next hash that we've been served, leaving misses with nils
  2705  		hasher.Reset()
  2706  		hasher.Write(bytecodes[i])
  2707  		hasher.Read(hash)
  2708  
  2709  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2710  			j++
  2711  		}
  2712  		if j < len(req.hashes) {
  2713  			codes[j] = bytecodes[i]
  2714  			j++
  2715  			continue
  2716  		}
  2717  		// We've either run out of hashes, or got unrequested data
  2718  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  2719  		// Signal this request as failed, and ready for rescheduling
  2720  		s.scheduleRevertBytecodeHealRequest(req)
  2721  		return errors.New("unexpected healing bytecode")
  2722  	}
  2723  	// Response validated, send it to the scheduler for filling
  2724  	response := &bytecodeHealResponse{
  2725  		task:   req.task,
  2726  		hashes: req.hashes,
  2727  		codes:  codes,
  2728  	}
  2729  	select {
  2730  	case req.deliver <- response:
  2731  	case <-req.cancel:
  2732  	case <-req.stale:
  2733  	}
  2734  	return nil
  2735  }
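// Both heal callbacks end with the same guarded delivery: the select hands the
// validated response to the scheduler only while the request is still live,
// and silently drops it if the sync was cancelled or the request already went
// stale. A minimal sketch of the pattern with hypothetical channels:
func exampleGuardedDeliver(deliver chan<- []byte, cancel, stale <-chan struct{}, blob []byte) {
	select {
	case deliver <- blob: // request still live, scheduler consumes it
	case <-cancel: // sync torn down, drop the response
	case <-stale: // request timed out and was rescheduled, drop it
	}
}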
  2736  
  2737  // onHealState is a callback method to invoke when a flat state (account
  2738  // or storage slot) is downloaded during the healing stage. The flat states
  2739  // can be persisted blindly and fixed up later in the generation stage. Note,
  2740  // this method is not concurrency safe; callers must serialize access to it.
  2741  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
  2742  	if len(paths) == 1 {
  2743  		var account state.Account
  2744  		if err := rlp.DecodeBytes(value, &account); err != nil {
  2745  			return nil
  2746  		}
  2747  		blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash)
  2748  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  2749  		s.accountHealed += 1
  2750  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  2751  	}
  2752  	if len(paths) == 2 {
  2753  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  2754  		s.storageHealed += 1
  2755  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  2756  	}
  2757  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  2758  		s.stateWriter.Write() // It's fine to ignore the error here
  2759  		s.stateWriter.Reset()
  2760  	}
  2761  	return nil
  2762  }
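// onHealState relies on the standard ethdb batching idiom: accumulate writes
// in a batch and flush whenever the pending payload crosses IdealBatchSize. A
// minimal sketch, assuming a hypothetical database handle and key/value pairs
// (the function name is illustrative only):
func exampleBatchedWrites(db ethdb.Database, keys, vals [][]byte) error {
	batch := db.NewBatch()
	for i := range keys {
		if err := batch.Put(keys[i], vals[i]); err != nil {
			return err
		}
		// Flush once enough data has accumulated to amortize the write cost
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write() // flush the remainder
}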
  2763  
  2764  // hashSpace is the total size of the 256 bit hash space for accounts.
  2765  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  2766  
  2767  // report calculates various status reports and provides them to the user.
  2768  func (s *Syncer) report(force bool) {
  2769  	if len(s.tasks) > 0 {
  2770  		s.reportSyncProgress(force)
  2771  		return
  2772  	}
  2773  	s.reportHealProgress(force)
  2774  }
  2775  
  2776  // reportSyncProgress calculates various status reports and provides them to the user.
  2777  func (s *Syncer) reportSyncProgress(force bool) {
  2778  	// Don't report all the events, just occasionally
  2779  	if !force && time.Since(s.logTime) < 8*time.Second {
  2780  		return
  2781  	}
  2782  	// Don't report anything until we have meaningful progress
  2783  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  2784  	if synced == 0 {
  2785  		return
  2786  	}
  2787  	accountGaps := new(big.Int)
  2788  	for _, task := range s.tasks {
  2789  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  2790  	}
  2791  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  2792  	if accountFills.BitLen() == 0 {
  2793  		return
  2794  	}
  2795  	s.logTime = time.Now()
  2796  	estBytes := float64(new(big.Int).Div(
  2797  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  2798  		accountFills,
  2799  	).Uint64())
  2800  
  2801  	elapsed := time.Since(s.startTime)
  2802  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  2803  
  2804  	// Create a mega progress report
  2805  	var (
  2806  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  2807  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  2808  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  2809  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  2810  	)
  2811  	log.Info("State sync in progress", "synced", progress, "state", synced,
  2812  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  2813  }
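// The progress math above extrapolates linearly: if `synced` bytes covered
// `accountFills` of the hash space, the whole space should take roughly
// synced * hashSpace / accountFills bytes. A hypothetical worked example
// (not part of the syncer): 2GiB downloaded while covering a quarter of the
// account space estimates 8GiB in total, leaving an ETA of about three times
// the elapsed duration.
func exampleSyncEstimate() {
	var (
		elapsed = 2 * time.Hour
		synced  = uint64(2 << 30) // 2GiB fetched so far
		fills   = new(big.Int).Div(hashSpace, big.NewInt(4)) // quarter covered
	)
	estBytes := float64(new(big.Int).Div(
		new(big.Int).Mul(new(big.Int).SetUint64(synced), hashSpace),
		fills,
	).Uint64())
	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
	fmt.Println(common.PrettyDuration(estTime - elapsed)) // roughly 6h left
}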
  2814  
  2815  // reportHealProgress calculates various status reports and provides them to the user.
  2816  func (s *Syncer) reportHealProgress(force bool) {
  2817  	// Don't report all the events, just occasionally
  2818  	if !force && time.Since(s.logTime) < 8*time.Second {
  2819  		return
  2820  	}
  2821  	s.logTime = time.Now()
  2822  
  2823  	// Create a mega progress report
  2824  	var (
  2825  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  2826  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  2827  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  2828  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  2829  	)
  2830  	log.Info("State heal in progress", "accounts", accounts, "slots", storage,
  2831  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  2832  }
  2833  
  2834  // estimateRemainingSlots tries to determine roughly how many slots are left in
  2835  // a contract storage, based on the number of keys and the last hash. This method
  2836  // assumes that the hashes are lexicographically ordered and evenly distributed.
  2837  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  2838  	if last == (common.Hash{}) {
  2839  		return 0, errors.New("last hash empty")
  2840  	}
  2841  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  2842  	space.Div(space, last.Big())
  2843  	if !space.IsUint64() {
  2844  		// Gigantic address space probably due to too few or malicious slots
  2845  		return 0, errors.New("too few slots for estimation")
  2846  	}
  2847  	return space.Uint64() - uint64(hashes), nil
  2848  }
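// A hypothetical worked example of the estimate above: if 100 slot hashes
// were fetched and the last one sits at 1/10th of the hash space, an even
// distribution implies roughly 100 * 10 = 1000 slots in total, so about 900
// remain. In code (the function name is illustrative only):
func exampleRemainingSlots() {
	last := common.BigToHash(new(big.Int).Div(math.MaxBig256, big.NewInt(10)))
	if remaining, err := estimateRemainingSlots(100, last); err == nil {
		fmt.Println(remaining) // ~900
	}
}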
  2849  
  2850  // capacitySort implements sort.Interface, allowing sorting by peer message
  2851  // throughput. Note, callers should use sort.Reverse to get the desired effect
  2852  // of highest capacity being at the front.
  2853  type capacitySort struct {
  2854  	ids  []string
  2855  	caps []int
  2856  }
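// A minimal usage sketch for capacitySort with hypothetical peer ids and
// measured capacities; wrapping it in sort.Reverse puts the highest-capacity
// peer at the front, as the doc comment above prescribes:
func exampleSortByCapacity() {
	ids := []string{"peer-a", "peer-b", "peer-c"}
	caps := []int{10, 30, 20}
	sort.Sort(sort.Reverse(&capacitySort{ids: ids, caps: caps}))
	fmt.Println(ids) // [peer-b peer-c peer-a]
}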
  2857  
  2858  func (s *capacitySort) Len() int {
  2859  	return len(s.ids)
  2860  }
  2861  
  2862  func (s *capacitySort) Less(i, j int) bool {
  2863  	return s.caps[i] < s.caps[j]
  2864  }
  2865  
  2866  func (s *capacitySort) Swap(i, j int) {
  2867  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  2868  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  2869  }