github.1485827954.workers.dev/ethereum/go-ethereum@v1.14.3/eth/protocols/snap/sync.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	gomath "math"
    25  	"math/big"
    26  	"math/rand"
    27  	"sort"
    28  	"sync"
    29  	"sync/atomic"
    30  	"time"
    31  
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/common/math"
    34  	"github.com/ethereum/go-ethereum/core/rawdb"
    35  	"github.com/ethereum/go-ethereum/core/state"
    36  	"github.com/ethereum/go-ethereum/core/types"
    37  	"github.com/ethereum/go-ethereum/crypto"
    38  	"github.com/ethereum/go-ethereum/ethdb"
    39  	"github.com/ethereum/go-ethereum/event"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/p2p/msgrate"
    42  	"github.com/ethereum/go-ethereum/rlp"
    43  	"github.com/ethereum/go-ethereum/trie"
    44  	"github.com/ethereum/go-ethereum/trie/trienode"
    45  )
    46  
    47  const (
    48  	// minRequestSize is the minimum number of bytes to request from a remote peer.
    49  	// This number is used as the low cap for account and storage range requests.
    50  	// Bytecode and trienode are limited inherently by item count (1).
    51  	minRequestSize = 64 * 1024
    52  
    53  	// maxRequestSize is the maximum number of bytes to request from a remote peer.
    54  	// This number is used as the high cap for account and storage range requests.
    55  	// Bytecode and trienode are limited more explicitly by the caps below.
    56  	maxRequestSize = 512 * 1024
    57  
    58  	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
    59  	// single query. If this number is too low, we're not filling responses fully
     60  	// and are wasting round trip times. If it's too high, we're capping
     61  	// responses and wasting bandwidth.
    62  	//
    63  	// Deployed bytecodes are currently capped at 24KB, so the minimum request
     64  	// count should be maxRequestSize / 24K. Assuming that most contracts do not
    65  	// come close to that, requesting 4x should be a good approximation.
    66  	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
    67  
    68  	// maxTrieRequestCount is the maximum number of trie node blobs to request in
    69  	// a single query. If this number is too low, we're not filling responses fully
     70  	// and are wasting round trip times. If it's too high, we're capping
     71  	// responses and wasting bandwidth.
    72  	maxTrieRequestCount = maxRequestSize / 512
    73  
    74  	// trienodeHealRateMeasurementImpact is the impact a single measurement has on
    75  	// the local node's trienode processing capacity. A value closer to 0 reacts
    76  	// slower to sudden changes, but it is also more stable against temporary hiccups.
    77  	trienodeHealRateMeasurementImpact = 0.005
    78  
    79  	// minTrienodeHealThrottle is the minimum divisor for throttling trie node
    80  	// heal requests to avoid overloading the local node and excessively expanding
    81  	// the state trie breadth wise.
    82  	minTrienodeHealThrottle = 1
    83  
    84  	// maxTrienodeHealThrottle is the maximum divisor for throttling trie node
     85  	// heal requests to avoid overloading the local node and excessively expanding
     86  	// the state trie breadth wise.
    87  	maxTrienodeHealThrottle = maxTrieRequestCount
    88  
    89  	// trienodeHealThrottleIncrease is the multiplier for the throttle when the
    90  	// rate of arriving data is higher than the rate of processing it.
    91  	trienodeHealThrottleIncrease = 1.33
    92  
    93  	// trienodeHealThrottleDecrease is the divisor for the throttle when the
    94  	// rate of arriving data is lower than the rate of processing it.
    95  	trienodeHealThrottleDecrease = 1.25
    96  
     97  	// batchSizeThreshold is the maximum size allowed for the gentrie batch.
    98  	batchSizeThreshold = 8 * 1024 * 1024
    99  )
   100  
   101  var (
   102  	// accountConcurrency is the number of chunks to split the account trie into
   103  	// to allow concurrent retrievals.
   104  	accountConcurrency = 16
   105  
    106  	// storageConcurrency is the number of chunks to split a large contract
   107  	// storage trie into to allow concurrent retrievals.
   108  	storageConcurrency = 16
   109  )
   110  
   111  // ErrCancelled is returned from snap syncing if the operation was prematurely
   112  // terminated.
   113  var ErrCancelled = errors.New("sync cancelled")
   114  
   115  // accountRequest tracks a pending account range request to ensure responses are
   116  // to actual requests and to validate any security constraints.
   117  //
   118  // Concurrency note: account requests and responses are handled concurrently from
   119  // the main runloop to allow Merkle proof verifications on the peer's thread and
    120  // to drop the peer on an invalid response. The request struct must contain all the data to
   121  // construct the response without accessing runloop internals (i.e. task). That
   122  // is only included to allow the runloop to match a response to the task being
   123  // synced without having yet another set of maps.
   124  type accountRequest struct {
   125  	peer string    // Peer to which this request is assigned
   126  	id   uint64    // Request ID of this request
   127  	time time.Time // Timestamp when the request was sent
   128  
   129  	deliver chan *accountResponse // Channel to deliver successful response on
   130  	revert  chan *accountRequest  // Channel to deliver request failure on
   131  	cancel  chan struct{}         // Channel to track sync cancellation
   132  	timeout *time.Timer           // Timer to track delivery timeout
   133  	stale   chan struct{}         // Channel to signal the request was dropped
   134  
   135  	origin common.Hash // First account requested to allow continuation checks
   136  	limit  common.Hash // Last account requested to allow non-overlapping chunking
   137  
   138  	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
   139  }
   140  
   141  // accountResponse is an already Merkle-verified remote response to an account
   142  // range request. It contains the subtrie for the requested account range and
   143  // the database that's going to be filled with the internal nodes on commit.
   144  type accountResponse struct {
   145  	task *accountTask // Task which this request is filling
   146  
   147  	hashes   []common.Hash         // Account hashes in the returned range
   148  	accounts []*types.StateAccount // Expanded accounts in the returned range
   149  
   150  	cont bool // Whether the account range has a continuation
   151  }
   152  
   153  // bytecodeRequest tracks a pending bytecode request to ensure responses are to
   154  // actual requests and to validate any security constraints.
   155  //
   156  // Concurrency note: bytecode requests and responses are handled concurrently from
   157  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
    158  // to drop the peer on an invalid response. The request struct must contain all the data to
   159  // construct the response without accessing runloop internals (i.e. task). That
   160  // is only included to allow the runloop to match a response to the task being
   161  // synced without having yet another set of maps.
   162  type bytecodeRequest struct {
   163  	peer string    // Peer to which this request is assigned
   164  	id   uint64    // Request ID of this request
   165  	time time.Time // Timestamp when the request was sent
   166  
   167  	deliver chan *bytecodeResponse // Channel to deliver successful response on
   168  	revert  chan *bytecodeRequest  // Channel to deliver request failure on
   169  	cancel  chan struct{}          // Channel to track sync cancellation
   170  	timeout *time.Timer            // Timer to track delivery timeout
   171  	stale   chan struct{}          // Channel to signal the request was dropped
   172  
   173  	hashes []common.Hash // Bytecode hashes to validate responses
   174  	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
   175  }
   176  
   177  // bytecodeResponse is an already verified remote response to a bytecode request.
   178  type bytecodeResponse struct {
   179  	task *accountTask // Task which this request is filling
   180  
   181  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   182  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   183  }
   184  
   185  // storageRequest tracks a pending storage ranges request to ensure responses are
   186  // to actual requests and to validate any security constraints.
   187  //
   188  // Concurrency note: storage requests and responses are handled concurrently from
   189  // the main runloop to allow Merkle proof verifications on the peer's thread and
    190  // to drop the peer on an invalid response. The request struct must contain all the data to
   191  // construct the response without accessing runloop internals (i.e. tasks). That
   192  // is only included to allow the runloop to match a response to the task being
   193  // synced without having yet another set of maps.
   194  type storageRequest struct {
   195  	peer string    // Peer to which this request is assigned
   196  	id   uint64    // Request ID of this request
   197  	time time.Time // Timestamp when the request was sent
   198  
   199  	deliver chan *storageResponse // Channel to deliver successful response on
   200  	revert  chan *storageRequest  // Channel to deliver request failure on
   201  	cancel  chan struct{}         // Channel to track sync cancellation
   202  	timeout *time.Timer           // Timer to track delivery timeout
   203  	stale   chan struct{}         // Channel to signal the request was dropped
   204  
   205  	accounts []common.Hash // Account hashes to validate responses
   206  	roots    []common.Hash // Storage roots to validate responses
   207  
   208  	origin common.Hash // First storage slot requested to allow continuation checks
   209  	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
   210  
   211  	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
   212  	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
   213  }
   214  
   215  // storageResponse is an already Merkle-verified remote response to a storage
   216  // range request. It contains the subtries for the requested storage ranges and
    217  // the databases that are going to be filled with the internal nodes on commit.
   218  type storageResponse struct {
   219  	mainTask *accountTask // Task which this response belongs to
   220  	subTask  *storageTask // Task which this response is filling
   221  
   222  	accounts []common.Hash // Account hashes requested, may be only partially filled
   223  	roots    []common.Hash // Storage roots requested, may be only partially filled
   224  
   225  	hashes [][]common.Hash // Storage slot hashes in the returned range
   226  	slots  [][][]byte      // Storage slot values in the returned range
   227  
   228  	cont bool // Whether the last storage range has a continuation
   229  }
   230  
   231  // trienodeHealRequest tracks a pending state trie request to ensure responses
   232  // are to actual requests and to validate any security constraints.
   233  //
   234  // Concurrency note: trie node requests and responses are handled concurrently from
   235  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
    236  // to drop the peer on an invalid response. The request struct must contain all the data to
   237  // construct the response without accessing runloop internals (i.e. task). That
   238  // is only included to allow the runloop to match a response to the task being
   239  // synced without having yet another set of maps.
   240  type trienodeHealRequest struct {
   241  	peer string    // Peer to which this request is assigned
   242  	id   uint64    // Request ID of this request
   243  	time time.Time // Timestamp when the request was sent
   244  
   245  	deliver chan *trienodeHealResponse // Channel to deliver successful response on
   246  	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
   247  	cancel  chan struct{}              // Channel to track sync cancellation
   248  	timeout *time.Timer                // Timer to track delivery timeout
   249  	stale   chan struct{}              // Channel to signal the request was dropped
   250  
   251  	paths  []string      // Trie node paths for identifying trie node
   252  	hashes []common.Hash // Trie node hashes to validate responses
   253  
   254  	task *healTask // Task which this request is filling (only access fields through the runloop!!)
   255  }
   256  
   257  // trienodeHealResponse is an already verified remote response to a trie node request.
   258  type trienodeHealResponse struct {
   259  	task *healTask // Task which this request is filling
   260  
   261  	paths  []string      // Paths of the trie nodes
   262  	hashes []common.Hash // Hashes of the trie nodes to avoid double hashing
   263  	nodes  [][]byte      // Actual trie nodes to store into the database (nil = missing)
   264  }
   265  
   266  // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
   267  // actual requests and to validate any security constraints.
   268  //
   269  // Concurrency note: bytecode requests and responses are handled concurrently from
   270  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
    271  // to drop the peer on an invalid response. The request struct must contain all the data to
   272  // construct the response without accessing runloop internals (i.e. task). That
   273  // is only included to allow the runloop to match a response to the task being
   274  // synced without having yet another set of maps.
   275  type bytecodeHealRequest struct {
   276  	peer string    // Peer to which this request is assigned
   277  	id   uint64    // Request ID of this request
   278  	time time.Time // Timestamp when the request was sent
   279  
   280  	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
   281  	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
   282  	cancel  chan struct{}              // Channel to track sync cancellation
   283  	timeout *time.Timer                // Timer to track delivery timeout
   284  	stale   chan struct{}              // Channel to signal the request was dropped
   285  
   286  	hashes []common.Hash // Bytecode hashes to validate responses
   287  	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
   288  }
   289  
   290  // bytecodeHealResponse is an already verified remote response to a bytecode request.
   291  type bytecodeHealResponse struct {
   292  	task *healTask // Task which this request is filling
   293  
   294  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   295  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   296  }
   297  
   298  // accountTask represents the sync task for a chunk of the account snapshot.
   299  type accountTask struct {
   300  	// These fields get serialized to key-value store on shutdown
   301  	Next     common.Hash                    // Next account to sync in this interval
   302  	Last     common.Hash                    // Last account to sync in this interval
   303  	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
   304  
    305  	// This is a list of account hashes whose storage is already completed
   306  	// in this cycle. This field is newly introduced in v1.14 and will be
   307  	// empty if the task is resolved from legacy progress data. Furthermore,
   308  	// this additional field will be ignored by legacy Geth. The only side
   309  	// effect is that these contracts might be resynced in the new cycle,
   310  	// retaining the legacy behavior.
   311  	StorageCompleted []common.Hash `json:",omitempty"`
   312  
   313  	// These fields are internals used during runtime
   314  	req  *accountRequest  // Pending request to fill this task
    315  	res  *accountResponse // Validated response filling this task
   316  	pend int              // Number of pending subtasks for this round
   317  
   318  	needCode  []bool // Flags whether the filling accounts need code retrieval
   319  	needState []bool // Flags whether the filling accounts need storage retrieval
    320  	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
   321  
   322  	codeTasks      map[common.Hash]struct{}    // Code hashes that need retrieval
   323  	stateTasks     map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
    324  	stateCompleted map[common.Hash]struct{}    // Account hashes whose storage has been completed
   325  
   326  	genBatch ethdb.Batch // Batch used by the node generator
   327  	genTrie  genTrie     // Node generator from storage slots
   328  
   329  	done bool // Flag whether the task can be removed
   330  }
   331  
   332  // activeSubTasks returns the set of storage tasks covered by the current account
   333  // range. Normally this would be the entire subTask set, but on a sync interrupt
   334  // and later resume it can happen that a shorter account range is retrieved. This
   335  // method ensures that we only start up the subtasks covered by the latest account
   336  // response.
   337  //
   338  // Nil is returned if the account range is empty.
   339  func (task *accountTask) activeSubTasks() map[common.Hash][]*storageTask {
   340  	if len(task.res.hashes) == 0 {
   341  		return nil
   342  	}
   343  	var (
   344  		tasks = make(map[common.Hash][]*storageTask)
   345  		last  = task.res.hashes[len(task.res.hashes)-1]
   346  	)
   347  	for hash, subTasks := range task.SubTasks {
   348  		subTasks := subTasks // closure
   349  		if hash.Cmp(last) <= 0 {
   350  			tasks[hash] = subTasks
   351  		}
   352  	}
   353  	return tasks
   354  }
   355  
   356  // storageTask represents the sync task for a chunk of the storage snapshot.
   357  type storageTask struct {
    358  	Next common.Hash // Next storage slot to sync in this interval
    359  	Last common.Hash // Last storage slot to sync in this interval
   360  
   361  	// These fields are internals used during runtime
   362  	root common.Hash     // Storage root hash for this instance
   363  	req  *storageRequest // Pending request to fill this task
   364  
   365  	genBatch ethdb.Batch // Batch used by the node generator
   366  	genTrie  genTrie     // Node generator from storage slots
   367  
   368  	done bool // Flag whether the task can be removed
   369  }
   370  
   371  // healTask represents the sync task for healing the snap-synced chunk boundaries.
   372  type healTask struct {
   373  	scheduler *trie.Sync // State trie sync scheduler defining the tasks
   374  
   375  	trieTasks map[string]common.Hash   // Set of trie node tasks currently queued for retrieval, indexed by node path
   376  	codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash
   377  }
   378  
   379  // SyncProgress is a database entry to allow suspending and resuming a snapshot state
    380  // sync. As opposed to full and fast sync, there is no way to restart a suspended
   381  // snap sync without prior knowledge of the suspension point.
   382  type SyncProgress struct {
   383  	Tasks []*accountTask // The suspended account tasks (contract tasks within)
   384  
   385  	// Status report during syncing phase
   386  	AccountSynced  uint64             // Number of accounts downloaded
   387  	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   388  	BytecodeSynced uint64             // Number of bytecodes downloaded
   389  	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   390  	StorageSynced  uint64             // Number of storage slots downloaded
   391  	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   392  
   393  	// Status report during healing phase
   394  	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
   395  	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   396  	BytecodeHealSynced uint64             // Number of bytecodes downloaded
   397  	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   398  }
   399  
   400  // SyncPending is analogous to SyncProgress, but it's used to report on pending
   401  // ephemeral sync progress that doesn't get persisted into the database.
   402  type SyncPending struct {
   403  	TrienodeHeal uint64 // Number of state trie nodes pending
   404  	BytecodeHeal uint64 // Number of bytecodes pending
   405  }
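
// As a minimal sketch of how the progress record above is persisted, the
// syncer serializes SyncProgress with encoding/json and stores the blob under
// a dedicated key (see saveSyncStatus and loadSyncStatus below). The helper
// name is illustrative only:
func exampleProgressRoundTrip(db ethdb.KeyValueStore, progress *SyncProgress) (*SyncProgress, error) {
	blob, err := json.Marshal(progress)
	if err != nil {
		return nil, err
	}
	rawdb.WriteSnapshotSyncStatus(db, blob)

	// Reading it back restores the suspended account tasks and the counters.
	restored := new(SyncProgress)
	if err := json.Unmarshal(rawdb.ReadSnapshotSyncStatus(db), restored); err != nil {
		return nil, err
	}
	return restored, nil
}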
   406  
   407  // SyncPeer abstracts out the methods required for a peer to be synced against
   408  // with the goal of allowing the construction of mock peers without the full
   409  // blown networking.
   410  type SyncPeer interface {
   411  	// ID retrieves the peer's unique identifier.
   412  	ID() string
   413  
   414  	// RequestAccountRange fetches a batch of accounts rooted in a specific account
   415  	// trie, starting with the origin.
   416  	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
   417  
   418  	// RequestStorageRanges fetches a batch of storage slots belonging to one or
    419  	// more accounts. If slots from only one account are requested, an origin marker
   420  	// may also be used to retrieve from there.
   421  	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
   422  
   423  	// RequestByteCodes fetches a batch of bytecodes by hash.
   424  	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
   425  
   426  	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
   427  	// a specific state trie.
   428  	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
   429  
   430  	// Log retrieves the peer's own contextual logger.
   431  	Log() log.Logger
   432  }
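
// Since SyncPeer exists precisely so that mock peers can be constructed, a
// no-op implementation is just a handful of stubs. This is a minimal sketch
// for illustration (the type name is hypothetical; the package's tests use a
// richer mock):
type nullSyncPeer struct{ id string }

func (p *nullSyncPeer) ID() string { return p.id }

func (p *nullSyncPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	return nil // a real peer would send the account range query here
}

func (p *nullSyncPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	return nil
}

func (p *nullSyncPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	return nil
}

func (p *nullSyncPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	return nil
}

func (p *nullSyncPeer) Log() log.Logger { return log.New("peer", p.id) }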
   433  
   434  // Syncer is an Ethereum account and storage trie syncer based on snapshots and
    435  // the snap protocol. Its purpose is to download all the accounts and storage
   436  // slots from remote peers and reassemble chunks of the state trie, on top of
   437  // which a state sync can be run to fix any gaps / overlaps.
   438  //
   439  // Every network request has a variety of failure events:
   440  //   - The peer disconnects after task assignment, failing to send the request
   441  //   - The peer disconnects after sending the request, before delivering on it
   442  //   - The peer remains connected, but does not deliver a response in time
   443  //   - The peer delivers a stale response after a previous timeout
   444  //   - The peer delivers a refusal to serve the requested state
   445  type Syncer struct {
   446  	db     ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
   447  	scheme string              // Node scheme used in node database
   448  
   449  	root    common.Hash    // Current state trie root being synced
   450  	tasks   []*accountTask // Current account task set being synced
   451  	snapped bool           // Flag to signal that snap phase is done
   452  	healer  *healTask      // Current state healing task being executed
   453  	update  chan struct{}  // Notification channel for possible sync progression
   454  
   455  	peers    map[string]SyncPeer // Currently active peers to download from
   456  	peerJoin *event.Feed         // Event feed to react to peers joining
   457  	peerDrop *event.Feed         // Event feed to react to peers dropping
   458  	rates    *msgrate.Trackers   // Message throughput rates for peers
   459  
   460  	// Request tracking during syncing phase
   461  	statelessPeers map[string]struct{} // Peers that failed to deliver state data
   462  	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
   463  	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   464  	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests
   465  
   466  	accountReqs  map[uint64]*accountRequest  // Account requests currently running
   467  	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
   468  	storageReqs  map[uint64]*storageRequest  // Storage requests currently running
   469  
   470  	accountSynced  uint64             // Number of accounts downloaded
   471  	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   472  	bytecodeSynced uint64             // Number of bytecodes downloaded
   473  	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   474  	storageSynced  uint64             // Number of storage slots downloaded
   475  	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   476  
    477  	extProgress *SyncProgress // Progress that can be exposed to external callers
   478  
   479  	// Request tracking during healing phase
   480  	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
   481  	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   482  
   483  	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
   484  	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running
   485  
   486  	trienodeHealRate      float64       // Average heal rate for processing trie node data
   487  	trienodeHealPend      atomic.Uint64 // Number of trie nodes currently pending for processing
   488  	trienodeHealThrottle  float64       // Divisor for throttling the amount of trienode heal data requested
   489  	trienodeHealThrottled time.Time     // Timestamp the last time the throttle was updated
   490  
   491  	trienodeHealSynced uint64             // Number of state trie nodes downloaded
   492  	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   493  	trienodeHealDups   uint64             // Number of state trie nodes already processed
   494  	trienodeHealNops   uint64             // Number of state trie nodes not requested
   495  	bytecodeHealSynced uint64             // Number of bytecodes downloaded
   496  	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   497  	bytecodeHealDups   uint64             // Number of bytecodes already processed
   498  	bytecodeHealNops   uint64             // Number of bytecodes not requested
   499  
   500  	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
   501  	accountHealed      uint64             // Number of accounts downloaded during the healing stage
   502  	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
   503  	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
   504  	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
   505  
   506  	startTime time.Time // Time instance when snapshot sync started
   507  	logTime   time.Time // Time instance when status was last reported
   508  
   509  	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
   510  	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
   511  }
   512  
   513  // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
   514  // snap protocol.
   515  func NewSyncer(db ethdb.KeyValueStore, scheme string) *Syncer {
   516  	return &Syncer{
   517  		db:     db,
   518  		scheme: scheme,
   519  
   520  		peers:    make(map[string]SyncPeer),
   521  		peerJoin: new(event.Feed),
   522  		peerDrop: new(event.Feed),
   523  		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
   524  		update:   make(chan struct{}, 1),
   525  
   526  		accountIdlers:  make(map[string]struct{}),
   527  		storageIdlers:  make(map[string]struct{}),
   528  		bytecodeIdlers: make(map[string]struct{}),
   529  
   530  		accountReqs:  make(map[uint64]*accountRequest),
   531  		storageReqs:  make(map[uint64]*storageRequest),
   532  		bytecodeReqs: make(map[uint64]*bytecodeRequest),
   533  
   534  		trienodeHealIdlers: make(map[string]struct{}),
   535  		bytecodeHealIdlers: make(map[string]struct{}),
   536  
   537  		trienodeHealReqs:     make(map[uint64]*trienodeHealRequest),
   538  		bytecodeHealReqs:     make(map[uint64]*bytecodeHealRequest),
   539  		trienodeHealThrottle: maxTrienodeHealThrottle, // Tune downward instead of insta-filling with junk
   540  		stateWriter:          db.NewBatch(),
   541  
   542  		extProgress: new(SyncProgress),
   543  	}
   544  }
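
// A minimal usage sketch, assuming an in-memory database and a known state
// root. In practice the networking layer registers peers as they connect and
// unregisters them on disconnect; Sync blocks until the state is fully
// downloaded and healed, or until the cancel channel is closed.
func exampleSync(root common.Hash, peer SyncPeer) error {
	syncer := NewSyncer(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
	if err := syncer.Register(peer); err != nil {
		return err
	}
	defer syncer.Unregister(peer.ID())

	cancel := make(chan struct{}) // close(cancel) aborts the cycle with ErrCancelled
	return syncer.Sync(root, cancel)
}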
   545  
   546  // Register injects a new data source into the syncer's peerset.
   547  func (s *Syncer) Register(peer SyncPeer) error {
   548  	// Make sure the peer is not registered yet
   549  	id := peer.ID()
   550  
   551  	s.lock.Lock()
   552  	if _, ok := s.peers[id]; ok {
   553  		log.Error("Snap peer already registered", "id", id)
   554  
   555  		s.lock.Unlock()
   556  		return errors.New("already registered")
   557  	}
   558  	s.peers[id] = peer
   559  	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
   560  
   561  	// Mark the peer as idle, even if no sync is running
   562  	s.accountIdlers[id] = struct{}{}
   563  	s.storageIdlers[id] = struct{}{}
   564  	s.bytecodeIdlers[id] = struct{}{}
   565  	s.trienodeHealIdlers[id] = struct{}{}
   566  	s.bytecodeHealIdlers[id] = struct{}{}
   567  	s.lock.Unlock()
   568  
   569  	// Notify any active syncs that a new peer can be assigned data
   570  	s.peerJoin.Send(id)
   571  	return nil
   572  }
   573  
    574  // Unregister removes a data source from the syncer's peerset.
   575  func (s *Syncer) Unregister(id string) error {
   576  	// Remove all traces of the peer from the registry
   577  	s.lock.Lock()
   578  	if _, ok := s.peers[id]; !ok {
   579  		log.Error("Snap peer not registered", "id", id)
   580  
   581  		s.lock.Unlock()
   582  		return errors.New("not registered")
   583  	}
   584  	delete(s.peers, id)
   585  	s.rates.Untrack(id)
   586  
   587  	// Remove status markers, even if no sync is running
   588  	delete(s.statelessPeers, id)
   589  
   590  	delete(s.accountIdlers, id)
   591  	delete(s.storageIdlers, id)
   592  	delete(s.bytecodeIdlers, id)
   593  	delete(s.trienodeHealIdlers, id)
   594  	delete(s.bytecodeHealIdlers, id)
   595  	s.lock.Unlock()
   596  
   597  	// Notify any active syncs that pending requests need to be reverted
   598  	s.peerDrop.Send(id)
   599  	return nil
   600  }
   601  
   602  // Sync starts (or resumes a previous) sync cycle to iterate over a state trie
   603  // with the given root and reconstruct the nodes based on the snapshot leaves.
    604  // Previously downloaded segments will not be redownloaded or fixed, rather any
   605  // errors will be healed after the leaves are fully accumulated.
   606  func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
   607  	// Move the trie root from any previous value, revert stateless markers for
   608  	// any peers and initialize the syncer if it was not yet run
   609  	s.lock.Lock()
   610  	s.root = root
   611  	s.healer = &healTask{
   612  		scheduler: state.NewStateSync(root, s.db, s.onHealState, s.scheme),
   613  		trieTasks: make(map[string]common.Hash),
   614  		codeTasks: make(map[common.Hash]struct{}),
   615  	}
   616  	s.statelessPeers = make(map[string]struct{})
   617  	s.lock.Unlock()
   618  
   619  	if s.startTime == (time.Time{}) {
   620  		s.startTime = time.Now()
   621  	}
   622  	// Retrieve the previous sync status from LevelDB and abort if already synced
   623  	s.loadSyncStatus()
   624  	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   625  		log.Debug("Snapshot sync already completed")
   626  		return nil
   627  	}
   628  	defer func() { // Persist any progress, independent of failure
   629  		for _, task := range s.tasks {
   630  			s.forwardAccountTask(task)
   631  		}
   632  		s.cleanAccountTasks()
   633  		s.saveSyncStatus()
   634  	}()
   635  
   636  	log.Debug("Starting snapshot sync cycle", "root", root)
   637  
   638  	// Flush out the last committed raw states
   639  	defer func() {
   640  		if s.stateWriter.ValueSize() > 0 {
   641  			s.stateWriter.Write()
   642  			s.stateWriter.Reset()
   643  		}
   644  	}()
   645  	defer s.report(true)
    646  	// Commit any trie- and bytecode-healing data.
   647  	defer s.commitHealer(true)
   648  
   649  	// Whether sync completed or not, disregard any future packets
   650  	defer func() {
   651  		log.Debug("Terminating snapshot sync cycle", "root", root)
   652  		s.lock.Lock()
   653  		s.accountReqs = make(map[uint64]*accountRequest)
   654  		s.storageReqs = make(map[uint64]*storageRequest)
   655  		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
   656  		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
   657  		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
   658  		s.lock.Unlock()
   659  	}()
   660  	// Keep scheduling sync tasks
   661  	peerJoin := make(chan string, 16)
   662  	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
   663  	defer peerJoinSub.Unsubscribe()
   664  
   665  	peerDrop := make(chan string, 16)
   666  	peerDropSub := s.peerDrop.Subscribe(peerDrop)
   667  	defer peerDropSub.Unsubscribe()
   668  
   669  	// Create a set of unique channels for this sync cycle. We need these to be
   670  	// ephemeral so a data race doesn't accidentally deliver something stale on
   671  	// a persistent channel across syncs (yup, this happened)
   672  	var (
   673  		accountReqFails      = make(chan *accountRequest)
   674  		storageReqFails      = make(chan *storageRequest)
   675  		bytecodeReqFails     = make(chan *bytecodeRequest)
   676  		accountResps         = make(chan *accountResponse)
   677  		storageResps         = make(chan *storageResponse)
   678  		bytecodeResps        = make(chan *bytecodeResponse)
   679  		trienodeHealReqFails = make(chan *trienodeHealRequest)
   680  		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
   681  		trienodeHealResps    = make(chan *trienodeHealResponse)
   682  		bytecodeHealResps    = make(chan *bytecodeHealResponse)
   683  	)
   684  	for {
   685  		// Remove all completed tasks and terminate sync if everything's done
   686  		s.cleanStorageTasks()
   687  		s.cleanAccountTasks()
   688  		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   689  			return nil
   690  		}
   691  		// Assign all the data retrieval tasks to any free peers
   692  		s.assignAccountTasks(accountResps, accountReqFails, cancel)
   693  		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
   694  		s.assignStorageTasks(storageResps, storageReqFails, cancel)
   695  
   696  		if len(s.tasks) == 0 {
   697  			// Sync phase done, run heal phase
   698  			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
   699  			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
   700  		}
   701  		// Update sync progress
   702  		s.lock.Lock()
   703  		s.extProgress = &SyncProgress{
   704  			AccountSynced:      s.accountSynced,
   705  			AccountBytes:       s.accountBytes,
   706  			BytecodeSynced:     s.bytecodeSynced,
   707  			BytecodeBytes:      s.bytecodeBytes,
   708  			StorageSynced:      s.storageSynced,
   709  			StorageBytes:       s.storageBytes,
   710  			TrienodeHealSynced: s.trienodeHealSynced,
   711  			TrienodeHealBytes:  s.trienodeHealBytes,
   712  			BytecodeHealSynced: s.bytecodeHealSynced,
   713  			BytecodeHealBytes:  s.bytecodeHealBytes,
   714  		}
   715  		s.lock.Unlock()
   716  		// Wait for something to happen
   717  		select {
   718  		case <-s.update:
   719  			// Something happened (new peer, delivery, timeout), recheck tasks
   720  		case <-peerJoin:
   721  			// A new peer joined, try to schedule it new tasks
   722  		case id := <-peerDrop:
   723  			s.revertRequests(id)
   724  		case <-cancel:
   725  			return ErrCancelled
   726  
   727  		case req := <-accountReqFails:
   728  			s.revertAccountRequest(req)
   729  		case req := <-bytecodeReqFails:
   730  			s.revertBytecodeRequest(req)
   731  		case req := <-storageReqFails:
   732  			s.revertStorageRequest(req)
   733  		case req := <-trienodeHealReqFails:
   734  			s.revertTrienodeHealRequest(req)
   735  		case req := <-bytecodeHealReqFails:
   736  			s.revertBytecodeHealRequest(req)
   737  
   738  		case res := <-accountResps:
   739  			s.processAccountResponse(res)
   740  		case res := <-bytecodeResps:
   741  			s.processBytecodeResponse(res)
   742  		case res := <-storageResps:
   743  			s.processStorageResponse(res)
   744  		case res := <-trienodeHealResps:
   745  			s.processTrienodeHealResponse(res)
   746  		case res := <-bytecodeHealResps:
   747  			s.processBytecodeHealResponse(res)
   748  		}
   749  		// Report stats if something meaningful happened
   750  		s.report(false)
   751  	}
   752  }
   753  
   754  // loadSyncStatus retrieves a previously aborted sync status from the database,
   755  // or generates a fresh one if none is available.
   756  func (s *Syncer) loadSyncStatus() {
   757  	var progress SyncProgress
   758  
   759  	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
   760  		if err := json.Unmarshal(status, &progress); err != nil {
   761  			log.Error("Failed to decode snap sync status", "err", err)
   762  		} else {
   763  			for _, task := range progress.Tasks {
   764  				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
   765  			}
   766  			s.tasks = progress.Tasks
   767  			for _, task := range s.tasks {
   768  				task := task // closure for task.genBatch in the stacktrie writer callback
   769  
   770  				// Restore the completed storages
   771  				task.stateCompleted = make(map[common.Hash]struct{})
   772  				for _, hash := range task.StorageCompleted {
   773  					task.stateCompleted[hash] = struct{}{}
   774  				}
   775  				task.StorageCompleted = nil
   776  
   777  				// Allocate batch for account trie generation
   778  				task.genBatch = ethdb.HookedBatch{
   779  					Batch: s.db.NewBatch(),
   780  					OnPut: func(key []byte, value []byte) {
   781  						s.accountBytes += common.StorageSize(len(key) + len(value))
   782  					},
   783  				}
   784  				if s.scheme == rawdb.HashScheme {
   785  					task.genTrie = newHashTrie(task.genBatch)
   786  				}
   787  				if s.scheme == rawdb.PathScheme {
   788  					task.genTrie = newPathTrie(common.Hash{}, task.Next != common.Hash{}, s.db, task.genBatch)
   789  				}
   790  				// Restore leftover storage tasks
   791  				for accountHash, subtasks := range task.SubTasks {
   792  					for _, subtask := range subtasks {
   793  						subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
   794  
   795  						subtask.genBatch = ethdb.HookedBatch{
   796  							Batch: s.db.NewBatch(),
   797  							OnPut: func(key []byte, value []byte) {
   798  								s.storageBytes += common.StorageSize(len(key) + len(value))
   799  							},
   800  						}
   801  						if s.scheme == rawdb.HashScheme {
   802  							subtask.genTrie = newHashTrie(subtask.genBatch)
   803  						}
   804  						if s.scheme == rawdb.PathScheme {
   805  							subtask.genTrie = newPathTrie(accountHash, subtask.Next != common.Hash{}, s.db, subtask.genBatch)
   806  						}
   807  					}
   808  				}
   809  			}
   810  			s.lock.Lock()
   811  			defer s.lock.Unlock()
   812  
   813  			s.snapped = len(s.tasks) == 0
   814  
   815  			s.accountSynced = progress.AccountSynced
   816  			s.accountBytes = progress.AccountBytes
   817  			s.bytecodeSynced = progress.BytecodeSynced
   818  			s.bytecodeBytes = progress.BytecodeBytes
   819  			s.storageSynced = progress.StorageSynced
   820  			s.storageBytes = progress.StorageBytes
   821  
   822  			s.trienodeHealSynced = progress.TrienodeHealSynced
   823  			s.trienodeHealBytes = progress.TrienodeHealBytes
   824  			s.bytecodeHealSynced = progress.BytecodeHealSynced
   825  			s.bytecodeHealBytes = progress.BytecodeHealBytes
   826  			return
   827  		}
   828  	}
   829  	// Either we've failed to decode the previous state, or there was none.
   830  	// Start a fresh sync by chunking up the account range and scheduling
    831  	// the chunks for retrieval.
   832  	s.tasks = nil
   833  	s.accountSynced, s.accountBytes = 0, 0
   834  	s.bytecodeSynced, s.bytecodeBytes = 0, 0
   835  	s.storageSynced, s.storageBytes = 0, 0
   836  	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
   837  	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
   838  
   839  	var next common.Hash
   840  	step := new(big.Int).Sub(
   841  		new(big.Int).Div(
   842  			new(big.Int).Exp(common.Big2, common.Big256, nil),
   843  			big.NewInt(int64(accountConcurrency)),
   844  		), common.Big1,
   845  	)
   846  	for i := 0; i < accountConcurrency; i++ {
   847  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
   848  		if i == accountConcurrency-1 {
   849  			// Make sure we don't overflow if the step is not a proper divisor
   850  			last = common.MaxHash
   851  		}
   852  		batch := ethdb.HookedBatch{
   853  			Batch: s.db.NewBatch(),
   854  			OnPut: func(key []byte, value []byte) {
   855  				s.accountBytes += common.StorageSize(len(key) + len(value))
   856  			},
   857  		}
   858  		var tr genTrie
   859  		if s.scheme == rawdb.HashScheme {
   860  			tr = newHashTrie(batch)
   861  		}
   862  		if s.scheme == rawdb.PathScheme {
   863  			tr = newPathTrie(common.Hash{}, next != common.Hash{}, s.db, batch)
   864  		}
   865  		s.tasks = append(s.tasks, &accountTask{
   866  			Next:           next,
   867  			Last:           last,
   868  			SubTasks:       make(map[common.Hash][]*storageTask),
   869  			genBatch:       batch,
   870  			stateCompleted: make(map[common.Hash]struct{}),
   871  			genTrie:        tr,
   872  		})
   873  		log.Debug("Created account sync task", "from", next, "last", last)
   874  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
   875  	}
   876  }
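
// The fresh-start path above carves the 2^256 account hash space into
// accountConcurrency equal chunks. As a worked sketch with the default of 16
// chunks: step = 2^256/16 - 1, so task 0 spans 0x0000...00 through 0x0fff...ff,
// task 1 starts at 0x1000...00, and the last task is clamped to common.MaxHash
// to absorb any rounding remainder. The helper below (hypothetical, mirroring
// the loop above) just exposes those boundaries:
func exampleChunkBoundaries(chunks int) (starts, ends []common.Hash) {
	step := new(big.Int).Sub(
		new(big.Int).Div(new(big.Int).Exp(common.Big2, common.Big256, nil), big.NewInt(int64(chunks))),
		common.Big1,
	)
	var next common.Hash
	for i := 0; i < chunks; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == chunks-1 {
			last = common.MaxHash // clamp the final range
		}
		starts, ends = append(starts, next), append(ends, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	return starts, ends
}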
   877  
   878  // saveSyncStatus marshals the remaining sync tasks into leveldb.
   879  func (s *Syncer) saveSyncStatus() {
   880  	// Serialize any partial progress to disk before spinning down
   881  	for _, task := range s.tasks {
   882  		// Claim the right boundary as incomplete before flushing the
    883  		// accumulated nodes in batch; the nodes on the right boundary
   884  		// will be discarded and cleaned up by this call.
   885  		task.genTrie.commit(false)
   886  		if err := task.genBatch.Write(); err != nil {
   887  			log.Error("Failed to persist account slots", "err", err)
   888  		}
   889  		for _, subtasks := range task.SubTasks {
   890  			for _, subtask := range subtasks {
    891  				// Same as for the account trie: discard and clean up
    892  				// the incomplete right boundary.
   893  				subtask.genTrie.commit(false)
   894  				if err := subtask.genBatch.Write(); err != nil {
   895  					log.Error("Failed to persist storage slots", "err", err)
   896  				}
   897  			}
   898  		}
   899  		// Save the account hashes of completed storage.
   900  		task.StorageCompleted = make([]common.Hash, 0, len(task.stateCompleted))
   901  		for hash := range task.stateCompleted {
   902  			task.StorageCompleted = append(task.StorageCompleted, hash)
   903  		}
   904  		if len(task.StorageCompleted) > 0 {
   905  			log.Debug("Leftover completed storages", "number", len(task.StorageCompleted), "next", task.Next, "last", task.Last)
   906  		}
   907  	}
   908  	// Store the actual progress markers
   909  	progress := &SyncProgress{
   910  		Tasks:              s.tasks,
   911  		AccountSynced:      s.accountSynced,
   912  		AccountBytes:       s.accountBytes,
   913  		BytecodeSynced:     s.bytecodeSynced,
   914  		BytecodeBytes:      s.bytecodeBytes,
   915  		StorageSynced:      s.storageSynced,
   916  		StorageBytes:       s.storageBytes,
   917  		TrienodeHealSynced: s.trienodeHealSynced,
   918  		TrienodeHealBytes:  s.trienodeHealBytes,
   919  		BytecodeHealSynced: s.bytecodeHealSynced,
   920  		BytecodeHealBytes:  s.bytecodeHealBytes,
   921  	}
   922  	status, err := json.Marshal(progress)
   923  	if err != nil {
   924  		panic(err) // This can only fail during implementation
   925  	}
   926  	rawdb.WriteSnapshotSyncStatus(s.db, status)
   927  }
   928  
   929  // Progress returns the snap sync status statistics.
   930  func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
   931  	s.lock.Lock()
   932  	defer s.lock.Unlock()
   933  	pending := new(SyncPending)
   934  	if s.healer != nil {
   935  		pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
   936  		pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
   937  	}
   938  	return s.extProgress, pending
   939  }
   940  
   941  // cleanAccountTasks removes account range retrieval tasks that have already been
   942  // completed.
   943  func (s *Syncer) cleanAccountTasks() {
   944  	// If the sync was already done before, don't even bother
   945  	if len(s.tasks) == 0 {
   946  		return
   947  	}
   948  	// Sync wasn't finished previously, check for any task that can be finalized
   949  	for i := 0; i < len(s.tasks); i++ {
   950  		if s.tasks[i].done {
   951  			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
   952  			i--
   953  		}
   954  	}
    955  	// If everything was just finalized, generate the account trie and start healing
   956  	if len(s.tasks) == 0 {
   957  		s.lock.Lock()
   958  		s.snapped = true
   959  		s.lock.Unlock()
   960  
   961  		// Push the final sync report
   962  		s.reportSyncProgress(true)
   963  	}
   964  }
   965  
   966  // cleanStorageTasks iterates over all the account tasks and storage sub-tasks
   967  // within, cleaning any that have been completed.
   968  func (s *Syncer) cleanStorageTasks() {
   969  	for _, task := range s.tasks {
   970  		for account, subtasks := range task.SubTasks {
   971  			// Remove storage range retrieval tasks that completed
   972  			for j := 0; j < len(subtasks); j++ {
   973  				if subtasks[j].done {
   974  					subtasks = append(subtasks[:j], subtasks[j+1:]...)
   975  					j--
   976  				}
   977  			}
   978  			if len(subtasks) > 0 {
   979  				task.SubTasks[account] = subtasks
   980  				continue
   981  			}
   982  			// If all storage chunks are done, mark the account as done too
   983  			for j, hash := range task.res.hashes {
   984  				if hash == account {
   985  					task.needState[j] = false
   986  				}
   987  			}
   988  			delete(task.SubTasks, account)
   989  			task.pend--
   990  
    991  			// Mark the state as complete to prevent resyncing, regardless
    992  			// of whether state healing is necessary.
   993  			task.stateCompleted[account] = struct{}{}
   994  
   995  			// If this was the last pending task, forward the account task
   996  			if task.pend == 0 {
   997  				s.forwardAccountTask(task)
   998  			}
   999  		}
  1000  	}
  1001  }
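
// Both clean functions above delete completed entries in place by splicing the
// slice and stepping the index back. An equivalent filter-in-place sketch of
// the same idiom (hypothetical helper), reusing the backing array without
// allocating:
func exampleFilterDone(subtasks []*storageTask) []*storageTask {
	kept := subtasks[:0]
	for _, t := range subtasks {
		if !t.done {
			kept = append(kept, t)
		}
	}
	return kept
}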
  1002  
  1003  // assignAccountTasks attempts to match idle peers to pending account range
  1004  // retrievals.
  1005  func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
  1006  	s.lock.Lock()
  1007  	defer s.lock.Unlock()
  1008  
  1009  	// Sort the peers by download capacity to use faster ones if many available
  1010  	idlers := &capacitySort{
  1011  		ids:  make([]string, 0, len(s.accountIdlers)),
  1012  		caps: make([]int, 0, len(s.accountIdlers)),
  1013  	}
  1014  	targetTTL := s.rates.TargetTimeout()
  1015  	for id := range s.accountIdlers {
  1016  		if _, ok := s.statelessPeers[id]; ok {
  1017  			continue
  1018  		}
  1019  		idlers.ids = append(idlers.ids, id)
  1020  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
  1021  	}
  1022  	if len(idlers.ids) == 0 {
  1023  		return
  1024  	}
  1025  	sort.Sort(sort.Reverse(idlers))
  1026  
  1027  	// Iterate over all the tasks and try to find a pending one
  1028  	for _, task := range s.tasks {
  1029  		// Skip any tasks already filling
  1030  		if task.req != nil || task.res != nil {
  1031  			continue
  1032  		}
  1033  		// Task pending retrieval, try to find an idle peer. If no such peer
  1034  		// exists, we probably assigned tasks for all (or they are stateless).
  1035  		// Abort the entire assignment mechanism.
  1036  		if len(idlers.ids) == 0 {
  1037  			return
  1038  		}
  1039  		var (
  1040  			idle = idlers.ids[0]
  1041  			peer = s.peers[idle]
  1042  			cap  = idlers.caps[0]
  1043  		)
  1044  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1045  
  1046  		// Matched a pending task to an idle peer, allocate a unique request id
  1047  		var reqid uint64
  1048  		for {
  1049  			reqid = uint64(rand.Int63())
  1050  			if reqid == 0 {
  1051  				continue
  1052  			}
  1053  			if _, ok := s.accountReqs[reqid]; ok {
  1054  				continue
  1055  			}
  1056  			break
  1057  		}
  1058  		// Generate the network query and send it to the peer
  1059  		req := &accountRequest{
  1060  			peer:    idle,
  1061  			id:      reqid,
  1062  			time:    time.Now(),
  1063  			deliver: success,
  1064  			revert:  fail,
  1065  			cancel:  cancel,
  1066  			stale:   make(chan struct{}),
  1067  			origin:  task.Next,
  1068  			limit:   task.Last,
  1069  			task:    task,
  1070  		}
  1071  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1072  			peer.Log().Debug("Account range request timed out", "reqid", reqid)
  1073  			s.rates.Update(idle, AccountRangeMsg, 0, 0)
  1074  			s.scheduleRevertAccountRequest(req)
  1075  		})
  1076  		s.accountReqs[reqid] = req
  1077  		delete(s.accountIdlers, idle)
  1078  
  1079  		s.pend.Add(1)
  1080  		go func(root common.Hash) {
  1081  			defer s.pend.Done()
  1082  
  1083  			// Attempt to send the remote request and revert if it fails
  1084  			if cap > maxRequestSize {
  1085  				cap = maxRequestSize
  1086  			}
  1087  			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1088  				cap = minRequestSize
  1089  			}
  1090  			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
  1091  				peer.Log().Debug("Failed to request account range", "err", err)
  1092  				s.scheduleRevertAccountRequest(req)
  1093  			}
  1094  		}(s.root)
  1095  
  1096  		// Inject the request into the task to block further assignments
  1097  		task.req = req
  1098  	}
  1099  }
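
// The request-id allocation loop above recurs in every assign function: draw
// random 63-bit ids, skip zero, and retry on collision with an in-flight
// request. Factored out as a sketch (hypothetical helper; the real code
// inlines this while holding s.lock):
func (s *Syncer) exampleUniqueAccountReqID() uint64 {
	for {
		reqid := uint64(rand.Int63())
		if reqid == 0 {
			continue // the real code also skips zero
		}
		if _, ok := s.accountReqs[reqid]; ok {
			continue // id already in flight, draw another
		}
		return reqid
	}
}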
  1100  
  1101  // assignBytecodeTasks attempts to match idle peers to pending code retrievals.
  1102  func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
  1103  	s.lock.Lock()
  1104  	defer s.lock.Unlock()
  1105  
  1106  	// Sort the peers by download capacity to use faster ones if many available
  1107  	idlers := &capacitySort{
  1108  		ids:  make([]string, 0, len(s.bytecodeIdlers)),
  1109  		caps: make([]int, 0, len(s.bytecodeIdlers)),
  1110  	}
  1111  	targetTTL := s.rates.TargetTimeout()
  1112  	for id := range s.bytecodeIdlers {
  1113  		if _, ok := s.statelessPeers[id]; ok {
  1114  			continue
  1115  		}
  1116  		idlers.ids = append(idlers.ids, id)
  1117  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1118  	}
  1119  	if len(idlers.ids) == 0 {
  1120  		return
  1121  	}
  1122  	sort.Sort(sort.Reverse(idlers))
  1123  
  1124  	// Iterate over all the tasks and try to find a pending one
  1125  	for _, task := range s.tasks {
  1126  		// Skip any tasks not in the bytecode retrieval phase
  1127  		if task.res == nil {
  1128  			continue
  1129  		}
  1130  		// Skip tasks that are already retrieving (or done with) all codes
  1131  		if len(task.codeTasks) == 0 {
  1132  			continue
  1133  		}
  1134  		// Task pending retrieval, try to find an idle peer. If no such peer
  1135  		// exists, we probably assigned tasks for all (or they are stateless).
  1136  		// Abort the entire assignment mechanism.
  1137  		if len(idlers.ids) == 0 {
  1138  			return
  1139  		}
  1140  		var (
  1141  			idle = idlers.ids[0]
  1142  			peer = s.peers[idle]
  1143  			cap  = idlers.caps[0]
  1144  		)
  1145  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1146  
  1147  		// Matched a pending task to an idle peer, allocate a unique request id
  1148  		var reqid uint64
  1149  		for {
  1150  			reqid = uint64(rand.Int63())
  1151  			if reqid == 0 {
  1152  				continue
  1153  			}
  1154  			if _, ok := s.bytecodeReqs[reqid]; ok {
  1155  				continue
  1156  			}
  1157  			break
  1158  		}
  1159  		// Generate the network query and send it to the peer
  1160  		if cap > maxCodeRequestCount {
  1161  			cap = maxCodeRequestCount
  1162  		}
  1163  		hashes := make([]common.Hash, 0, cap)
  1164  		for hash := range task.codeTasks {
  1165  			delete(task.codeTasks, hash)
  1166  			hashes = append(hashes, hash)
  1167  			if len(hashes) >= cap {
  1168  				break
  1169  			}
  1170  		}
  1171  		req := &bytecodeRequest{
  1172  			peer:    idle,
  1173  			id:      reqid,
  1174  			time:    time.Now(),
  1175  			deliver: success,
  1176  			revert:  fail,
  1177  			cancel:  cancel,
  1178  			stale:   make(chan struct{}),
  1179  			hashes:  hashes,
  1180  			task:    task,
  1181  		}
  1182  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1183  			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
  1184  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1185  			s.scheduleRevertBytecodeRequest(req)
  1186  		})
  1187  		s.bytecodeReqs[reqid] = req
  1188  		delete(s.bytecodeIdlers, idle)
  1189  
  1190  		s.pend.Add(1)
  1191  		go func() {
  1192  			defer s.pend.Done()
  1193  
  1194  			// Attempt to send the remote request and revert if it fails
  1195  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1196  				log.Debug("Failed to request bytecodes", "err", err)
  1197  				s.scheduleRevertBytecodeRequest(req)
  1198  			}
  1199  		}()
  1200  	}
  1201  }
  1202  
  1203  // assignStorageTasks attempts to match idle peers to pending storage range
  1204  // retrievals.
  1205  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1206  	s.lock.Lock()
  1207  	defer s.lock.Unlock()
  1208  
  1209  	// Sort the peers by download capacity to use faster ones if many available
  1210  	idlers := &capacitySort{
  1211  		ids:  make([]string, 0, len(s.storageIdlers)),
  1212  		caps: make([]int, 0, len(s.storageIdlers)),
  1213  	}
  1214  	targetTTL := s.rates.TargetTimeout()
  1215  	for id := range s.storageIdlers {
  1216  		if _, ok := s.statelessPeers[id]; ok {
  1217  			continue
  1218  		}
  1219  		idlers.ids = append(idlers.ids, id)
  1220  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1221  	}
  1222  	if len(idlers.ids) == 0 {
  1223  		return
  1224  	}
  1225  	sort.Sort(sort.Reverse(idlers))
  1226  
  1227  	// Iterate over all the tasks and try to find a pending one
  1228  	for _, task := range s.tasks {
  1229  		// Skip any tasks not in the storage retrieval phase
  1230  		if task.res == nil {
  1231  			continue
  1232  		}
  1233  		// Skip tasks that are already retrieving (or done with) all small states
  1234  		storageTasks := task.activeSubTasks()
  1235  		if len(storageTasks) == 0 && len(task.stateTasks) == 0 {
  1236  			continue
  1237  		}
  1238  		// Task pending retrieval, try to find an idle peer. If no such peer
  1239  		// exists, we have probably assigned tasks to all of them (or they are
  1240  		// stateless). Abort the entire assignment mechanism.
  1241  		if len(idlers.ids) == 0 {
  1242  			return
  1243  		}
  1244  		var (
  1245  			idle = idlers.ids[0]
  1246  			peer = s.peers[idle]
  1247  			cap  = idlers.caps[0]
  1248  		)
  1249  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1250  
  1251  		// Matched a pending task to an idle peer, allocate a unique request id
  1252  		var reqid uint64
  1253  		for {
  1254  			reqid = uint64(rand.Int63())
  1255  			if reqid == 0 {
  1256  				continue
  1257  			}
  1258  			if _, ok := s.storageReqs[reqid]; ok {
  1259  				continue
  1260  			}
  1261  			break
  1262  		}
  1263  		// Generate the network query and send it to the peer. If there are
  1264  		// large contract tasks pending, complete those before diving into
  1265  		// even more new contracts.
  1266  		if cap > maxRequestSize {
  1267  			cap = maxRequestSize
  1268  		}
  1269  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1270  			cap = minRequestSize
  1271  		}
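        		// Heuristic: budget roughly 1KB of response data per account when
        		// converting the byte allowance into a number of storage sets.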
  1272  		storageSets := cap / 1024
  1273  
  1274  		var (
  1275  			accounts = make([]common.Hash, 0, storageSets)
  1276  			roots    = make([]common.Hash, 0, storageSets)
  1277  			subtask  *storageTask
  1278  		)
  1279  		for account, subtasks := range storageTasks {
  1280  			for _, st := range subtasks {
  1281  				// Skip any subtasks already filling
  1282  				if st.req != nil {
  1283  					continue
  1284  				}
  1285  				// Found an incomplete storage chunk, schedule it
  1286  				accounts = append(accounts, account)
  1287  				roots = append(roots, st.root)
  1288  				subtask = st
  1289  				break // Large contract chunks are downloaded individually
  1290  			}
  1291  			if subtask != nil {
  1292  				break // Large contract chunks are downloaded individually
  1293  			}
  1294  		}
  1295  		if subtask == nil {
  1296  			// No large contract requires retrieval, but small ones are available
  1297  			for account, root := range task.stateTasks {
  1298  				delete(task.stateTasks, account)
  1299  
  1300  				accounts = append(accounts, account)
  1301  				roots = append(roots, root)
  1302  
  1303  				if len(accounts) >= storageSets {
  1304  					break
  1305  				}
  1306  			}
  1307  		}
  1308  		// If nothing was found, it means this task is actually already fully
  1309  		// retrieving, but large contracts are hard to detect. Skip to the next.
  1310  		if len(accounts) == 0 {
  1311  			continue
  1312  		}
  1313  		req := &storageRequest{
  1314  			peer:     idle,
  1315  			id:       reqid,
  1316  			time:     time.Now(),
  1317  			deliver:  success,
  1318  			revert:   fail,
  1319  			cancel:   cancel,
  1320  			stale:    make(chan struct{}),
  1321  			accounts: accounts,
  1322  			roots:    roots,
  1323  			mainTask: task,
  1324  			subTask:  subtask,
  1325  		}
  1326  		if subtask != nil {
  1327  			req.origin = subtask.Next
  1328  			req.limit = subtask.Last
  1329  		}
  1330  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1331  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1332  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1333  			s.scheduleRevertStorageRequest(req)
  1334  		})
  1335  		s.storageReqs[reqid] = req
  1336  		delete(s.storageIdlers, idle)
  1337  
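        		// Launch the retrieval in the background; s.root is passed in as an
        		// argument so that it is read here, while the syncer lock is held.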
  1338  		s.pend.Add(1)
  1339  		go func(root common.Hash) {
  1340  			defer s.pend.Done()
  1341  
  1342  			// Attempt to send the remote request and revert if it fails
  1343  			var origin, limit []byte
  1344  			if subtask != nil {
  1345  				origin, limit = req.origin[:], req.limit[:]
  1346  			}
  1347  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1348  				log.Debug("Failed to request storage", "err", err)
  1349  				s.scheduleRevertStorageRequest(req)
  1350  			}
  1351  		}(s.root)
  1352  
  1353  		// Inject the request into the subtask to block further assignments
  1354  		if subtask != nil {
  1355  			subtask.req = req
  1356  		}
  1357  	}
  1358  }
  1359  
  1360  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1361  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1362  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1363  	s.lock.Lock()
  1364  	defer s.lock.Unlock()
  1365  
  1366  	// Sort the peers by download capacity to use faster ones if many available
  1367  	idlers := &capacitySort{
  1368  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1369  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1370  	}
  1371  	targetTTL := s.rates.TargetTimeout()
  1372  	for id := range s.trienodeHealIdlers {
  1373  		if _, ok := s.statelessPeers[id]; ok {
  1374  			continue
  1375  		}
  1376  		idlers.ids = append(idlers.ids, id)
  1377  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1378  	}
  1379  	if len(idlers.ids) == 0 {
  1380  		return
  1381  	}
  1382  	sort.Sort(sort.Reverse(idlers))
  1383  
  1384  	// Iterate over pending tasks and try to find a peer to retrieve with
  1385  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1386  		// If there are not enough trie tasks queued to fully assign, fill the
  1387  		// queue from the state sync scheduler. The trie syncer schedules these
  1388  		// together with bytecodes, so we need to queue them combined.
  1389  		var (
  1390  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1391  			want = maxTrieRequestCount + maxCodeRequestCount
  1392  		)
  1393  		if have < want {
  1394  			paths, hashes, codes := s.healer.scheduler.Missing(want - have)
  1395  			for i, path := range paths {
  1396  				s.healer.trieTasks[path] = hashes[i]
  1397  			}
  1398  			for _, hash := range codes {
  1399  				s.healer.codeTasks[hash] = struct{}{}
  1400  			}
  1401  		}
  1402  		// If all the heal tasks are bytecodes or already downloading, bail
  1403  		if len(s.healer.trieTasks) == 0 {
  1404  			return
  1405  		}
  1406  		// Task pending retrieval, try to find an idle peer. If no such peer
  1407  		// exists, we have probably assigned tasks to all of them (or they are
  1408  		// stateless). Abort the entire assignment mechanism.
  1409  		if len(idlers.ids) == 0 {
  1410  			return
  1411  		}
  1412  		var (
  1413  			idle = idlers.ids[0]
  1414  			peer = s.peers[idle]
  1415  			cap  = idlers.caps[0]
  1416  		)
  1417  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1418  
  1419  		// Matched a pending task to an idle peer, allocate a unique request id
  1420  		var reqid uint64
  1421  		for {
  1422  			reqid = uint64(rand.Int63())
  1423  			if reqid == 0 {
  1424  				continue
  1425  			}
  1426  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1427  				continue
  1428  			}
  1429  			break
  1430  		}
  1431  		// Generate the network query and send it to the peer
  1432  		if cap > maxTrieRequestCount {
  1433  			cap = maxTrieRequestCount
  1434  		}
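        		// Scale the request down by the current heal throttle: if local trie
        		// node processing cannot keep up with delivery, the throttle rises
        		// above 1 and the per-request allowance shrinks accordingly.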
  1435  		cap = int(float64(cap) / s.trienodeHealThrottle)
  1436  		if cap <= 0 {
  1437  			cap = 1
  1438  		}
  1439  		var (
  1440  			hashes   = make([]common.Hash, 0, cap)
  1441  			paths    = make([]string, 0, cap)
  1442  			pathsets = make([]TrieNodePathSet, 0, cap)
  1443  		)
  1444  		for path, hash := range s.healer.trieTasks {
  1445  			delete(s.healer.trieTasks, path)
  1446  
  1447  			paths = append(paths, path)
  1448  			hashes = append(hashes, hash)
  1449  			if len(paths) >= cap {
  1450  				break
  1451  			}
  1452  		}
  1453  		// Group requests by account hash
  1454  		paths, hashes, _, pathsets = sortByAccountPath(paths, hashes)
  1455  		req := &trienodeHealRequest{
  1456  			peer:    idle,
  1457  			id:      reqid,
  1458  			time:    time.Now(),
  1459  			deliver: success,
  1460  			revert:  fail,
  1461  			cancel:  cancel,
  1462  			stale:   make(chan struct{}),
  1463  			paths:   paths,
  1464  			hashes:  hashes,
  1465  			task:    s.healer,
  1466  		}
  1467  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1468  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1469  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1470  			s.scheduleRevertTrienodeHealRequest(req)
  1471  		})
  1472  		s.trienodeHealReqs[reqid] = req
  1473  		delete(s.trienodeHealIdlers, idle)
  1474  
  1475  		s.pend.Add(1)
  1476  		go func(root common.Hash) {
  1477  			defer s.pend.Done()
  1478  
  1479  			// Attempt to send the remote request and revert if it fails
  1480  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1481  				log.Debug("Failed to request trienode healers", "err", err)
  1482  				s.scheduleRevertTrienodeHealRequest(req)
  1483  			}
  1484  		}(s.root)
  1485  	}
  1486  }
  1487  
  1488  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1489  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1490  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1491  	s.lock.Lock()
  1492  	defer s.lock.Unlock()
  1493  
  1494  	// Sort the peers by download capacity to use faster ones if many available
  1495  	idlers := &capacitySort{
  1496  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1497  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1498  	}
  1499  	targetTTL := s.rates.TargetTimeout()
  1500  	for id := range s.bytecodeHealIdlers {
  1501  		if _, ok := s.statelessPeers[id]; ok {
  1502  			continue
  1503  		}
  1504  		idlers.ids = append(idlers.ids, id)
  1505  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1506  	}
  1507  	if len(idlers.ids) == 0 {
  1508  		return
  1509  	}
  1510  	sort.Sort(sort.Reverse(idlers))
  1511  
  1512  	// Iterate over pending tasks and try to find a peer to retrieve with
  1513  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1514  		// If there are not enough trie tasks queued to fully assign, fill the
  1515  		// queue from the state sync scheduler. The trie syncer schedules these
  1516  		// together with trie nodes, so we need to queue them combined.
  1517  		var (
  1518  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1519  			want = maxTrieRequestCount + maxCodeRequestCount
  1520  		)
  1521  		if have < want {
  1522  			paths, hashes, codes := s.healer.scheduler.Missing(want - have)
  1523  			for i, path := range paths {
  1524  				s.healer.trieTasks[path] = hashes[i]
  1525  			}
  1526  			for _, hash := range codes {
  1527  				s.healer.codeTasks[hash] = struct{}{}
  1528  			}
  1529  		}
  1530  		// If all the heal tasks are trienodes or already downloading, bail
  1531  		if len(s.healer.codeTasks) == 0 {
  1532  			return
  1533  		}
  1534  		// Task pending retrieval, try to find an idle peer. If no such peer
  1535  		// exists, we have probably assigned tasks to all of them (or they are
  1536  		// stateless). Abort the entire assignment mechanism.
  1537  		if len(idlers.ids) == 0 {
  1538  			return
  1539  		}
  1540  		var (
  1541  			idle = idlers.ids[0]
  1542  			peer = s.peers[idle]
  1543  			cap  = idlers.caps[0]
  1544  		)
  1545  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1546  
  1547  		// Matched a pending task to an idle peer, allocate a unique request id
  1548  		var reqid uint64
  1549  		for {
  1550  			reqid = uint64(rand.Int63())
  1551  			if reqid == 0 {
  1552  				continue
  1553  			}
  1554  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1555  				continue
  1556  			}
  1557  			break
  1558  		}
  1559  		// Generate the network query and send it to the peer
  1560  		if cap > maxCodeRequestCount {
  1561  			cap = maxCodeRequestCount
  1562  		}
  1563  		hashes := make([]common.Hash, 0, cap)
  1564  		for hash := range s.healer.codeTasks {
  1565  			delete(s.healer.codeTasks, hash)
  1566  
  1567  			hashes = append(hashes, hash)
  1568  			if len(hashes) >= cap {
  1569  				break
  1570  			}
  1571  		}
  1572  		req := &bytecodeHealRequest{
  1573  			peer:    idle,
  1574  			id:      reqid,
  1575  			time:    time.Now(),
  1576  			deliver: success,
  1577  			revert:  fail,
  1578  			cancel:  cancel,
  1579  			stale:   make(chan struct{}),
  1580  			hashes:  hashes,
  1581  			task:    s.healer,
  1582  		}
  1583  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1584  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1585  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1586  			s.scheduleRevertBytecodeHealRequest(req)
  1587  		})
  1588  		s.bytecodeHealReqs[reqid] = req
  1589  		delete(s.bytecodeHealIdlers, idle)
  1590  
  1591  		s.pend.Add(1)
  1592  		go func() {
  1593  			defer s.pend.Done()
  1594  
  1595  			// Attempt to send the remote request and revert if it fails
  1596  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1597  				log.Debug("Failed to request bytecode healers", "err", err)
  1598  				s.scheduleRevertBytecodeHealRequest(req)
  1599  			}
  1600  		}()
  1601  	}
  1602  }
  1603  
  1604  // revertRequests locates all the currently pending requests from a particular
  1605  // peer and reverts them, rescheduling for others to fulfill.
  1606  func (s *Syncer) revertRequests(peer string) {
  1607  	// Gather the requests first, reverting them needs the lock too
  1608  	s.lock.Lock()
  1609  	var accountReqs []*accountRequest
  1610  	for _, req := range s.accountReqs {
  1611  		if req.peer == peer {
  1612  			accountReqs = append(accountReqs, req)
  1613  		}
  1614  	}
  1615  	var bytecodeReqs []*bytecodeRequest
  1616  	for _, req := range s.bytecodeReqs {
  1617  		if req.peer == peer {
  1618  			bytecodeReqs = append(bytecodeReqs, req)
  1619  		}
  1620  	}
  1621  	var storageReqs []*storageRequest
  1622  	for _, req := range s.storageReqs {
  1623  		if req.peer == peer {
  1624  			storageReqs = append(storageReqs, req)
  1625  		}
  1626  	}
  1627  	var trienodeHealReqs []*trienodeHealRequest
  1628  	for _, req := range s.trienodeHealReqs {
  1629  		if req.peer == peer {
  1630  			trienodeHealReqs = append(trienodeHealReqs, req)
  1631  		}
  1632  	}
  1633  	var bytecodeHealReqs []*bytecodeHealRequest
  1634  	for _, req := range s.bytecodeHealReqs {
  1635  		if req.peer == peer {
  1636  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1637  		}
  1638  	}
  1639  	s.lock.Unlock()
  1640  
  1641  	// Revert all the requests matching the peer
  1642  	for _, req := range accountReqs {
  1643  		s.revertAccountRequest(req)
  1644  	}
  1645  	for _, req := range bytecodeReqs {
  1646  		s.revertBytecodeRequest(req)
  1647  	}
  1648  	for _, req := range storageReqs {
  1649  		s.revertStorageRequest(req)
  1650  	}
  1651  	for _, req := range trienodeHealReqs {
  1652  		s.revertTrienodeHealRequest(req)
  1653  	}
  1654  	for _, req := range bytecodeHealReqs {
  1655  		s.revertBytecodeHealRequest(req)
  1656  	}
  1657  }
  1658  
  1659  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1660  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1661  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
  1662  	select {
  1663  	case req.revert <- req:
  1664  		// Sync event loop notified
  1665  	case <-req.cancel:
  1666  		// Sync cycle got cancelled
  1667  	case <-req.stale:
  1668  		// Request already reverted
  1669  	}
  1670  }
  1671  
  1672  // revertAccountRequest cleans up an account range request and returns all failed
  1673  // retrieval tasks to the scheduler for reassignment.
  1674  //
  1675  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1676  // On peer threads, use scheduleRevertAccountRequest.
  1677  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1678  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1679  	select {
  1680  	case <-req.stale:
  1681  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1682  		return
  1683  	default:
  1684  	}
  1685  	close(req.stale)
  1686  
  1687  	// Remove the request from the tracked set
  1688  	s.lock.Lock()
  1689  	delete(s.accountReqs, req.id)
  1690  	s.lock.Unlock()
  1691  
  1692  	// If there's a timeout timer still running, abort it and mark the account
  1693  	// task as not-pending, ready for rescheduling
  1694  	req.timeout.Stop()
  1695  	if req.task.req == req {
  1696  		req.task.req = nil
  1697  	}
  1698  }
  1699  
  1700  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1701  // and return all failed retrieval tasks to the scheduler for reassignment.
  1702  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1703  	select {
  1704  	case req.revert <- req:
  1705  		// Sync event loop notified
  1706  	case <-req.cancel:
  1707  		// Sync cycle got cancelled
  1708  	case <-req.stale:
  1709  		// Request already reverted
  1710  	}
  1711  }
  1712  
  1713  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1714  // retrieval tasks to the scheduler for reassignment.
  1715  //
  1716  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1717  // On peer threads, use scheduleRevertBytecodeRequest.
  1718  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1719  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1720  	select {
  1721  	case <-req.stale:
  1722  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1723  		return
  1724  	default:
  1725  	}
  1726  	close(req.stale)
  1727  
  1728  	// Remove the request from the tracked set
  1729  	s.lock.Lock()
  1730  	delete(s.bytecodeReqs, req.id)
  1731  	s.lock.Unlock()
  1732  
  1733  	// If there's a timeout timer still running, abort it and mark the code
  1734  	// retrievals as not-pending, ready for rescheduling
  1735  	req.timeout.Stop()
  1736  	for _, hash := range req.hashes {
  1737  		req.task.codeTasks[hash] = struct{}{}
  1738  	}
  1739  }
  1740  
  1741  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1742  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1743  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1744  	select {
  1745  	case req.revert <- req:
  1746  		// Sync event loop notified
  1747  	case <-req.cancel:
  1748  		// Sync cycle got cancelled
  1749  	case <-req.stale:
  1750  		// Request already reverted
  1751  	}
  1752  }
  1753  
  1754  // revertStorageRequest cleans up a storage range request and returns all failed
  1755  // retrieval tasks to the scheduler for reassignment.
  1756  //
  1757  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1758  // On peer threads, use scheduleRevertStorageRequest.
  1759  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1760  	log.Debug("Reverting storage request", "peer", req.peer)
  1761  	select {
  1762  	case <-req.stale:
  1763  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1764  		return
  1765  	default:
  1766  	}
  1767  	close(req.stale)
  1768  
  1769  	// Remove the request from the tracked set
  1770  	s.lock.Lock()
  1771  	delete(s.storageReqs, req.id)
  1772  	s.lock.Unlock()
  1773  
  1774  	// If there's a timeout timer still running, abort it and mark the storage
  1775  	// task as not-pending, ready for rescheduling
  1776  	req.timeout.Stop()
  1777  	if req.subTask != nil {
  1778  		req.subTask.req = nil
  1779  	} else {
  1780  		for i, account := range req.accounts {
  1781  			req.mainTask.stateTasks[account] = req.roots[i]
  1782  		}
  1783  	}
  1784  }
  1785  
  1786  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1787  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1788  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1789  	select {
  1790  	case req.revert <- req:
  1791  		// Sync event loop notified
  1792  	case <-req.cancel:
  1793  		// Sync cycle got cancelled
  1794  	case <-req.stale:
  1795  		// Request already reverted
  1796  	}
  1797  }
  1798  
  1799  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1800  // failed retrieval tasks to the scheduler for reassignment.
  1801  //
  1802  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1803  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1804  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1805  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1806  	select {
  1807  	case <-req.stale:
  1808  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1809  		return
  1810  	default:
  1811  	}
  1812  	close(req.stale)
  1813  
  1814  	// Remove the request from the tracked set
  1815  	s.lock.Lock()
  1816  	delete(s.trienodeHealReqs, req.id)
  1817  	s.lock.Unlock()
  1818  
  1819  	// If there's a timeout timer still running, abort it and mark the trie node
  1820  	// retrievals as not-pending, ready for rescheduling
  1821  	req.timeout.Stop()
  1822  	for i, path := range req.paths {
  1823  		req.task.trieTasks[path] = req.hashes[i]
  1824  	}
  1825  }
  1826  
  1827  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1828  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1829  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1830  	select {
  1831  	case req.revert <- req:
  1832  		// Sync event loop notified
  1833  	case <-req.cancel:
  1834  		// Sync cycle got cancelled
  1835  	case <-req.stale:
  1836  		// Request already reverted
  1837  	}
  1838  }
  1839  
  1840  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1841  // failed retrieval tasks to the scheduler for reassignment.
  1842  //
  1843  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1844  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1845  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1846  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1847  	select {
  1848  	case <-req.stale:
  1849  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1850  		return
  1851  	default:
  1852  	}
  1853  	close(req.stale)
  1854  
  1855  	// Remove the request from the tracked set
  1856  	s.lock.Lock()
  1857  	delete(s.bytecodeHealReqs, req.id)
  1858  	s.lock.Unlock()
  1859  
  1860  	// If there's a timeout timer still running, abort it and mark the code
  1861  	// retrievals as not-pending, ready for rescheduling
  1862  	req.timeout.Stop()
  1863  	for _, hash := range req.hashes {
  1864  		req.task.codeTasks[hash] = struct{}{}
  1865  	}
  1866  }
  1867  
  1868  // processAccountResponse integrates an already validated account range response
  1869  // into the account tasks.
  1870  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1871  	// Switch the task from pending to filling
  1872  	res.task.req = nil
  1873  	res.task.res = res
  1874  
  1875  	// Ensure that the response doesn't overflow into the subsequent task
  1876  	lastBig := res.task.Last.Big()
  1877  	for i, hash := range res.hashes {
  1878  		// Mark the range complete if the last is already included.
  1879  		// Keep iterating to delete any extra states if they exist.
  1880  		cmp := hash.Big().Cmp(lastBig)
  1881  		if cmp == 0 {
  1882  			res.cont = false
  1883  			continue
  1884  		}
  1885  		if cmp > 0 {
  1886  			// Chunk overflown, cut off excess
  1887  			res.hashes = res.hashes[:i]
  1888  			res.accounts = res.accounts[:i]
  1889  			res.cont = false // Mark range completed
  1890  			break
  1891  		}
  1892  	}
  1893  	// Iterate over all the accounts and assemble which ones need further sub-
  1894  	// filling before the entire account range can be persisted.
  1895  	res.task.needCode = make([]bool, len(res.accounts))
  1896  	res.task.needState = make([]bool, len(res.accounts))
  1897  	res.task.needHeal = make([]bool, len(res.accounts))
  1898  
  1899  	res.task.codeTasks = make(map[common.Hash]struct{})
  1900  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1901  
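        	// Track the large-storage accounts whose subtasks get resumed below, so
        	// that leftover subtasks which were not resumed can be pruned afterwards.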
  1902  	resumed := make(map[common.Hash]struct{})
  1903  
  1904  	res.task.pend = 0
  1905  	for i, account := range res.accounts {
  1906  		// Check if the account is a contract with an unknown code
  1907  		if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
  1908  			if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
  1909  				res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
  1910  				res.task.needCode[i] = true
  1911  				res.task.pend++
  1912  			}
  1913  		}
  1914  		// Check if the account is a contract with an unknown storage trie
  1915  		if account.Root != types.EmptyRootHash {
  1916  			// If the storage was already retrieved in the last cycle, there's no need
  1917  			// to resync it again, regardless of whether the storage root is consistent
  1918  			// or not.
  1919  			if _, exist := res.task.stateCompleted[res.hashes[i]]; exist {
  1920  				// Leftover storage tasks are not expected, unless the system is
  1921  				// very wrong.
  1922  				if _, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1923  					panic(fmt.Errorf("unexpected leftover storage tasks, owner: %x", res.hashes[i]))
  1924  				}
  1925  				// Mark the healing tag if storage root node is inconsistent, or
  1926  				// it's non-existent due to storage chunking.
  1927  				if !rawdb.HasTrieNode(s.db, res.hashes[i], nil, account.Root, s.scheme) {
  1928  					res.task.needHeal[i] = true
  1929  				}
  1930  			} else {
  1931  				// If there was a previous large state retrieval in progress,
  1932  				// don't restart it from scratch. This happens if a sync cycle
  1933  				// is interrupted and resumed later. However, *do* update the
  1934  				// previous root hash.
  1935  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1936  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1937  					for _, subtask := range subtasks {
  1938  						subtask.root = account.Root
  1939  					}
  1940  					res.task.needHeal[i] = true
  1941  					resumed[res.hashes[i]] = struct{}{}
  1942  					largeStorageResumedGauge.Inc(1)
  1943  				} else {
  1944  					// It's possible that in the hash scheme, the storage, along
  1945  					// with the trie nodes of the given root, is already present
  1946  					// in the database. Schedule the storage task anyway to simplify
  1947  					// the logic here.
  1948  					res.task.stateTasks[res.hashes[i]] = account.Root
  1949  				}
  1950  				res.task.needState[i] = true
  1951  				res.task.pend++
  1952  			}
  1953  		}
  1954  	}
  1955  	// Delete any subtasks that have been aborted but not resumed. This is
  1956  	// essential as the corresponding contract might have self-destructed in this
  1957  	// cycle (no longer possible on Ethereum since self-destruction was disabled
  1958  	// in the Cancun fork, but the condition is still necessary for other networks).
  1959  	//
  1960  	// Keep the leftover storage tasks if they are not covered by the responded
  1961  	// account range which should be picked up in next account wave.
  1962  	if len(res.hashes) > 0 {
  1963  		// The hash of last delivered account in the response
  1964  		// The hash of the last delivered account in the response
  1965  		for hash := range res.task.SubTasks {
  1966  			// TODO(rjl493456442) degrade the log level before merging.
  1967  			if hash.Cmp(last) > 0 {
  1968  				log.Info("Keeping suspended storage retrieval", "account", hash)
  1969  				continue
  1970  			}
  1971  			// TODO(rjl493456442) degrade the log level before merging.
  1972  			// It should never happen in ethereum.
  1973  			if _, ok := resumed[hash]; !ok {
  1974  				log.Error("Aborting suspended storage retrieval", "account", hash)
  1975  				delete(res.task.SubTasks, hash)
  1976  				largeStorageDiscardGauge.Inc(1)
  1977  			}
  1978  		}
  1979  	}
  1980  	// If the account range contained no contracts, or all have been fully filled
  1981  	// beforehand, short circuit storage filling and forward to the next task
  1982  	if res.task.pend == 0 {
  1983  		s.forwardAccountTask(res.task)
  1984  		return
  1985  	}
  1986  	// Some accounts are incomplete, leave as is for the storage and contract
  1987  	// task assigners to pick up and fill
  1988  }
  1989  
  1990  // processBytecodeResponse integrates an already validated bytecode response
  1991  // into the account tasks.
  1992  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1993  	batch := s.db.NewBatch()
  1994  
  1995  	var (
  1996  		codes uint64
  1997  	)
  1998  	for i, hash := range res.hashes {
  1999  		code := res.codes[i]
  2000  
  2001  		// If the bytecode was not delivered, reschedule it
  2002  		if code == nil {
  2003  			res.task.codeTasks[hash] = struct{}{}
  2004  			continue
  2005  		}
  2006  		// Code was delivered, mark it as not needed anymore
  2007  		for j, account := range res.task.res.accounts {
  2008  			if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
  2009  				res.task.needCode[j] = false
  2010  				res.task.pend--
  2011  			}
  2012  		}
  2013  		// Push the bytecode into a database batch
  2014  		codes++
  2015  		rawdb.WriteCode(batch, hash, code)
  2016  	}
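        	// Note: this 'bytes' variable shadows the standard library bytes package
        	// for the remainder of this function.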
  2017  	bytes := common.StorageSize(batch.ValueSize())
  2018  	if err := batch.Write(); err != nil {
  2019  		log.Crit("Failed to persist bytecodes", "err", err)
  2020  	}
  2021  	s.bytecodeSynced += codes
  2022  	s.bytecodeBytes += bytes
  2023  
  2024  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes)
  2025  
  2026  	// If this delivery completed the last pending task, forward the account task
  2027  	// to the next chunk
  2028  	if res.task.pend == 0 {
  2029  		s.forwardAccountTask(res.task)
  2030  		return
  2031  	}
  2032  	// Some accounts are still incomplete, leave as is for the storage and contract
  2033  	// task assigners to pick up and fill.
  2034  }
  2035  
  2036  // processStorageResponse integrates an already validated storage response
  2037  // into the account tasks.
  2038  func (s *Syncer) processStorageResponse(res *storageResponse) {
  2039  	// Switch the subtask from pending to idle
  2040  	if res.subTask != nil {
  2041  		res.subTask.req = nil
  2042  	}
  2043  	batch := ethdb.HookedBatch{
  2044  		Batch: s.db.NewBatch(),
  2045  		OnPut: func(key []byte, value []byte) {
  2046  			s.storageBytes += common.StorageSize(len(key) + len(value))
  2047  		},
  2048  	}
  2049  	var (
  2050  		slots           int
  2051  		oldStorageBytes = s.storageBytes
  2052  	)
  2053  	// Iterate over all the accounts and reconstruct their storage tries from the
  2054  	// delivered slots
  2055  	for i, account := range res.accounts {
  2056  		// If the account was not delivered, reschedule it
  2057  		if i >= len(res.hashes) {
  2058  			res.mainTask.stateTasks[account] = res.roots[i]
  2059  			continue
  2060  		}
  2061  		// State was delivered; if complete, mark it as not needed anymore,
  2062  		// otherwise mark the account as needing healing
  2063  		for j, hash := range res.mainTask.res.hashes {
  2064  			if account != hash {
  2065  				continue
  2066  			}
  2067  			acc := res.mainTask.res.accounts[j]
  2068  
  2069  			// If the packet contains multiple contract storage slots, all
  2070  			// but the last are surely complete. The last contract may be
  2071  			// chunked, so check its continuation flag.
  2072  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  2073  				res.mainTask.needState[j] = false
  2074  				res.mainTask.pend--
  2075  				res.mainTask.stateCompleted[account] = struct{}{} // mark it as completed
  2076  				smallStorageGauge.Inc(1)
  2077  			}
  2078  			// If the last contract was chunked, mark it as needing healing
  2079  			// to avoid writing it out to disk prematurely.
  2080  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  2081  				res.mainTask.needHeal[j] = true
  2082  			}
  2083  			// If the last contract was chunked, we need to switch to large
  2084  			// contract handling mode
  2085  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  2086  				// If we haven't yet started a large-contract retrieval, create
  2087  				// the subtasks for it within the main account task
  2088  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  2089  					var (
  2090  						keys    = res.hashes[i]
  2091  						chunks  = uint64(storageConcurrency)
  2092  						lastKey common.Hash
  2093  					)
  2094  					if len(keys) > 0 {
  2095  						lastKey = keys[len(keys)-1]
  2096  					}
  2097  					// If the number of slots remaining is low, decrease the
  2098  					// number of chunks. Somewhere on the order of 10-15K slots
  2099  					// fit into a packet of 500KB. A key/slot pair is at most 64
  2100  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  2101  					//
  2102  					// Chunk so that at least 2 packets are needed to fill a task.
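        					// As a worked example: an estimate of 24576 remaining slots gives
        					// n = 24576/(2*8192) = 1, splitting the contract into just 2
        					// chunks instead of the default storageConcurrency.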
  2103  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  2104  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  2105  							chunks = n + 1
  2106  						}
  2107  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  2108  					} else {
  2109  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  2110  					}
  2111  					r := newHashRange(lastKey, chunks)
  2112  					if chunks == 1 {
  2113  						smallStorageGauge.Inc(1)
  2114  					} else {
  2115  						largeStorageGauge.Inc(1)
  2116  					}
  2117  					// Our first task is the one that was just filled by this response.
  2118  					batch := ethdb.HookedBatch{
  2119  						Batch: s.db.NewBatch(),
  2120  						OnPut: func(key []byte, value []byte) {
  2121  							s.storageBytes += common.StorageSize(len(key) + len(value))
  2122  						},
  2123  					}
  2124  					var tr genTrie
  2125  					if s.scheme == rawdb.HashScheme {
  2126  						tr = newHashTrie(batch)
  2127  					}
  2128  					if s.scheme == rawdb.PathScheme {
  2129  						// Keep the left boundary as it's the first range.
  2130  						tr = newPathTrie(account, false, s.db, batch)
  2131  					}
  2132  					tasks = append(tasks, &storageTask{
  2133  						Next:     common.Hash{},
  2134  						Last:     r.End(),
  2135  						root:     acc.Root,
  2136  						genBatch: batch,
  2137  						genTrie:  tr,
  2138  					})
  2139  					for r.Next() {
  2140  						batch := ethdb.HookedBatch{
  2141  							Batch: s.db.NewBatch(),
  2142  							OnPut: func(key []byte, value []byte) {
  2143  								s.storageBytes += common.StorageSize(len(key) + len(value))
  2144  							},
  2145  						}
  2146  						var tr genTrie
  2147  						if s.scheme == rawdb.HashScheme {
  2148  							tr = newHashTrie(batch)
  2149  						}
  2150  						if s.scheme == rawdb.PathScheme {
  2151  							tr = newPathTrie(account, true, s.db, batch)
  2152  						}
  2153  						tasks = append(tasks, &storageTask{
  2154  							Next:     r.Start(),
  2155  							Last:     r.End(),
  2156  							root:     acc.Root,
  2157  							genBatch: batch,
  2158  							genTrie:  tr,
  2159  						})
  2160  					}
  2161  					for _, task := range tasks {
  2162  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  2163  					}
  2164  					res.mainTask.SubTasks[account] = tasks
  2165  
  2166  					// Since we've just created the sub-tasks, this response
  2167  					// is surely for the first one (zero origin)
  2168  					res.subTask = tasks[0]
  2169  				}
  2170  			}
  2171  			// If we're in large contract delivery mode, forward the subtask
  2172  			if res.subTask != nil {
  2173  				// Ensure the response doesn't overflow into the subsequent task
  2174  				last := res.subTask.Last.Big()
  2175  				// Find the first overflowing key. While at it, mark res as complete
  2176  				// if we find the range to include or pass the 'last'
  2177  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  2178  					cmp := res.hashes[i][k].Big().Cmp(last)
  2179  					if cmp >= 0 {
  2180  						res.cont = false
  2181  					}
  2182  					return cmp > 0
  2183  				})
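        				// sort.Search yields the smallest index whose key exceeds 'last'
        				// (or the slice length if none), i.e. the first overflowing entry.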
  2184  				if index >= 0 {
  2185  					// cut off excess
  2186  					res.hashes[i] = res.hashes[i][:index]
  2187  					res.slots[i] = res.slots[i][:index]
  2188  				}
  2189  				// Forward the relevant storage chunk (even if created just now)
  2190  				if res.cont {
  2191  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  2192  				} else {
  2193  					res.subTask.done = true
  2194  				}
  2195  			}
  2196  		}
  2197  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  2198  		// push them to disk. If the contract is chunked, the trie nodes will be
  2199  		// reconstructed later.
  2200  		slots += len(res.hashes[i])
  2201  
  2202  		if i < len(res.hashes)-1 || res.subTask == nil {
  2203  			// No need to shadow 'account' locally: this closure does not outlive the loop
  2204  			var tr genTrie
  2205  			if s.scheme == rawdb.HashScheme {
  2206  				tr = newHashTrie(batch)
  2207  			}
  2208  			if s.scheme == rawdb.PathScheme {
  2209  				// Keep the left boundary as it's complete
  2210  				tr = newPathTrie(account, false, s.db, batch)
  2211  			}
  2212  			for j := 0; j < len(res.hashes[i]); j++ {
  2213  				tr.update(res.hashes[i][j][:], res.slots[i][j])
  2214  			}
  2215  			tr.commit(true)
  2216  		}
  2217  		// Persist the received storage segments. This flat state may be
  2218  		// outdated during the sync, but it can be fixed up later during
  2219  		// snapshot generation.
  2220  		for j := 0; j < len(res.hashes[i]); j++ {
  2221  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2222  
  2223  			// If we're storing large contracts, generate the trie nodes
  2224  			// on the fly to not trash the gluing points
  2225  			if i == len(res.hashes)-1 && res.subTask != nil {
  2226  				res.subTask.genTrie.update(res.hashes[i][j][:], res.slots[i][j])
  2227  			}
  2228  		}
  2229  	}
  2230  	// Large contracts could have generated new trie nodes, flush them to disk
  2231  	if res.subTask != nil {
  2232  		if res.subTask.done {
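        			// Commit as 'complete' only for the chunk covering the tail of the
        			// keyspace (Last == MaxHash), allowing the right boundary trie
        			// nodes to be flushed as well.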
  2233  			root := res.subTask.genTrie.commit(res.subTask.Last == common.MaxHash)
  2234  			if err := res.subTask.genBatch.Write(); err != nil {
  2235  				log.Error("Failed to persist stack slots", "err", err)
  2236  			}
  2237  			res.subTask.genBatch.Reset()
  2238  
  2239  			// If the chunk overflowed but was in fact a full delivery (the
  2240  			// computed root matches), clear the heal request.
  2241  			accountHash := res.accounts[len(res.accounts)-1]
  2242  			if root == res.subTask.root && rawdb.HasTrieNode(s.db, accountHash, nil, root, s.scheme) {
  2243  				for i, account := range res.mainTask.res.hashes {
  2244  					if account == accountHash {
  2245  						res.mainTask.needHeal[i] = false
  2246  						skipStorageHealingGauge.Inc(1)
  2247  					}
  2248  				}
  2249  			}
  2250  		} else if res.subTask.genBatch.ValueSize() > batchSizeThreshold {
  2251  			res.subTask.genTrie.commit(false)
  2252  			if err := res.subTask.genBatch.Write(); err != nil {
  2253  				log.Error("Failed to persist stack slots", "err", err)
  2254  			}
  2255  			res.subTask.genBatch.Reset()
  2256  		}
  2257  	}
  2258  	// Flush anything written just now and update the stats
  2259  	if err := batch.Write(); err != nil {
  2260  		log.Crit("Failed to persist storage slots", "err", err)
  2261  	}
  2262  	s.storageSynced += uint64(slots)
  2263  
  2264  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2265  
  2266  	// If this delivery completed the last pending task, forward the account task
  2267  	// to the next chunk
  2268  	if res.mainTask.pend == 0 {
  2269  		s.forwardAccountTask(res.mainTask)
  2270  		return
  2271  	}
  2272  	// Some accounts are still incomplete, leave as is for the storage and contract
  2273  	// task assigners to pick up and fill.
  2274  }
  2275  
  2276  // processTrienodeHealResponse integrates an already validated trienode response
  2277  // into the healer tasks.
  2278  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2279  	var (
  2280  		start = time.Now()
  2281  		fills int
  2282  	)
  2283  	for i, hash := range res.hashes {
  2284  		node := res.nodes[i]
  2285  
  2286  		// If the trie node was not delivered, reschedule it
  2287  		if node == nil {
  2288  			res.task.trieTasks[res.paths[i]] = res.hashes[i]
  2289  			continue
  2290  		}
  2291  		fills++
  2292  
  2293  		// Push the trie node into the state syncer
  2294  		s.trienodeHealSynced++
  2295  		s.trienodeHealBytes += common.StorageSize(len(node))
  2296  
  2297  		err := s.healer.scheduler.ProcessNode(trie.NodeSyncResult{Path: res.paths[i], Data: node})
  2298  		switch err {
  2299  		case nil:
  2300  		case trie.ErrAlreadyProcessed:
  2301  			s.trienodeHealDups++
  2302  		case trie.ErrNotRequested:
  2303  			s.trienodeHealNops++
  2304  		default:
  2305  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2306  		}
  2307  	}
  2308  	s.commitHealer(false)
  2309  
  2310  	// Calculate the processing rate of one filled trie node
  2311  	rate := float64(fills) / (float64(time.Since(start)) / float64(time.Second))
  2312  
  2313  	// Update the currently measured trienode queueing and processing throughput.
  2314  	//
  2315  	// The processing rate needs to be updated uniformly, independent of whether
  2316  	// we've processed 1x100 trie nodes or 100x1, to keep the rate consistent even
  2317  	// in the face of varying network packets. As such, we cannot just measure the
  2318  	// time it took to process N trie nodes and update once, we need one update
  2319  	// per trie node.
  2320  	//
  2321  	// Naively, that would be:
  2322  	//
  2323  	//   for i:=0; i<fills; i++ {
  2324  	//     healRate = (1-measurementImpact)*oldRate + measurementImpact*newRate
  2325  	//   }
  2326  	//
  2327  	// Essentially, a recursive expansion of HR = (1-MI)*HR + MI*NR.
  2328  	//
  2329  	// We can expand that formula for the Nth item as:
  2330  	//   HR(N) = (1-MI)^N*OR + (1-MI)^(N-1)*MI*NR + (1-MI)^(N-2)*MI*NR + ... + (1-MI)^0*MI*NR
  2331  	//
  2332  	// The above is a geometric sequence that can be summed to:
  2333  	//   HR(N) = (1-MI)^N*(OR-NR) + NR
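        	//
        	// For intuition: if MI were 0.005, then after N = 100 fills we'd have
        	// (1-MI)^N ≈ 0.61, i.e. a full batch still retains ~61% of the old rate.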
  2334  	s.trienodeHealRate = gomath.Pow(1-trienodeHealRateMeasurementImpact, float64(fills))*(s.trienodeHealRate-rate) + rate
  2335  
  2336  	pending := s.trienodeHealPend.Load()
  2337  	if time.Since(s.trienodeHealThrottled) > time.Second {
  2338  		// Periodically adjust the trie node throttler
  2339  		if float64(pending) > 2*s.trienodeHealRate {
  2340  			s.trienodeHealThrottle *= trienodeHealThrottleIncrease
  2341  		} else {
  2342  			s.trienodeHealThrottle /= trienodeHealThrottleDecrease
  2343  		}
  2344  		if s.trienodeHealThrottle > maxTrienodeHealThrottle {
  2345  			s.trienodeHealThrottle = maxTrienodeHealThrottle
  2346  		} else if s.trienodeHealThrottle < minTrienodeHealThrottle {
  2347  			s.trienodeHealThrottle = minTrienodeHealThrottle
  2348  		}
  2349  		s.trienodeHealThrottled = time.Now()
  2350  
  2351  		log.Debug("Updated trie node heal throttler", "rate", s.trienodeHealRate, "pending", pending, "throttle", s.trienodeHealThrottle)
  2352  	}
  2353  }
  2354  
  2355  func (s *Syncer) commitHealer(force bool) {
  2356  	if !force && s.healer.scheduler.MemSize() < ethdb.IdealBatchSize {
  2357  		return
  2358  	}
  2359  	batch := s.db.NewBatch()
  2360  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2361  		log.Error("Failed to commit healing data", "err", err)
  2362  	}
  2363  	if err := batch.Write(); err != nil {
  2364  		log.Crit("Failed to persist healing data", "err", err)
  2365  	}
  2366  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2367  }
  2368  
  2369  // processBytecodeHealResponse integrates an already validated bytecode response
  2370  // into the healer tasks.
  2371  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2372  	for i, hash := range res.hashes {
  2373  		node := res.codes[i]
  2374  
  2375  		// If the trie node was not delivered, reschedule it
  2376  		if node == nil {
  2377  			res.task.codeTasks[hash] = struct{}{}
  2378  			continue
  2379  		}
  2380  		// Push the trie node into the state syncer
  2381  		s.bytecodeHealSynced++
  2382  		s.bytecodeHealBytes += common.StorageSize(len(node))
  2383  
  2384  		err := s.healer.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node})
  2385  		switch err {
  2386  		case nil:
  2387  		case trie.ErrAlreadyProcessed:
  2388  			s.bytecodeHealDups++
  2389  		case trie.ErrNotRequested:
  2390  			s.bytecodeHealNops++
  2391  		default:
  2392  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2393  		}
  2394  	}
  2395  	s.commitHealer(false)
  2396  }
  2397  
  2398  // forwardAccountTask takes a filled account task and persists anything available
  2399  // into the database, after which it forwards the next account marker so that the
  2400  // task's next chunk may be filled.
  2401  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2402  	// Remove any pending delivery
  2403  	res := task.res
  2404  	if res == nil {
  2405  		return // nothing to forward
  2406  	}
  2407  	task.res = nil
  2408  
  2409  	// Persist the received account segments. This flat state may be
  2410  	// outdated during the sync, but it can be fixed up later during
  2411  	// snapshot generation.
  2412  	oldAccountBytes := s.accountBytes
  2413  
  2414  	batch := ethdb.HookedBatch{
  2415  		Batch: s.db.NewBatch(),
  2416  		OnPut: func(key []byte, value []byte) {
  2417  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2418  		},
  2419  	}
  2420  	for i, hash := range res.hashes {
  2421  		if task.needCode[i] || task.needState[i] {
  2422  			break
  2423  		}
  2424  		slim := types.SlimAccountRLP(*res.accounts[i])
  2425  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2426  
  2427  		// If the task is complete, drop it into the stack trie to generate
  2428  		// account trie nodes for it
  2429  		if !task.needHeal[i] {
  2430  			full, err := types.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2431  			if err != nil {
  2432  				panic(err) // Really shouldn't ever happen
  2433  			}
  2434  			task.genTrie.update(hash[:], full)
  2435  		}
  2436  	}
  2437  	// Flush anything written just now and update the stats
  2438  	if err := batch.Write(); err != nil {
  2439  		log.Crit("Failed to persist accounts", "err", err)
  2440  	}
  2441  	s.accountSynced += uint64(len(res.accounts))
  2442  
  2443  	// Task filling persisted, push the chunk marker forward to the first
  2444  	// account still missing data.
  2445  	for i, hash := range res.hashes {
  2446  		if task.needCode[i] || task.needState[i] {
  2447  			return
  2448  		}
  2449  		task.Next = incHash(hash)
  2450  
  2451  		// Remove the completion flag once the account range is pushed
  2452  		// forward. The leftover accounts will be skipped in the next
  2453  		// cycle.
  2454  		delete(task.stateCompleted, hash)
  2455  	}
  2456  	// All accounts marked as complete, track if the entire task is done
  2457  	task.done = !res.cont
  2458  
  2459  	// Error out if there is any leftover completion flag.
  2460  	if task.done && len(task.stateCompleted) != 0 {
  2461  		panic(fmt.Errorf("storage completion flags should be emptied, %d left", len(task.stateCompleted)))
  2462  	}
  2463  	// Stack trie could have generated trie nodes, push them to disk (we need to
  2464  	// flush after finalizing task.done). It's fine even if we crash and lose this
  2465  	// write, as it will only cause more data to be downloaded during healing.
  2466  	if task.done {
  2467  		task.genTrie.commit(task.Last == common.MaxHash)
  2468  		if err := task.genBatch.Write(); err != nil {
  2469  			log.Error("Failed to persist stack account", "err", err)
  2470  		}
  2471  		task.genBatch.Reset()
  2472  	} else if task.genBatch.ValueSize() > batchSizeThreshold {
  2473  		task.genTrie.commit(false)
  2474  		if err := task.genBatch.Write(); err != nil {
  2475  			log.Error("Failed to persist stack account", "err", err)
  2476  		}
  2477  		task.genBatch.Reset()
  2478  	}
  2479  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2480  }
  2481  
  2482  // OnAccounts is a callback method to invoke when a range of accounts are
  2483  // received from a remote peer.
  2484  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2485  	size := common.StorageSize(len(hashes) * common.HashLength)
  2486  	for _, account := range accounts {
  2487  		size += common.StorageSize(len(account))
  2488  	}
  2489  	for _, node := range proof {
  2490  		size += common.StorageSize(len(node))
  2491  	}
  2492  	logger := peer.Log().New("reqid", id)
  2493  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2494  
  2495  	// Whether or not the response is valid, we can mark the peer as idle and
  2496  	// notify the scheduler to assign a new task. If the response is invalid,
  2497  	// we'll drop the peer in a bit.
  2498  	defer func() {
  2499  		s.lock.Lock()
  2500  		defer s.lock.Unlock()
  2501  		if _, ok := s.peers[peer.ID()]; ok {
  2502  			s.accountIdlers[peer.ID()] = struct{}{}
  2503  		}
  2504  		select {
  2505  		case s.update <- struct{}{}:
  2506  		default:
  2507  		}
  2508  	}()
  2509  	s.lock.Lock()
  2510  	// Ensure the response is for a valid request
  2511  	req, ok := s.accountReqs[id]
  2512  	if !ok {
  2513  		// Request stale, perhaps the peer timed out but came through in the end
  2514  		logger.Warn("Unexpected account range packet")
  2515  		s.lock.Unlock()
  2516  		return nil
  2517  	}
  2518  	delete(s.accountReqs, id)
  2519  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2520  
  2521  	// Clean up the request timeout timer, we'll see how to proceed further based
  2522  	// on the actual delivered content
  2523  	if !req.timeout.Stop() {
  2524  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2525  		s.lock.Unlock()
  2526  		return nil
  2527  	}
  2528  	// Response is valid, but check if peer is signalling that it does not have
  2529  	// the requested data. For account range queries that means the state being
  2530  	// retrieved was either already pruned remotely, or the peer is not yet
  2531  	// synced to our head.
  2532  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2533  		logger.Debug("Peer rejected account range request", "root", s.root)
  2534  		s.statelessPeers[peer.ID()] = struct{}{}
  2535  		s.lock.Unlock()
  2536  
  2537  		// Signal this request as failed, and ready for rescheduling
  2538  		s.scheduleRevertAccountRequest(req)
  2539  		return nil
  2540  	}
  2541  	root := s.root
  2542  	s.lock.Unlock()
  2543  
  2544  	// Reconstruct a partial trie from the response and verify it
  2545  	keys := make([][]byte, len(hashes))
  2546  	for i, key := range hashes {
  2547  		keys[i] = common.CopyBytes(key[:])
  2548  	}
  2549  	nodes := make(trienode.ProofList, len(proof))
  2550  	for i, node := range proof {
  2551  		nodes[i] = node
  2552  	}
  2553  	cont, err := trie.VerifyRangeProof(root, req.origin[:], keys, accounts, nodes.Set())
  2554  	if err != nil {
  2555  		logger.Warn("Account range failed proof", "err", err)
  2556  		// Signal this request as failed, and ready for rescheduling
  2557  		s.scheduleRevertAccountRequest(req)
  2558  		return err
  2559  	}
  2560  	accs := make([]*types.StateAccount, len(accounts))
  2561  	for i, account := range accounts {
  2562  		acc := new(types.StateAccount)
  2563  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2564  			panic(err) // We created these blobs, we must be able to decode them
  2565  		}
  2566  		accs[i] = acc
  2567  	}
  2568  	response := &accountResponse{
  2569  		task:     req.task,
  2570  		hashes:   hashes,
  2571  		accounts: accs,
  2572  		cont:     cont,
  2573  	}
  2574  	select {
  2575  	case req.deliver <- response:
  2576  	case <-req.cancel:
  2577  	case <-req.stale:
  2578  	}
  2579  	return nil
  2580  }
  2581  
  2582  // OnByteCodes is a callback method to invoke when a batch of contract
  2583  // bytecodes are received from a remote peer.
  2584  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2585  	s.lock.RLock()
  2586  	syncing := !s.snapped
  2587  	s.lock.RUnlock()
  2588  
  2589  	if syncing {
  2590  		return s.onByteCodes(peer, id, bytecodes)
  2591  	}
  2592  	return s.onHealByteCodes(peer, id, bytecodes)
  2593  }
  2594  
  2595  // onByteCodes is a callback method to invoke when a batch of contract
  2596  // bytecodes are received from a remote peer in the syncing phase.
  2597  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2598  	var size common.StorageSize
  2599  	for _, code := range bytecodes {
  2600  		size += common.StorageSize(len(code))
  2601  	}
  2602  	logger := peer.Log().New("reqid", id)
  2603  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2604  
  2605  	// Whether or not the response is valid, we can mark the peer as idle and
  2606  	// notify the scheduler to assign a new task. If the response is invalid,
  2607  	// we'll drop the peer in a bit.
  2608  	defer func() {
  2609  		s.lock.Lock()
  2610  		defer s.lock.Unlock()
  2611  		if _, ok := s.peers[peer.ID()]; ok {
  2612  			s.bytecodeIdlers[peer.ID()] = struct{}{}
  2613  		}
  2614  		select {
  2615  		case s.update <- struct{}{}:
  2616  		default:
  2617  		}
  2618  	}()
  2619  	s.lock.Lock()
  2620  	// Ensure the response is for a valid request
  2621  	req, ok := s.bytecodeReqs[id]
  2622  	if !ok {
  2623  		// Request stale, perhaps the peer timed out but came through in the end
  2624  		logger.Warn("Unexpected bytecode packet")
  2625  		s.lock.Unlock()
  2626  		return nil
  2627  	}
  2628  	delete(s.bytecodeReqs, id)
  2629  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2630  
  2631  	// Clean up the request timeout timer, we'll see how to proceed further based
  2632  	// on the actual delivered content
  2633  	if !req.timeout.Stop() {
  2634  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2635  		s.lock.Unlock()
  2636  		return nil
  2637  	}
  2638  
  2639  	// Response is valid, but check if peer is signalling that it does not have
  2640  	// the requested data. For bytecode queries that means the peer is not yet
  2641  	// synced.
  2642  	if len(bytecodes) == 0 {
  2643  		logger.Debug("Peer rejected bytecode request")
  2644  		s.statelessPeers[peer.ID()] = struct{}{}
  2645  		s.lock.Unlock()
  2646  
  2647  		// Signal this request as failed, and ready for rescheduling
  2648  		s.scheduleRevertBytecodeRequest(req)
  2649  		return nil
  2650  	}
  2651  	s.lock.Unlock()
  2652  
  2653  	// Cross reference the requested bytecodes with the response to find gaps
  2654  	// that the serving node is missing
  2655  	hasher := crypto.NewKeccakState()
  2656  	hash := make([]byte, 32)
  2657  
  2658  	codes := make([][]byte, len(req.hashes))
  2659  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2660  		// Find the next hash that we've been served, leaving misses with nils
  2661  		hasher.Reset()
  2662  		hasher.Write(bytecodes[i])
  2663  		hasher.Read(hash)
  2664  
  2665  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2666  			j++
  2667  		}
  2668  		if j < len(req.hashes) {
  2669  			codes[j] = bytecodes[i]
  2670  			j++
  2671  			continue
  2672  		}
  2673  		// We've either run out of hashes, or got unrequested data
  2674  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2675  		// Signal this request as failed, and ready for rescheduling
  2676  		s.scheduleRevertBytecodeRequest(req)
  2677  		return errors.New("unexpected bytecode")
  2678  	}
  2679  	// Response validated, send it to the scheduler for filling
  2680  	response := &bytecodeResponse{
  2681  		task:   req.task,
  2682  		hashes: req.hashes,
  2683  		codes:  codes,
  2684  	}
  2685  	select {
  2686  	case req.deliver <- response:
  2687  	case <-req.cancel:
  2688  	case <-req.stale:
  2689  	}
  2690  	return nil
  2691  }
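
// Editor's note: the sketch below is not part of the original file. The
// matching loop above is a single-pass merge join: snap responses must be a
// subsequence of the requested hashes, in request order, so one forward scan
// over both lists pairs each delivered blob with its hash, leaving nils for
// items the peer skipped. A standalone, hypothetical version of the same idea:
func matchOrdered(reqHashes []common.Hash, blobs [][]byte) ([][]byte, error) {
	var (
		hasher = crypto.NewKeccakState()
		hash   = make([]byte, 32)
		out    = make([][]byte, len(reqHashes))
	)
	for i, j := 0, 0; i < len(blobs); i++ {
		hasher.Reset()
		hasher.Write(blobs[i])
		hasher.Read(hash)

		// Advance the request cursor past any hashes the peer chose to skip
		for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
			j++
		}
		if j == len(reqHashes) {
			return nil, errors.New("blob unrequested or out of order")
		}
		out[j] = blobs[i]
		j++
	}
	return out, nil
}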
  2692  
  2693  // OnStorage is a callback method to invoke when ranges of storage slots
  2694  // are received from a remote peer.
  2695  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2696  	// Gather some trace stats to aid in debugging issues
  2697  	var (
  2698  		hashCount int
  2699  		slotCount int
  2700  		size      common.StorageSize
  2701  	)
  2702  	for _, hashset := range hashes {
  2703  		size += common.StorageSize(common.HashLength * len(hashset))
  2704  		hashCount += len(hashset)
  2705  	}
  2706  	for _, slotset := range slots {
  2707  		for _, slot := range slotset {
  2708  			size += common.StorageSize(len(slot))
  2709  		}
  2710  		slotCount += len(slotset)
  2711  	}
  2712  	for _, node := range proof {
  2713  		size += common.StorageSize(len(node))
  2714  	}
  2715  	logger := peer.Log().New("reqid", id)
  2716  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2717  
  2718  	// Whether or not the response is valid, we can mark the peer as idle and
  2719  	// notify the scheduler to assign a new task. If the response is invalid,
  2720  	// we'll drop the peer in a bit.
  2721  	defer func() {
  2722  		s.lock.Lock()
  2723  		defer s.lock.Unlock()
  2724  		if _, ok := s.peers[peer.ID()]; ok {
  2725  			s.storageIdlers[peer.ID()] = struct{}{}
  2726  		}
  2727  		select {
  2728  		case s.update <- struct{}{}:
  2729  		default:
  2730  		}
  2731  	}()
  2732  	s.lock.Lock()
  2733  	// Ensure the response is for a valid request
  2734  	req, ok := s.storageReqs[id]
  2735  	if !ok {
  2736  		// Request stale, perhaps the peer timed out but came through in the end
  2737  		logger.Warn("Unexpected storage ranges packet")
  2738  		s.lock.Unlock()
  2739  		return nil
  2740  	}
  2741  	delete(s.storageReqs, id)
  2742  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2743  
  2744  	// Clean up the request timeout timer, we'll see how to proceed further based
  2745  	// on the actual delivered content
  2746  	if !req.timeout.Stop() {
  2747  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2748  		s.lock.Unlock()
  2749  		return nil
  2750  	}
  2751  
  2752  	// Reject the response if the hash sets and slot sets don't match, or if the
  2753  	// peer sent more data than requested.
  2754  	if len(hashes) != len(slots) {
  2755  		s.lock.Unlock()
  2756  		s.scheduleRevertStorageRequest(req) // reschedule request
  2757  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2758  		return errors.New("hash and slot set size mismatch")
  2759  	}
  2760  	if len(hashes) > len(req.accounts) {
  2761  		s.lock.Unlock()
  2762  		s.scheduleRevertStorageRequest(req) // reschedule request
  2763  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2764  		return errors.New("hash set larger than requested")
  2765  	}
  2766  	// Response is valid, but check if peer is signalling that it does not have
  2767  	// the requested data. For storage range queries that means the state being
  2768  	// retrieved was either already pruned remotely, or the peer is not yet
  2769  	// synced to our head.
  2770  	if len(hashes) == 0 && len(proof) == 0 {
  2771  		logger.Debug("Peer rejected storage request")
  2772  		s.statelessPeers[peer.ID()] = struct{}{}
  2773  		s.lock.Unlock()
  2774  		s.scheduleRevertStorageRequest(req) // reschedule request
  2775  		return nil
  2776  	}
  2777  	s.lock.Unlock()
  2778  
  2779  	// Reconstruct the partial tries from the response and verify them
  2780  	var cont bool
  2781  
  2782  	// If a proof was attached even though the response is empty, it indicates
  2783  	// that the requested range specified with 'origin' is empty. Construct an
  2784  	// empty state response locally to finalize the range.
  2785  	if len(hashes) == 0 && len(proof) > 0 {
  2786  		hashes = append(hashes, []common.Hash{})
  2787  		slots = append(slots, [][]byte{})
  2788  	}
  2789  	for i := 0; i < len(hashes); i++ {
  2790  		// Convert the keys and proofs into an internal format
  2791  		keys := make([][]byte, len(hashes[i]))
  2792  		for j, key := range hashes[i] {
  2793  			keys[j] = common.CopyBytes(key[:])
  2794  		}
  2795  		nodes := make(trienode.ProofList, 0, len(proof))
  2796  		if i == len(hashes)-1 {
  2797  			for _, node := range proof {
  2798  				nodes = append(nodes, node)
  2799  			}
  2800  		}
  2801  		var err error
  2802  		if len(nodes) == 0 {
  2803  			// No proof has been attached, the response must cover the entire key
  2804  			// space and hash to the origin root.
  2805  			_, err = trie.VerifyRangeProof(req.roots[i], nil, keys, slots[i], nil)
  2806  			if err != nil {
  2807  				s.scheduleRevertStorageRequest(req) // reschedule request
  2808  				logger.Warn("Storage slots failed proof", "err", err)
  2809  				return err
  2810  			}
  2811  		} else {
  2812  			// A proof was attached, the response is only partial, check that the
  2813  			// returned data is indeed part of the storage trie
  2814  			proofdb := nodes.Set()
  2815  
  2816  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], keys, slots[i], proofdb)
  2817  			if err != nil {
  2818  				s.scheduleRevertStorageRequest(req) // reschedule request
  2819  				logger.Warn("Storage range failed proof", "err", err)
  2820  				return err
  2821  			}
  2822  		}
  2823  	}
  2824  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2825  	response := &storageResponse{
  2826  		mainTask: req.mainTask,
  2827  		subTask:  req.subTask,
  2828  		accounts: req.accounts,
  2829  		roots:    req.roots,
  2830  		hashes:   hashes,
  2831  		slots:    slots,
  2832  		cont:     cont,
  2833  	}
  2834  	select {
  2835  	case req.deliver <- response:
  2836  	case <-req.cancel:
  2837  	case <-req.stale:
  2838  	}
  2839  	return nil
  2840  }
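
// Editor's note: the sketch below is not part of the original file. It
// restates the rule applied in the verification loop above: in a storage
// response, only the final account may be cut short, so only that account
// carries the boundary proof; every earlier account's slots must form a
// complete range hashing exactly to its storage root. The helper is
// hypothetical.
func proofFor(i, accounts int, proof [][]byte) trienode.ProofList {
	if i != accounts-1 {
		// Interior accounts must be complete, proof-less ranges.
		return nil
	}
	nodes := make(trienode.ProofList, 0, len(proof))
	for _, node := range proof {
		nodes = append(nodes, node)
	}
	return nodes
}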
  2841  
  2842  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2843  // is received from a remote peer.
  2844  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2845  	var size common.StorageSize
  2846  	for _, node := range trienodes {
  2847  		size += common.StorageSize(len(node))
  2848  	}
  2849  	logger := peer.Log().New("reqid", id)
  2850  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2851  
  2852  	// Whether or not the response is valid, we can mark the peer as idle and
  2853  	// notify the scheduler to assign a new task. If the response is invalid,
  2854  	// we'll drop the peer in a bit.
  2855  	defer func() {
  2856  		s.lock.Lock()
  2857  		defer s.lock.Unlock()
  2858  		if _, ok := s.peers[peer.ID()]; ok {
  2859  			s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2860  		}
  2861  		select {
  2862  		case s.update <- struct{}{}:
  2863  		default:
  2864  		}
  2865  	}()
  2866  	s.lock.Lock()
  2867  	// Ensure the response is for a valid request
  2868  	req, ok := s.trienodeHealReqs[id]
  2869  	if !ok {
  2870  		// Request stale, perhaps the peer timed out but came through in the end
  2871  		logger.Warn("Unexpected trienode heal packet")
  2872  		s.lock.Unlock()
  2873  		return nil
  2874  	}
  2875  	delete(s.trienodeHealReqs, id)
  2876  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2877  
  2878  	// Clean up the request timeout timer, we'll see how to proceed further based
  2879  	// on the actual delivered content
  2880  	if !req.timeout.Stop() {
  2881  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2882  		s.lock.Unlock()
  2883  		return nil
  2884  	}
  2885  
  2886  	// Response is valid, but check if peer is signalling that it does not have
  2887  	// the requested data. For trie node queries that means the peer is not yet
  2888  	// synced.
  2889  	if len(trienodes) == 0 {
  2890  		logger.Debug("Peer rejected trienode heal request")
  2891  		s.statelessPeers[peer.ID()] = struct{}{}
  2892  		s.lock.Unlock()
  2893  
  2894  		// Signal this request as failed, and ready for rescheduling
  2895  		s.scheduleRevertTrienodeHealRequest(req)
  2896  		return nil
  2897  	}
  2898  	s.lock.Unlock()
  2899  
  2900  	// Cross reference the requested trienodes with the response to find gaps
  2901  	// that the serving node is missing
  2902  	var (
  2903  		hasher = crypto.NewKeccakState()
  2904  		hash   = make([]byte, 32)
  2905  		nodes  = make([][]byte, len(req.hashes))
  2906  		fills  uint64
  2907  	)
  2908  	for i, j := 0, 0; i < len(trienodes); i++ {
  2909  		// Find the next hash that we've been served, leaving misses with nils
  2910  		hasher.Reset()
  2911  		hasher.Write(trienodes[i])
  2912  		hasher.Read(hash)
  2913  
  2914  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2915  			j++
  2916  		}
  2917  		if j < len(req.hashes) {
  2918  			nodes[j] = trienodes[i]
  2919  			fills++
  2920  			j++
  2921  			continue
  2922  		}
  2923  		// We've either run out of hashes, or got unrequested data
  2924  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2925  
  2926  		// Signal this request as failed, and ready for rescheduling
  2927  		s.scheduleRevertTrienodeHealRequest(req)
  2928  		return errors.New("unexpected healing trienode")
  2929  	}
  2930  	// Response validated, send it to the scheduler for filling
  2931  	s.trienodeHealPend.Add(fills)
  2932  	defer func() {
  2933  		s.trienodeHealPend.Add(^(fills - 1)) // atomically subtract fills
  2934  	}()
  2935  	response := &trienodeHealResponse{
  2936  		paths:  req.paths,
  2937  		task:   req.task,
  2938  		hashes: req.hashes,
  2939  		nodes:  nodes,
  2940  	}
  2941  	select {
  2942  	case req.deliver <- response:
  2943  	case <-req.cancel:
  2944  	case <-req.stale:
  2945  	}
  2946  	return nil
  2947  }
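
// Editor's note: the sketch below is not part of the original file. It spells
// out the two's-complement trick used in the deferred call near the end of
// the function above: for uint64, ^x == -x-1 (mod 2^64), so Add(^(delta - 1))
// performs an atomic subtraction of delta. The helper name is hypothetical.
func atomicSub(counter *atomic.Uint64, delta uint64) {
	// Equivalent to counter.Add(-delta), which the unsigned API cannot express
	// directly; the wraparound addition yields the same modular result.
	counter.Add(^(delta - 1))
}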
  2948  
  2949  // onHealByteCodes is a callback method to invoke when a batch of contract
  2950  // bytecodes is received from a remote peer in the healing phase.
  2951  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2952  	var size common.StorageSize
  2953  	for _, code := range bytecodes {
  2954  		size += common.StorageSize(len(code))
  2955  	}
  2956  	logger := peer.Log().New("reqid", id)
  2957  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2958  
  2959  	// Whether or not the response is valid, we can mark the peer as idle and
  2960  	// notify the scheduler to assign a new task. If the response is invalid,
  2961  	// we'll drop the peer in a bit.
  2962  	defer func() {
  2963  		s.lock.Lock()
  2964  		defer s.lock.Unlock()
  2965  		if _, ok := s.peers[peer.ID()]; ok {
  2966  			s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2967  		}
  2968  		select {
  2969  		case s.update <- struct{}{}:
  2970  		default:
  2971  		}
  2972  	}()
  2973  	s.lock.Lock()
  2974  	// Ensure the response is for a valid request
  2975  	req, ok := s.bytecodeHealReqs[id]
  2976  	if !ok {
  2977  		// Request stale, perhaps the peer timed out but came through in the end
  2978  		logger.Warn("Unexpected bytecode heal packet")
  2979  		s.lock.Unlock()
  2980  		return nil
  2981  	}
  2982  	delete(s.bytecodeHealReqs, id)
  2983  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2984  
  2985  	// Clean up the request timeout timer, we'll see how to proceed further based
  2986  	// on the actual delivered content
  2987  	if !req.timeout.Stop() {
  2988  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2989  		s.lock.Unlock()
  2990  		return nil
  2991  	}
  2992  
  2993  	// Response is valid, but check if peer is signalling that it does not have
  2994  	// the requested data. For bytecode queries that means the peer is not yet
  2995  	// synced.
  2996  	if len(bytecodes) == 0 {
  2997  		logger.Debug("Peer rejected bytecode heal request")
  2998  		s.statelessPeers[peer.ID()] = struct{}{}
  2999  		s.lock.Unlock()
  3000  
  3001  		// Signal this request as failed, and ready for rescheduling
  3002  		s.scheduleRevertBytecodeHealRequest(req)
  3003  		return nil
  3004  	}
  3005  	s.lock.Unlock()
  3006  
  3007  	// Cross reference the requested bytecodes with the response to find gaps
  3008  	// that the serving node is missing
  3009  	hasher := crypto.NewKeccakState()
  3010  	hash := make([]byte, 32)
  3011  
  3012  	codes := make([][]byte, len(req.hashes))
  3013  	for i, j := 0, 0; i < len(bytecodes); i++ {
  3014  		// Find the next hash that we've been served, leaving misses with nils
  3015  		hasher.Reset()
  3016  		hasher.Write(bytecodes[i])
  3017  		hasher.Read(hash)
  3018  
  3019  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  3020  			j++
  3021  		}
  3022  		if j < len(req.hashes) {
  3023  			codes[j] = bytecodes[i]
  3024  			j++
  3025  			continue
  3026  		}
  3027  		// We've either run out of hashes, or got unrequested data
  3028  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  3029  		// Signal this request as failed, and ready for rescheduling
  3030  		s.scheduleRevertBytecodeHealRequest(req)
  3031  		return errors.New("unexpected healing bytecode")
  3032  	}
  3033  	// Response validated, send it to the scheduler for filling
  3034  	response := &bytecodeHealResponse{
  3035  		task:   req.task,
  3036  		hashes: req.hashes,
  3037  		codes:  codes,
  3038  	}
  3039  	select {
  3040  	case req.deliver <- response:
  3041  	case <-req.cancel:
  3042  	case <-req.stale:
  3043  	}
  3044  	return nil
  3045  }
  3046  
  3047  // onHealState is a callback method to invoke when a flat state (account
  3048  // or storage slot) is downloaded during the healing stage. The flat states
  3049  // can be persisted blindly and can be fixed later in the generation stage.
  3050  // Note it is not concurrency safe; callers must synchronize externally.
  3051  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
  3052  	if len(paths) == 1 {
  3053  		var account types.StateAccount
  3054  		if err := rlp.DecodeBytes(value, &account); err != nil {
  3055  			return nil // Returning the error here would drop the remote peer
  3056  		}
  3057  		blob := types.SlimAccountRLP(account)
  3058  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  3059  		s.accountHealed += 1
  3060  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  3061  	}
  3062  	if len(paths) == 2 {
  3063  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  3064  		s.storageHealed += 1
  3065  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  3066  	}
  3067  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  3068  		s.stateWriter.Write() // It's fine to ignore the error here
  3069  		s.stateWriter.Reset()
  3070  	}
  3071  	return nil
  3072  }
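
// Editor's note: the sketch below is not part of the original file. The flush
// above follows the standard ethdb batching pattern: buffer writes until the
// batch's accumulated value size crosses IdealBatchSize, then write it out
// and reset, so healed states reach disk in large chunks. A hypothetical
// standalone form:
func writeBuffered(batch ethdb.Batch, key, value []byte) error {
	if err := batch.Put(key, value); err != nil {
		return err
	}
	if batch.ValueSize() > ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		batch.Reset()
	}
	return nil
}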
  3073  
  3074  // hashSpace is the total size of the 256 bit hash space for accounts.
  3075  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  3076  
  3077  // report calculates various status reports and provides them to the user.
  3078  func (s *Syncer) report(force bool) {
  3079  	if len(s.tasks) > 0 {
  3080  		s.reportSyncProgress(force)
  3081  		return
  3082  	}
  3083  	s.reportHealProgress(force)
  3084  }
  3085  
  3086  // reportSyncProgress calculates various status reports and provides them to the user.
  3087  func (s *Syncer) reportSyncProgress(force bool) {
  3088  	// Don't report all the events, just occasionally
  3089  	if !force && time.Since(s.logTime) < 8*time.Second {
  3090  		return
  3091  	}
  3092  	// Don't report anything until we have meaningful progress
  3093  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  3094  	if synced == 0 {
  3095  		return
  3096  	}
  3097  	accountGaps := new(big.Int)
  3098  	for _, task := range s.tasks {
  3099  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  3100  	}
  3101  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  3102  	if accountFills.BitLen() == 0 {
  3103  		return
  3104  	}
  3105  	s.logTime = time.Now()
  3106  	estBytes := float64(new(big.Int).Div(
  3107  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  3108  		accountFills,
  3109  	).Uint64())
  3110  	// Don't report anything until the estimate is meaningful
  3111  	if estBytes < 1.0 {
  3112  		return
  3113  	}
  3114  	elapsed := time.Since(s.startTime)
  3115  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  3116  
  3117  	// Create a mega progress report
  3118  	var (
  3119  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  3120  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  3121  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  3122  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  3123  	)
  3124  	log.Info("Syncing: state download in progress", "synced", progress, "state", synced,
  3125  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  3126  }
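
// Editor's note: the sketch below is not part of the original file. It
// isolates the extrapolation used above: if `synced` bytes have filled
// `fills` out of the full 2^256 hash space, linearity predicts about
// synced * space / fills bytes in total (e.g. 1 GiB downloaded while covering
// a quarter of the space predicts ~4 GiB), and the ETA scales the elapsed
// time by the same ratio. The helper name is hypothetical.
func estimateTotalBytes(synced uint64, fills, space *big.Int) uint64 {
	est := new(big.Int).Mul(new(big.Int).SetUint64(synced), space)
	return est.Div(est, fills).Uint64() // total ≈ synced * space / fills
}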
  3127  
  3128  // reportHealProgress calculates various status reports and provides them to the user.
  3129  func (s *Syncer) reportHealProgress(force bool) {
  3130  	// Don't report all the events, just occasionally
  3131  	if !force && time.Since(s.logTime) < 8*time.Second {
  3132  		return
  3133  	}
  3134  	s.logTime = time.Now()
  3135  
  3136  	// Create a mega progress report
  3137  	var (
  3138  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  3139  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  3140  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  3141  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  3142  	)
  3143  	log.Info("Syncing: state healing in progress", "accounts", accounts, "slots", storage,
  3144  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  3145  }
  3146  
  3147  // estimateRemainingSlots tries to determine roughly how many slots are left in
  3148  // a contract storage, based on the number of keys and the last hash. This method
  3149  // assumes that the hashes are lexicographically ordered and evenly distributed.
  3150  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  3151  	if last == (common.Hash{}) {
  3152  		return 0, errors.New("last hash empty")
  3153  	}
  3154  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  3155  	space.Div(space, last.Big())
  3156  	if !space.IsUint64() {
  3157  		// Gigantic address space probably due to too few or malicious slots
  3158  		return 0, errors.New("too few slots for estimation")
  3159  	}
  3160  	return space.Uint64() - uint64(hashes), nil
  3161  }
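
// Editor's note: worked example, not part of the original file. With evenly
// distributed keys, `hashes` slots ending at hash `last` imply roughly
// hashes * 2^256 / last slots in total. So if 64 slots have been fetched and
// the last hash sits a quarter of the way through the key space:
//
//	quarter := common.BigToHash(new(big.Int).Rsh(math.MaxBig256, 2))
//	remaining, _ := estimateRemainingSlots(64, quarter) // ≈ 4*64 - 64 = 192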
  3162  
  3163  // capacitySort implements the Sort interface, allowing sorting by peer message
  3164  // throughput. Note, callers should use sort.Reverse to get the desired effect
  3165  // of highest capacity being at the front.
  3166  type capacitySort struct {
  3167  	ids  []string
  3168  	caps []int
  3169  }
  3170  
  3171  func (s *capacitySort) Len() int {
  3172  	return len(s.ids)
  3173  }
  3174  
  3175  func (s *capacitySort) Less(i, j int) bool {
  3176  	return s.caps[i] < s.caps[j]
  3177  }
  3178  
  3179  func (s *capacitySort) Swap(i, j int) {
  3180  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  3181  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  3182  }
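
// Editor's note: illustrative usage, not part of the original file. Since
// Less orders ascending, callers wrap the sorter in sort.Reverse to put the
// highest-capacity peers first:
//
//	ids, caps := []string{"a", "b", "c"}, []int{10, 50, 30}
//	sort.Sort(sort.Reverse(&capacitySort{ids, caps}))
//	// ids is now ["b", "c", "a"], caps [50, 30, 10]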
  3183  
  3184  // healRequestSort implements the Sort interface, allowing trienode heal
  3185  // requests to be sorted, which is a prerequisite for merging storage requests.
  3186  type healRequestSort struct {
  3187  	paths     []string
  3188  	hashes    []common.Hash
  3189  	syncPaths []trie.SyncPath
  3190  }
  3191  
  3192  func (t *healRequestSort) Len() int {
  3193  	return len(t.hashes)
  3194  }
  3195  
  3196  func (t *healRequestSort) Less(i, j int) bool {
  3197  	a := t.syncPaths[i]
  3198  	b := t.syncPaths[j]
  3199  	switch bytes.Compare(a[0], b[0]) {
  3200  	case -1:
  3201  		return true
  3202  	case 1:
  3203  		return false
  3204  	}
  3205  	// identical first part
  3206  	if len(a) < len(b) {
  3207  		return true
  3208  	}
  3209  	if len(b) < len(a) {
  3210  		return false
  3211  	}
  3212  	if len(a) == 2 {
  3213  		return bytes.Compare(a[1], b[1]) < 0
  3214  	}
  3215  	return false
  3216  }
  3217  
  3218  func (t *healRequestSort) Swap(i, j int) {
  3219  	t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
  3220  	t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
  3221  	t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
  3222  }
  3223  
  3224  // Merge merges the pathsets, so that several storage requests concerning the
  3225  // same account are merged into one, to reduce bandwidth.
  3226  // Note: this operation is only meaningful if t has first been sorted.
  3227  func (t *healRequestSort) Merge() []TrieNodePathSet {
  3228  	var result []TrieNodePathSet
  3229  	for _, path := range t.syncPaths {
  3230  		pathset := TrieNodePathSet(path)
  3231  		if len(path) == 1 {
  3232  			// It's an account reference.
  3233  			result = append(result, pathset)
  3234  		} else {
  3235  			// It's a storage reference.
  3236  			end := len(result) - 1
  3237  			if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
  3238  				// The account doesn't match last, create a new entry.
  3239  				result = append(result, pathset)
  3240  			} else {
  3241  				// It's the same account as the previous one, add to the storage
  3242  				// paths of that request.
  3243  				result[end] = append(result[end], pathset[1])
  3244  			}
  3245  		}
  3246  	}
  3247  	return result
  3248  }
  3249  
  3250  // sortByAccountPath takes hashes and paths, and sorts them. After that, it
  3251  // generates the TrieNodePaths and merges paths which belong to the same account path.
  3252  func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
  3253  	var syncPaths []trie.SyncPath
  3254  	for _, path := range paths {
  3255  		syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
  3256  	}
  3257  	n := &healRequestSort{paths, hashes, syncPaths}
  3258  	sort.Sort(n)
  3259  	pathsets := n.Merge()
  3260  	return n.paths, n.hashes, n.syncPaths, pathsets
  3261  }
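  3262  }

// Editor's note: worked example, not part of the original file. Sorting
// groups every storage path behind its account path, letting Merge fold them
// into one TrieNodePathSet per account. With hypothetical account paths A, B
// and storage paths s1, s2:
//
//	input:  {A}, {A, s1}, {A, s2}, {B, s1}
//	output: {A, s1, s2}, {B, s1}
//
// i.e. a single path set per account, with its storage sub-paths appended in
// encounter order.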