github.com/ethereum/go-ethereum@v1.16.1/eth/protocols/snap/sync.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	gomath "math"
	"math/big"
	"math/rand"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/msgrate"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

const (
	// minRequestSize is the minimum number of bytes to request from a remote peer.
	// This number is used as the low cap for account and storage range requests.
	// Bytecode and trienode are limited inherently by item count (1).
	minRequestSize = 64 * 1024

	// maxRequestSize is the maximum number of bytes to request from a remote peer.
	// This number is used as the high cap for account and storage range requests.
	// Bytecode and trienode are limited more explicitly by the caps below.
	maxRequestSize = 512 * 1024

	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
	// single query. If this number is too low, we're not filling responses fully
	// and waste round trip times. If it's too high, we're capping responses and
	// waste bandwidth.
	//
	// Deployed bytecodes are currently capped at 24KB, so the minimum request
	// count should be maxRequestSize / 24K. Assuming that most contracts do not
	// come close to that, requesting 4x should be a good approximation.
	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4

	// maxTrieRequestCount is the maximum number of trie node blobs to request in
	// a single query. If this number is too low, we're not filling responses fully
	// and waste round trip times. If it's too high, we're capping responses and
	// waste bandwidth.
	maxTrieRequestCount = maxRequestSize / 512

	// trienodeHealRateMeasurementImpact is the impact a single measurement has on
	// the local node's trienode processing capacity. A value closer to 0 reacts
	// slower to sudden changes, but it is also more stable against temporary hiccups.
	trienodeHealRateMeasurementImpact = 0.005

	// minTrienodeHealThrottle is the minimum divisor for throttling trie node
	// heal requests to avoid overloading the local node and excessively expanding
	// the state trie breadth wise.
	minTrienodeHealThrottle = 1

	// maxTrienodeHealThrottle is the maximum divisor for throttling trie node
	// heal requests to avoid overloading the local node and excessively expanding
	// the state trie breadth wise.
	maxTrienodeHealThrottle = maxTrieRequestCount

	// trienodeHealThrottleIncrease is the multiplier for the throttle when the
	// rate of arriving data is higher than the rate of processing it.
	trienodeHealThrottleIncrease = 1.33

	// trienodeHealThrottleDecrease is the divisor for the throttle when the
	// rate of arriving data is lower than the rate of processing it.
	trienodeHealThrottleDecrease = 1.25

	// batchSizeThreshold is the maximum size allowed for a gentrie batch.
	batchSizeThreshold = 8 * 1024 * 1024
)
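
// For a concrete sense of the caps above, a sketch of the integer arithmetic
// (evaluating the constants defined in this block):
//
//	maxCodeRequestCount = 512*1024/(24*1024) * 4 // = 21 * 4 = 84 bytecode blobs per query
//	maxTrieRequestCount = 512*1024/512           // = 1024 trie node blobs per query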

var (
	// accountConcurrency is the number of chunks to split the account trie into
	// to allow concurrent retrievals.
	accountConcurrency = 16

	// storageConcurrency is the number of chunks to split a large contract
	// storage trie into to allow concurrent retrievals.
	storageConcurrency = 16
)

// ErrCancelled is returned from snap syncing if the operation was prematurely
// terminated.
var ErrCancelled = errors.New("sync cancelled")

// accountRequest tracks a pending account range request to ensure responses are
// to actual requests and to validate any security constraints.
//
// Concurrency note: account requests and responses are handled concurrently from
// the main runloop to allow Merkle proof verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. task). That
// is only included to allow the runloop to match a response to the task being
// synced without having yet another set of maps.
type accountRequest struct {
	peer string    // Peer to which this request is assigned
	id   uint64    // Request ID of this request
	time time.Time // Timestamp when the request was sent

	deliver chan *accountResponse // Channel to deliver successful response on
	revert  chan *accountRequest  // Channel to deliver request failure on
	cancel  chan struct{}         // Channel to track sync cancellation
	timeout *time.Timer           // Timer to track delivery timeout
	stale   chan struct{}         // Channel to signal the request was dropped

	origin common.Hash // First account requested to allow continuation checks
	limit  common.Hash // Last account requested to allow non-overlapping chunking

	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
}

// accountResponse is an already Merkle-verified remote response to an account
// range request. It contains the subtrie for the requested account range and
// the database that's going to be filled with the internal nodes on commit.
type accountResponse struct {
	task *accountTask // Task which this request is filling

	hashes   []common.Hash         // Account hashes in the returned range
	accounts []*types.StateAccount // Expanded accounts in the returned range

	cont bool // Whether the account range has a continuation
}

// bytecodeRequest tracks a pending bytecode request to ensure responses are to
// actual requests and to validate any security constraints.
//
// Concurrency note: bytecode requests and responses are handled concurrently from
// the main runloop to allow Keccak256 hash verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. task). That
// is only included to allow the runloop to match a response to the task being
// synced without having yet another set of maps.
type bytecodeRequest struct {
	peer string    // Peer to which this request is assigned
	id   uint64    // Request ID of this request
	time time.Time // Timestamp when the request was sent

	deliver chan *bytecodeResponse // Channel to deliver successful response on
	revert  chan *bytecodeRequest  // Channel to deliver request failure on
	cancel  chan struct{}          // Channel to track sync cancellation
	timeout *time.Timer            // Timer to track delivery timeout
	stale   chan struct{}          // Channel to signal the request was dropped

	hashes []common.Hash // Bytecode hashes to validate responses
	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
}

// bytecodeResponse is an already verified remote response to a bytecode request.
type bytecodeResponse struct {
	task *accountTask // Task which this request is filling

	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
}

// storageRequest tracks a pending storage ranges request to ensure responses are
// to actual requests and to validate any security constraints.
//
// Concurrency note: storage requests and responses are handled concurrently from
// the main runloop to allow Merkle proof verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. tasks). That
// is only included to allow the runloop to match a response to the task being
// synced without having yet another set of maps.
type storageRequest struct {
	peer string    // Peer to which this request is assigned
	id   uint64    // Request ID of this request
	time time.Time // Timestamp when the request was sent

	deliver chan *storageResponse // Channel to deliver successful response on
	revert  chan *storageRequest  // Channel to deliver request failure on
	cancel  chan struct{}         // Channel to track sync cancellation
	timeout *time.Timer           // Timer to track delivery timeout
	stale   chan struct{}         // Channel to signal the request was dropped

	accounts []common.Hash // Account hashes to validate responses
	roots    []common.Hash // Storage roots to validate responses

	origin common.Hash // First storage slot requested to allow continuation checks
	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking

	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
}

// storageResponse is an already Merkle-verified remote response to a storage
// range request. It contains the subtries for the requested storage ranges and
// the databases that are going to be filled with the internal nodes on commit.
type storageResponse struct {
	mainTask *accountTask // Task which this response belongs to
	subTask  *storageTask // Task which this response is filling

	accounts []common.Hash // Account hashes requested, may be only partially filled
	roots    []common.Hash // Storage roots requested, may be only partially filled

	hashes [][]common.Hash // Storage slot hashes in the returned range
	slots  [][][]byte      // Storage slot values in the returned range

	cont bool // Whether the last storage range has a continuation
}

// trienodeHealRequest tracks a pending state trie request to ensure responses
// are to actual requests and to validate any security constraints.
//
// Concurrency note: trie node requests and responses are handled concurrently from
// the main runloop to allow Keccak256 hash verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. task). That
// is only included to allow the runloop to match a response to the task being
// synced without having yet another set of maps.
type trienodeHealRequest struct {
	peer string    // Peer to which this request is assigned
	id   uint64    // Request ID of this request
	time time.Time // Timestamp when the request was sent

	deliver chan *trienodeHealResponse // Channel to deliver successful response on
	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
	cancel  chan struct{}              // Channel to track sync cancellation
	timeout *time.Timer                // Timer to track delivery timeout
	stale   chan struct{}              // Channel to signal the request was dropped

	paths  []string      // Trie node paths for identifying trie nodes
	hashes []common.Hash // Trie node hashes to validate responses

	task *healTask // Task which this request is filling (only access fields through the runloop!!)
}

// trienodeHealResponse is an already verified remote response to a trie node request.
type trienodeHealResponse struct {
	task *healTask // Task which this request is filling

	paths  []string      // Paths of the trie nodes
	hashes []common.Hash // Hashes of the trie nodes to avoid double hashing
	nodes  [][]byte      // Actual trie nodes to store into the database (nil = missing)
}

// bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
// actual requests and to validate any security constraints.
//
// Concurrency note: bytecode requests and responses are handled concurrently from
// the main runloop to allow Keccak256 hash verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. task). That
// is only included to allow the runloop to match a response to the task being
// synced without having yet another set of maps.
type bytecodeHealRequest struct {
	peer string    // Peer to which this request is assigned
	id   uint64    // Request ID of this request
	time time.Time // Timestamp when the request was sent

	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
	cancel  chan struct{}              // Channel to track sync cancellation
	timeout *time.Timer                // Timer to track delivery timeout
	stale   chan struct{}              // Channel to signal the request was dropped

	hashes []common.Hash // Bytecode hashes to validate responses
	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
}

// bytecodeHealResponse is an already verified remote response to a bytecode request.
type bytecodeHealResponse struct {
	task *healTask // Task which this request is filling

	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
}

// accountTask represents the sync task for a chunk of the account snapshot.
type accountTask struct {
	// These fields get serialized to key-value store on shutdown
	Next     common.Hash                    // Next account to sync in this interval
	Last     common.Hash                    // Last account to sync in this interval
	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts

	// This is a list of account hashes whose storage is already completed
	// in this cycle. This field is newly introduced in v1.14 and will be
	// empty if the task is resolved from legacy progress data. Furthermore,
	// this additional field will be ignored by legacy Geth. The only side
	// effect is that these contracts might be resynced in the new cycle,
	// retaining the legacy behavior.
	StorageCompleted []common.Hash `json:",omitempty"`

	// These fields are internals used during runtime
	req  *accountRequest  // Pending request to fill this task
	res  *accountResponse // Validated response filling this task
	pend int              // Number of pending subtasks for this round

	needCode  []bool // Flags whether the filling accounts need code retrieval
	needState []bool // Flags whether the filling accounts need storage retrieval
	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing

	codeTasks      map[common.Hash]struct{}    // Code hashes that need retrieval
	stateTasks     map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
	stateCompleted map[common.Hash]struct{}    // Account hashes whose storage has been completed

	genBatch ethdb.Batch // Batch used by the node generator
	genTrie  genTrie     // Node generator from storage slots

	done bool // Flag whether the task can be removed
}

// activeSubTasks returns the set of storage tasks covered by the current account
// range. Normally this would be the entire subTask set, but on a sync interrupt
// and later resume it can happen that a shorter account range is retrieved. This
// method ensures that we only start up the subtasks covered by the latest account
// response.
//
// Nil is returned if the account range is empty.
func (task *accountTask) activeSubTasks() map[common.Hash][]*storageTask {
	if len(task.res.hashes) == 0 {
		return nil
	}
	var (
		tasks = make(map[common.Hash][]*storageTask)
		last  = task.res.hashes[len(task.res.hashes)-1]
	)
	for hash, subTasks := range task.SubTasks {
		if hash.Cmp(last) <= 0 {
			tasks[hash] = subTasks
		}
	}
	return tasks
}
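
// A minimal sketch of the filtering above, with hypothetical hashes h1 < h2 < h3
// and subtask slices subsA/subsB (none of these exist in this file): if the
// latest account response only reached h2, subtasks keyed beyond it stay dormant
// until a later response covers them:
//
//	task.res.hashes = []common.Hash{h1, h2}
//	task.SubTasks = map[common.Hash][]*storageTask{h1: subsA, h3: subsB}
//	active := task.activeSubTasks() // only h1: subsA is returned, since h3 > h2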

// storageTask represents the sync task for a chunk of the storage snapshot.
type storageTask struct {
	Next common.Hash // Next storage slot to sync in this interval
	Last common.Hash // Last storage slot to sync in this interval

	// These fields are internals used during runtime
	root common.Hash     // Storage root hash for this instance
	req  *storageRequest // Pending request to fill this task

	genBatch ethdb.Batch // Batch used by the node generator
	genTrie  genTrie     // Node generator from storage slots

	done bool // Flag whether the task can be removed
}

// healTask represents the sync task for healing the snap-synced chunk boundaries.
type healTask struct {
	scheduler *trie.Sync // State trie sync scheduler defining the tasks

	trieTasks map[string]common.Hash   // Set of trie node tasks currently queued for retrieval, indexed by node path
	codeTasks map[common.Hash]struct{} // Set of bytecode tasks currently queued for retrieval, indexed by code hash
}

// SyncProgress is a database entry to allow suspending and resuming a snapshot state
// sync. As opposed to full and fast sync, there is no way to restart a suspended
// snap sync without prior knowledge of the suspension point.
type SyncProgress struct {
	Tasks []*accountTask // The suspended account tasks (contract tasks within)

	// Status report during syncing phase
	AccountSynced  uint64             // Number of accounts downloaded
	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
	BytecodeSynced uint64             // Number of bytecodes downloaded
	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
	StorageSynced  uint64             // Number of storage slots downloaded
	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk

	// Status report during healing phase
	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
	BytecodeHealSynced uint64             // Number of bytecodes downloaded
	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
}

// SyncPending is analogous to SyncProgress, but it's used to report on pending
// ephemeral sync progress that doesn't get persisted into the database.
type SyncPending struct {
	TrienodeHeal uint64 // Number of state trie nodes pending
	BytecodeHeal uint64 // Number of bytecodes pending
}

// SyncPeer abstracts out the methods required for a peer to be synced against
// with the goal of allowing the construction of mock peers without the
// full-blown networking.
type SyncPeer interface {
	// ID retrieves the peer's unique identifier.
	ID() string

	// RequestAccountRange fetches a batch of accounts rooted in a specific account
	// trie, starting with the origin.
	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error

	// RequestStorageRanges fetches a batch of storage slots belonging to one or
	// more accounts. If slots from only one account are requested, an origin marker
	// may also be used to retrieve from there.
	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error

	// RequestByteCodes fetches a batch of bytecodes by hash.
	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error

	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
	// a specific state trie.
	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error

	// Log retrieves the peer's own contextual logger.
	Log() log.Logger
}
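
// A minimal sketch of a mock satisfying SyncPeer (hypothetical, for tests or
// illustration only; it refuses every request):
//
//	type nullPeer struct{ id string }
//
//	func (p nullPeer) ID() string      { return p.id }
//	func (p nullPeer) Log() log.Logger { return log.New("peer", p.id) }
//
//	func (p nullPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
//		return errors.New("not serving")
//	}
//
//	func (p nullPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
//		return errors.New("not serving")
//	}
//
//	func (p nullPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
//		return errors.New("not serving")
//	}
//
//	func (p nullPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
//		return errors.New("not serving")
//	}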

// Syncer is an Ethereum account and storage trie syncer based on snapshots and
// the snap protocol. Its purpose is to download all the accounts and storage
// slots from remote peers and reassemble chunks of the state trie, on top of
// which a state sync can be run to fix any gaps / overlaps.
//
// Every network request has a variety of failure events:
//   - The peer disconnects after task assignment, failing to send the request
//   - The peer disconnects after sending the request, before delivering on it
//   - The peer remains connected, but does not deliver a response in time
//   - The peer delivers a stale response after a previous timeout
//   - The peer delivers a refusal to serve the requested state
type Syncer struct {
	db     ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
	scheme string              // Node scheme used in node database

	root    common.Hash    // Current state trie root being synced
	tasks   []*accountTask // Current account task set being synced
	snapped bool           // Flag to signal that snap phase is done
	healer  *healTask      // Current state healing task being executed
	update  chan struct{}  // Notification channel for possible sync progression

	peers    map[string]SyncPeer // Currently active peers to download from
	peerJoin *event.Feed         // Event feed to react to peers joining
	peerDrop *event.Feed         // Event feed to react to peers dropping
	rates    *msgrate.Trackers   // Message throughput rates for peers

	// Request tracking during syncing phase
	statelessPeers map[string]struct{} // Peers that failed to deliver state data
	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests

	accountReqs  map[uint64]*accountRequest  // Account requests currently running
	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
	storageReqs  map[uint64]*storageRequest  // Storage requests currently running

	accountSynced  uint64             // Number of accounts downloaded
	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
	bytecodeSynced uint64             // Number of bytecodes downloaded
	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
	storageSynced  uint64             // Number of storage slots downloaded
	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk

	extProgress *SyncProgress // Progress that can be exposed to external callers

	// Request tracking during healing phase
	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests

	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running

	trienodeHealRate      float64       // Average heal rate for processing trie node data
	trienodeHealPend      atomic.Uint64 // Number of trie nodes currently pending for processing
	trienodeHealThrottle  float64       // Divisor for throttling the amount of trienode heal data requested
	trienodeHealThrottled time.Time     // Timestamp the last time the throttle was updated

	trienodeHealSynced uint64             // Number of state trie nodes downloaded
	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
	trienodeHealDups   uint64             // Number of state trie nodes already processed
	trienodeHealNops   uint64             // Number of state trie nodes not requested
	bytecodeHealSynced uint64             // Number of bytecodes downloaded
	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
	bytecodeHealDups   uint64             // Number of bytecodes already processed
	bytecodeHealNops   uint64             // Number of bytecodes not requested

	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
	accountHealed      uint64             // Number of accounts downloaded during the healing stage
	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage

	startTime time.Time // Time instance when snapshot sync started
	logTime   time.Time // Time instance when status was last reported

	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
}

// NewSyncer creates a new snapshot syncer to download the Ethereum state over the
// snap protocol.
func NewSyncer(db ethdb.KeyValueStore, scheme string) *Syncer {
	return &Syncer{
		db:     db,
		scheme: scheme,

		peers:    make(map[string]SyncPeer),
		peerJoin: new(event.Feed),
		peerDrop: new(event.Feed),
		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
		update:   make(chan struct{}, 1),

		accountIdlers:  make(map[string]struct{}),
		storageIdlers:  make(map[string]struct{}),
		bytecodeIdlers: make(map[string]struct{}),

		accountReqs:  make(map[uint64]*accountRequest),
		storageReqs:  make(map[uint64]*storageRequest),
		bytecodeReqs: make(map[uint64]*bytecodeRequest),

		trienodeHealIdlers: make(map[string]struct{}),
		bytecodeHealIdlers: make(map[string]struct{}),

		trienodeHealReqs:     make(map[uint64]*trienodeHealRequest),
		bytecodeHealReqs:     make(map[uint64]*bytecodeHealRequest),
		trienodeHealThrottle: maxTrienodeHealThrottle, // Tune downward instead of insta-filling with junk
		stateWriter:          db.NewBatch(),

		extProgress: new(SyncProgress),
	}
}
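
// A minimal usage sketch (hypothetical wiring; db, peer, root and the cancel
// channel are assumptions of the example, not values from this file):
//
//	syncer := NewSyncer(db, rawdb.HashScheme)
//	if err := syncer.Register(peer); err != nil {
//		log.Error("Failed to register snap peer", "err", err)
//	}
//	cancel := make(chan struct{})
//	if err := syncer.Sync(root, cancel); err != nil {
//		log.Error("Snap sync failed", "err", err)
//	}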

// Register injects a new data source into the syncer's peerset.
func (s *Syncer) Register(peer SyncPeer) error {
	// Make sure the peer is not registered yet
	id := peer.ID()

	s.lock.Lock()
	if _, ok := s.peers[id]; ok {
		log.Error("Snap peer already registered", "id", id)

		s.lock.Unlock()
		return errors.New("already registered")
	}
	s.peers[id] = peer
	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))

	// Mark the peer as idle, even if no sync is running
	s.accountIdlers[id] = struct{}{}
	s.storageIdlers[id] = struct{}{}
	s.bytecodeIdlers[id] = struct{}{}
	s.trienodeHealIdlers[id] = struct{}{}
	s.bytecodeHealIdlers[id] = struct{}{}
	s.lock.Unlock()

	// Notify any active syncs that a new peer can be assigned data
	s.peerJoin.Send(id)
	return nil
}

// Unregister removes a data source from the syncer's peerset.
func (s *Syncer) Unregister(id string) error {
	// Remove all traces of the peer from the registry
	s.lock.Lock()
	if _, ok := s.peers[id]; !ok {
		log.Error("Snap peer not registered", "id", id)

		s.lock.Unlock()
		return errors.New("not registered")
	}
	delete(s.peers, id)
	s.rates.Untrack(id)

	// Remove status markers, even if no sync is running
	delete(s.statelessPeers, id)

	delete(s.accountIdlers, id)
	delete(s.storageIdlers, id)
	delete(s.bytecodeIdlers, id)
	delete(s.trienodeHealIdlers, id)
	delete(s.bytecodeHealIdlers, id)
	s.lock.Unlock()

	// Notify any active syncs that pending requests need to be reverted
	s.peerDrop.Send(id)
	return nil
}

// Sync starts (or resumes a previous) sync cycle to iterate over a state trie
// with the given root and reconstruct the nodes based on the snapshot leaves.
// Previously downloaded segments will not be redownloaded or fixed, rather any
// errors will be healed after the leaves are fully accumulated.
func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
	// Move the trie root from any previous value, revert stateless markers for
	// any peers and initialize the syncer if it was not yet run
	s.lock.Lock()
	s.root = root
	s.healer = &healTask{
		scheduler: state.NewStateSync(root, s.db, s.onHealState, s.scheme),
		trieTasks: make(map[string]common.Hash),
		codeTasks: make(map[common.Hash]struct{}),
	}
	s.statelessPeers = make(map[string]struct{})
	s.lock.Unlock()

	if s.startTime == (time.Time{}) {
		s.startTime = time.Now()
	}
	// Retrieve the previous sync status from LevelDB and abort if already synced
	s.loadSyncStatus()
	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
		log.Debug("Snapshot sync already completed")
		return nil
	}
	defer func() { // Persist any progress, independent of failure
		for _, task := range s.tasks {
			s.forwardAccountTask(task)
		}
		s.cleanAccountTasks()
		s.saveSyncStatus()
	}()

	log.Debug("Starting snapshot sync cycle", "root", root)

	// Flush out the last committed raw states
	defer func() {
		if s.stateWriter.ValueSize() > 0 {
			s.stateWriter.Write()
			s.stateWriter.Reset()
		}
	}()
	defer s.report(true)
	// commit any trie- and bytecode-healing data.
	defer s.commitHealer(true)

	// Whether sync completed or not, disregard any future packets
	defer func() {
		log.Debug("Terminating snapshot sync cycle", "root", root)
		s.lock.Lock()
		s.accountReqs = make(map[uint64]*accountRequest)
		s.storageReqs = make(map[uint64]*storageRequest)
		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
		s.lock.Unlock()
	}()
	// Keep scheduling sync tasks
	peerJoin := make(chan string, 16)
	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
	defer peerJoinSub.Unsubscribe()

	peerDrop := make(chan string, 16)
	peerDropSub := s.peerDrop.Subscribe(peerDrop)
	defer peerDropSub.Unsubscribe()

	// Create a set of unique channels for this sync cycle. We need these to be
	// ephemeral so a data race doesn't accidentally deliver something stale on
	// a persistent channel across syncs (yup, this happened)
	var (
		accountReqFails      = make(chan *accountRequest)
		storageReqFails      = make(chan *storageRequest)
		bytecodeReqFails     = make(chan *bytecodeRequest)
		accountResps         = make(chan *accountResponse)
		storageResps         = make(chan *storageResponse)
		bytecodeResps        = make(chan *bytecodeResponse)
		trienodeHealReqFails = make(chan *trienodeHealRequest)
		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
		trienodeHealResps    = make(chan *trienodeHealResponse)
		bytecodeHealResps    = make(chan *bytecodeHealResponse)
	)
	for {
		// Remove all completed tasks and terminate sync if everything's done
		s.cleanStorageTasks()
		s.cleanAccountTasks()
		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
			return nil
		}
		// Assign all the data retrieval tasks to any free peers
		s.assignAccountTasks(accountResps, accountReqFails, cancel)
		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
		s.assignStorageTasks(storageResps, storageReqFails, cancel)

		if len(s.tasks) == 0 {
			// Sync phase done, run heal phase
			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
		}
		// Update sync progress
		s.lock.Lock()
		s.extProgress = &SyncProgress{
			AccountSynced:      s.accountSynced,
			AccountBytes:       s.accountBytes,
			BytecodeSynced:     s.bytecodeSynced,
			BytecodeBytes:      s.bytecodeBytes,
			StorageSynced:      s.storageSynced,
			StorageBytes:       s.storageBytes,
			TrienodeHealSynced: s.trienodeHealSynced,
			TrienodeHealBytes:  s.trienodeHealBytes,
			BytecodeHealSynced: s.bytecodeHealSynced,
			BytecodeHealBytes:  s.bytecodeHealBytes,
		}
		s.lock.Unlock()
		// Wait for something to happen
		select {
		case <-s.update:
			// Something happened (new peer, delivery, timeout), recheck tasks
		case <-peerJoin:
			// A new peer joined, try to schedule it new tasks
		case id := <-peerDrop:
			s.revertRequests(id)
		case <-cancel:
			return ErrCancelled

		case req := <-accountReqFails:
			s.revertAccountRequest(req)
		case req := <-bytecodeReqFails:
			s.revertBytecodeRequest(req)
		case req := <-storageReqFails:
			s.revertStorageRequest(req)
		case req := <-trienodeHealReqFails:
			s.revertTrienodeHealRequest(req)
		case req := <-bytecodeHealReqFails:
			s.revertBytecodeHealRequest(req)

		case res := <-accountResps:
			s.processAccountResponse(res)
		case res := <-bytecodeResps:
			s.processBytecodeResponse(res)
		case res := <-storageResps:
			s.processStorageResponse(res)
		case res := <-trienodeHealResps:
			s.processTrienodeHealResponse(res)
		case res := <-bytecodeHealResps:
			s.processBytecodeHealResponse(res)
		}
		// Report stats if something meaningful happened
		s.report(false)
	}
}

// loadSyncStatus retrieves a previously aborted sync status from the database,
// or generates a fresh one if none is available.
func (s *Syncer) loadSyncStatus() {
	var progress SyncProgress

	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
		if err := json.Unmarshal(status, &progress); err != nil {
			log.Error("Failed to decode snap sync status", "err", err)
		} else {
			for _, task := range progress.Tasks {
				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
			}
			s.tasks = progress.Tasks
			for _, task := range s.tasks {
				// Restore the completed storages
				task.stateCompleted = make(map[common.Hash]struct{})
				for _, hash := range task.StorageCompleted {
					task.stateCompleted[hash] = struct{}{}
				}
				task.StorageCompleted = nil

				// Allocate batch for account trie generation
				task.genBatch = ethdb.HookedBatch{
					Batch: s.db.NewBatch(),
					OnPut: func(key []byte, value []byte) {
						s.accountBytes += common.StorageSize(len(key) + len(value))
					},
				}
				if s.scheme == rawdb.HashScheme {
					task.genTrie = newHashTrie(task.genBatch)
				}
				if s.scheme == rawdb.PathScheme {
					task.genTrie = newPathTrie(common.Hash{}, task.Next != common.Hash{}, s.db, task.genBatch)
				}
				// Restore leftover storage tasks
				for accountHash, subtasks := range task.SubTasks {
					for _, subtask := range subtasks {
						subtask.genBatch = ethdb.HookedBatch{
							Batch: s.db.NewBatch(),
							OnPut: func(key []byte, value []byte) {
								s.storageBytes += common.StorageSize(len(key) + len(value))
							},
						}
						if s.scheme == rawdb.HashScheme {
							subtask.genTrie = newHashTrie(subtask.genBatch)
						}
						if s.scheme == rawdb.PathScheme {
							subtask.genTrie = newPathTrie(accountHash, subtask.Next != common.Hash{}, s.db, subtask.genBatch)
						}
					}
				}
			}
			s.lock.Lock()
			defer s.lock.Unlock()

			s.snapped = len(s.tasks) == 0

			s.accountSynced = progress.AccountSynced
			s.accountBytes = progress.AccountBytes
			s.bytecodeSynced = progress.BytecodeSynced
			s.bytecodeBytes = progress.BytecodeBytes
			s.storageSynced = progress.StorageSynced
			s.storageBytes = progress.StorageBytes

			s.trienodeHealSynced = progress.TrienodeHealSynced
			s.trienodeHealBytes = progress.TrienodeHealBytes
			s.bytecodeHealSynced = progress.BytecodeHealSynced
			s.bytecodeHealBytes = progress.BytecodeHealBytes
			return
		}
	}
	// Either we've failed to decode the previous state, or there was none.
	// Start a fresh sync by chunking up the account range and scheduling
	// the chunks for retrieval.
	s.tasks = nil
	s.accountSynced, s.accountBytes = 0, 0
	s.bytecodeSynced, s.bytecodeBytes = 0, 0
	s.storageSynced, s.storageBytes = 0, 0
	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0

	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			// Make sure we don't overflow if the step is not a proper divisor
			last = common.MaxHash
		}
		batch := ethdb.HookedBatch{
			Batch: s.db.NewBatch(),
			OnPut: func(key []byte, value []byte) {
				s.accountBytes += common.StorageSize(len(key) + len(value))
			},
		}
		var tr genTrie
		if s.scheme == rawdb.HashScheme {
			tr = newHashTrie(batch)
		}
		if s.scheme == rawdb.PathScheme {
			tr = newPathTrie(common.Hash{}, next != common.Hash{}, s.db, batch)
		}
		s.tasks = append(s.tasks, &accountTask{
			Next:           next,
			Last:           last,
			SubTasks:       make(map[common.Hash][]*storageTask),
			genBatch:       batch,
			stateCompleted: make(map[common.Hash]struct{}),
			genTrie:        tr,
		})
		log.Debug("Created account sync task", "from", next, "last", last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
}
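
// A worked sketch of the chunking above: with accountConcurrency = 16 the step
// is 2^256/16 - 1, so the hash space is cut into 16 even, non-overlapping ranges:
//
//	task  0: Next = 0x0000…0000, Last = 0x0fff…ffff
//	task  1: Next = 0x1000…0000, Last = 0x1fff…ffff
//	...
//	task 15: Next = 0xf000…0000, Last = common.MaxHash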

// saveSyncStatus marshals the remaining sync tasks into leveldb.
func (s *Syncer) saveSyncStatus() {
	// Serialize any partial progress to disk before spinning down
	for _, task := range s.tasks {
		// Claim the right boundary as incomplete before flushing the
		// accumulated nodes in batch; the nodes on the right boundary
		// will be discarded and cleaned up by this call.
		task.genTrie.commit(false)
		if err := task.genBatch.Write(); err != nil {
			log.Error("Failed to persist account slots", "err", err)
		}
		for _, subtasks := range task.SubTasks {
			for _, subtask := range subtasks {
				// As with the account trie, discard and clean up the
				// incomplete right boundary.
				subtask.genTrie.commit(false)
				if err := subtask.genBatch.Write(); err != nil {
					log.Error("Failed to persist storage slots", "err", err)
				}
			}
		}
		// Save the account hashes of completed storage.
		task.StorageCompleted = make([]common.Hash, 0, len(task.stateCompleted))
		for hash := range task.stateCompleted {
			task.StorageCompleted = append(task.StorageCompleted, hash)
		}
		if len(task.StorageCompleted) > 0 {
			log.Debug("Leftover completed storages", "number", len(task.StorageCompleted), "next", task.Next, "last", task.Last)
		}
	}
	// Store the actual progress markers
	progress := &SyncProgress{
		Tasks:              s.tasks,
		AccountSynced:      s.accountSynced,
		AccountBytes:       s.accountBytes,
		BytecodeSynced:     s.bytecodeSynced,
		BytecodeBytes:      s.bytecodeBytes,
		StorageSynced:      s.storageSynced,
		StorageBytes:       s.storageBytes,
		TrienodeHealSynced: s.trienodeHealSynced,
		TrienodeHealBytes:  s.trienodeHealBytes,
		BytecodeHealSynced: s.bytecodeHealSynced,
		BytecodeHealBytes:  s.bytecodeHealBytes,
	}
	status, err := json.Marshal(progress)
	if err != nil {
		panic(err) // This can only fail during implementation
	}
	rawdb.WriteSnapshotSyncStatus(s.db, status)
}
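
// For illustration, the persisted status is the plain JSON encoding of
// SyncProgress; a hypothetical, heavily abridged example of what lands in the
// database (real hashes are full 32-byte hex strings):
//
//	{"Tasks":[{"Next":"0x1000…","Last":"0x1fff…","SubTasks":{}}],
//	 "AccountSynced":1024,"AccountBytes":262144,"StorageSynced":4096,...}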

// Progress returns the snap sync status statistics.
func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
	s.lock.Lock()
	defer s.lock.Unlock()
	pending := new(SyncPending)
	if s.healer != nil {
		pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
		pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
	}
	return s.extProgress, pending
}
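
// A usage sketch (hypothetical caller; syncer is an assumption of the example):
//
//	prog, pending := syncer.Progress()
//	log.Info("Snap sync status", "accounts", prog.AccountSynced,
//		"healTrienodes", pending.TrienodeHeal, "healCodes", pending.BytecodeHeal)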

// cleanAccountTasks removes account range retrieval tasks that have already been
// completed.
func (s *Syncer) cleanAccountTasks() {
	// If the sync was already done before, don't even bother
	if len(s.tasks) == 0 {
		return
	}
	// Sync wasn't finished previously, check for any task that can be finalized
	for i := 0; i < len(s.tasks); i++ {
		if s.tasks[i].done {
			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
			i--
		}
	}
	// If everything was just finalized, generate the account trie and start healing
	if len(s.tasks) == 0 {
		s.lock.Lock()
		s.snapped = true
		s.lock.Unlock()

		// Push the final sync report
		s.reportSyncProgress(true)
	}
}

// cleanStorageTasks iterates over all the account tasks and storage sub-tasks
// within, cleaning any that have been completed.
func (s *Syncer) cleanStorageTasks() {
	for _, task := range s.tasks {
		for account, subtasks := range task.SubTasks {
			// Remove storage range retrieval tasks that completed
			for j := 0; j < len(subtasks); j++ {
				if subtasks[j].done {
					subtasks = append(subtasks[:j], subtasks[j+1:]...)
					j--
				}
			}
			if len(subtasks) > 0 {
				task.SubTasks[account] = subtasks
				continue
			}
			// If all storage chunks are done, mark the account as done too
			for j, hash := range task.res.hashes {
				if hash == account {
					task.needState[j] = false
				}
			}
			delete(task.SubTasks, account)
			task.pend--

			// Mark the state as complete to prevent resyncing, regardless
			// of whether state healing is necessary.
			task.stateCompleted[account] = struct{}{}

			// If this was the last pending task, forward the account task
			if task.pend == 0 {
				s.forwardAccountTask(task)
			}
		}
	}
}

// assignAccountTasks attempts to match idle peers to pending account range
// retrievals.
func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Sort the peers by download capacity to use faster ones if many available
	idlers := &capacitySort{
		ids:  make([]string, 0, len(s.accountIdlers)),
		caps: make([]int, 0, len(s.accountIdlers)),
	}
	targetTTL := s.rates.TargetTimeout()
	for id := range s.accountIdlers {
		if _, ok := s.statelessPeers[id]; ok {
			continue
		}
		idlers.ids = append(idlers.ids, id)
		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
	}
	if len(idlers.ids) == 0 {
		return
	}
	sort.Sort(sort.Reverse(idlers))

	// Iterate over all the tasks and try to find a pending one
	for _, task := range s.tasks {
		// Skip any tasks already filling
		if task.req != nil || task.res != nil {
			continue
		}
		// Task pending retrieval, try to find an idle peer. If no such peer
		// exists, we probably assigned tasks for all (or they are stateless).
		// Abort the entire assignment mechanism.
		if len(idlers.ids) == 0 {
			return
		}
		var (
			idle = idlers.ids[0]
			peer = s.peers[idle]
			cap  = idlers.caps[0]
		)
		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]

		// Matched a pending task to an idle peer, allocate a unique request id
		var reqid uint64
		for {
			reqid = uint64(rand.Int63())
			if reqid == 0 {
				continue
			}
			if _, ok := s.accountReqs[reqid]; ok {
				continue
			}
			break
		}
		// Generate the network query and send it to the peer
		req := &accountRequest{
			peer:    idle,
			id:      reqid,
			time:    time.Now(),
			deliver: success,
			revert:  fail,
			cancel:  cancel,
			stale:   make(chan struct{}),
			origin:  task.Next,
			limit:   task.Last,
			task:    task,
		}
		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
			peer.Log().Debug("Account range request timed out", "reqid", reqid)
			s.rates.Update(idle, AccountRangeMsg, 0, 0)
			s.scheduleRevertAccountRequest(req)
		})
		s.accountReqs[reqid] = req
		delete(s.accountIdlers, idle)

		s.pend.Add(1)
		go func(root common.Hash) {
			defer s.pend.Done()

			// Attempt to send the remote request and revert if it fails
			if cap > maxRequestSize {
				cap = maxRequestSize
			}
			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
				cap = minRequestSize
			}
			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
				peer.Log().Debug("Failed to request account range", "err", err)
				s.scheduleRevertAccountRequest(req)
			}
		}(s.root)

		// Inject the request into the task to block further assignments
		task.req = req
	}
}
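
// The byte-capacity clamp used in the dispatch above, as a standalone sketch
// (an equivalent formulation, assuming the Go 1.21 min/max builtins):
//
//	cap := s.rates.Capacity(idle, AccountRangeMsg, targetTTL) // estimated bytes per RTT
//	cap = max(minRequestSize, min(cap, maxRequestSize))       // keep within [64KiB, 512KiB]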

// assignBytecodeTasks attempts to match idle peers to pending code retrievals.
func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Sort the peers by download capacity to use faster ones if many available
	idlers := &capacitySort{
		ids:  make([]string, 0, len(s.bytecodeIdlers)),
		caps: make([]int, 0, len(s.bytecodeIdlers)),
	}
	targetTTL := s.rates.TargetTimeout()
	for id := range s.bytecodeIdlers {
		if _, ok := s.statelessPeers[id]; ok {
			continue
		}
		idlers.ids = append(idlers.ids, id)
		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
	}
	if len(idlers.ids) == 0 {
		return
	}
	sort.Sort(sort.Reverse(idlers))

	// Iterate over all the tasks and try to find a pending one
	for _, task := range s.tasks {
		// Skip any tasks not in the bytecode retrieval phase
		if task.res == nil {
			continue
		}
		// Skip tasks that are already retrieving (or done with) all codes
		if len(task.codeTasks) == 0 {
			continue
		}
		// Task pending retrieval, try to find an idle peer. If no such peer
		// exists, we probably assigned tasks for all (or they are stateless).
		// Abort the entire assignment mechanism.
		if len(idlers.ids) == 0 {
			return
		}
		var (
			idle = idlers.ids[0]
			peer = s.peers[idle]
			cap  = idlers.caps[0]
		)
		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]

		// Matched a pending task to an idle peer, allocate a unique request id
		var reqid uint64
		for {
			reqid = uint64(rand.Int63())
			if reqid == 0 {
				continue
			}
			if _, ok := s.bytecodeReqs[reqid]; ok {
				continue
			}
			break
		}
		// Generate the network query and send it to the peer
		if cap > maxCodeRequestCount {
			cap = maxCodeRequestCount
		}
		hashes := make([]common.Hash, 0, cap)
		for hash := range task.codeTasks {
			delete(task.codeTasks, hash)
			hashes = append(hashes, hash)
			if len(hashes) >= cap {
				break
			}
		}
		req := &bytecodeRequest{
			peer:    idle,
			id:      reqid,
			time:    time.Now(),
			deliver: success,
			revert:  fail,
			cancel:  cancel,
			stale:   make(chan struct{}),
			hashes:  hashes,
			task:    task,
		}
		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
			s.rates.Update(idle, ByteCodesMsg, 0, 0)
			s.scheduleRevertBytecodeRequest(req)
		})
		s.bytecodeReqs[reqid] = req
		delete(s.bytecodeIdlers, idle)

		s.pend.Add(1)
		go func() {
			defer s.pend.Done()

			// Attempt to send the remote request and revert if it fails
			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
				log.Debug("Failed to request bytecodes", "err", err)
				s.scheduleRevertBytecodeRequest(req)
			}
		}()
	}
}
  1197  
  1198  // assignStorageTasks attempts to match idle peers to pending storage range
  1199  // retrievals.
  1200  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1201  	s.lock.Lock()
  1202  	defer s.lock.Unlock()
  1203  
  1204  	// Sort the peers by download capacity to use faster ones if many available
  1205  	idlers := &capacitySort{
  1206  		ids:  make([]string, 0, len(s.storageIdlers)),
  1207  		caps: make([]int, 0, len(s.storageIdlers)),
  1208  	}
  1209  	targetTTL := s.rates.TargetTimeout()
  1210  	for id := range s.storageIdlers {
  1211  		if _, ok := s.statelessPeers[id]; ok {
  1212  			continue
  1213  		}
  1214  		idlers.ids = append(idlers.ids, id)
  1215  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1216  	}
  1217  	if len(idlers.ids) == 0 {
  1218  		return
  1219  	}
  1220  	sort.Sort(sort.Reverse(idlers))
  1221  
  1222  	// Iterate over all the tasks and try to find a pending one
  1223  	for _, task := range s.tasks {
  1224  		// Skip any tasks not in the storage retrieval phase
  1225  		if task.res == nil {
  1226  			continue
  1227  		}
  1228  		// Skip tasks that are already retrieving (or done with) all small states
  1229  		storageTasks := task.activeSubTasks()
  1230  		if len(storageTasks) == 0 && len(task.stateTasks) == 0 {
  1231  			continue
  1232  		}
  1233  		// Task pending retrieval, try to find an idle peer. If no such peer
  1234  		// exists, we probably assigned tasks to all of them (or they are stateless).
  1235  		// Abort the entire assignment mechanism.
  1236  		if len(idlers.ids) == 0 {
  1237  			return
  1238  		}
  1239  		var (
  1240  			idle = idlers.ids[0]
  1241  			peer = s.peers[idle]
  1242  			cap  = idlers.caps[0]
  1243  		)
  1244  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1245  
  1246  		// Matched a pending task to an idle peer, allocate a unique request id
  1247  		var reqid uint64
  1248  		for {
  1249  			reqid = uint64(rand.Int63())
  1250  			if reqid == 0 {
  1251  				continue
  1252  			}
  1253  			if _, ok := s.storageReqs[reqid]; ok {
  1254  				continue
  1255  			}
  1256  			break
  1257  		}
  1258  		// Generate the network query and send it to the peer. If there are
  1259  		// large contract tasks pending, complete those before diving into
  1260  		// even more new contracts.
  1261  		if cap > maxRequestSize {
  1262  			cap = maxRequestSize
  1263  		}
  1264  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1265  			cap = minRequestSize
  1266  		}
  1267  		storageSets := cap / 1024
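        		// The capacity above is measured in response bytes, so dividing by
        		// 1024 is a heuristic: assuming roughly 1KB of storage data per
        		// small account, it yields the number of account storage sets to
        		// pack into one request (e.g. a minimum-capacity 64KB peer gets
        		// asked for 64 accounts).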
  1268  
  1269  		var (
  1270  			accounts = make([]common.Hash, 0, storageSets)
  1271  			roots    = make([]common.Hash, 0, storageSets)
  1272  			subtask  *storageTask
  1273  		)
  1274  		for account, subtasks := range storageTasks {
  1275  			for _, st := range subtasks {
  1276  				// Skip any subtasks already filling
  1277  				if st.req != nil {
  1278  					continue
  1279  				}
  1280  				// Found an incomplete storage chunk, schedule it
  1281  				accounts = append(accounts, account)
  1282  				roots = append(roots, st.root)
  1283  				subtask = st
  1284  				break // Large contract chunks are downloaded individually
  1285  			}
  1286  			if subtask != nil {
  1287  				break // Large contract chunks are downloaded individually
  1288  			}
  1289  		}
  1290  		if subtask == nil {
  1291  			// No large contract requires retrieval, but small ones are available
  1292  			for account, root := range task.stateTasks {
  1293  				delete(task.stateTasks, account)
  1294  
  1295  				accounts = append(accounts, account)
  1296  				roots = append(roots, root)
  1297  
  1298  				if len(accounts) >= storageSets {
  1299  					break
  1300  				}
  1301  			}
  1302  		}
  1303  		// If nothing was found, it means this task is already fully being
  1304  		// retrieved; large contracts are just hard to detect upfront. Skip to the next.
  1305  		if len(accounts) == 0 {
  1306  			continue
  1307  		}
  1308  		req := &storageRequest{
  1309  			peer:     idle,
  1310  			id:       reqid,
  1311  			time:     time.Now(),
  1312  			deliver:  success,
  1313  			revert:   fail,
  1314  			cancel:   cancel,
  1315  			stale:    make(chan struct{}),
  1316  			accounts: accounts,
  1317  			roots:    roots,
  1318  			mainTask: task,
  1319  			subTask:  subtask,
  1320  		}
  1321  		if subtask != nil {
  1322  			req.origin = subtask.Next
  1323  			req.limit = subtask.Last
  1324  		}
  1325  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1326  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1327  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1328  			s.scheduleRevertStorageRequest(req)
  1329  		})
  1330  		s.storageReqs[reqid] = req
  1331  		delete(s.storageIdlers, idle)
  1332  
  1333  		s.pend.Add(1)
  1334  		go func(root common.Hash) {
  1335  			defer s.pend.Done()
  1336  
  1337  			// Attempt to send the remote request and revert if it fails
  1338  			var origin, limit []byte
  1339  			if subtask != nil {
  1340  				origin, limit = req.origin[:], req.limit[:]
  1341  			}
  1342  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1343  				log.Debug("Failed to request storage", "err", err)
  1344  				s.scheduleRevertStorageRequest(req)
  1345  			}
  1346  		}(s.root)
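        		// Note: s.root is evaluated here while the syncer lock is still held
        		// and passed to the goroutine as a parameter, avoiding an
        		// unsynchronized read of shared syncer state from the sender goroutine.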
  1347  
  1348  		// Inject the request into the subtask to block further assignments
  1349  		if subtask != nil {
  1350  			subtask.req = req
  1351  		}
  1352  	}
  1353  }
  1354  
  1355  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1356  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1357  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1358  	s.lock.Lock()
  1359  	defer s.lock.Unlock()
  1360  
  1361  	// Sort the peers by download capacity to use faster ones if many available
  1362  	idlers := &capacitySort{
  1363  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1364  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1365  	}
  1366  	targetTTL := s.rates.TargetTimeout()
  1367  	for id := range s.trienodeHealIdlers {
  1368  		if _, ok := s.statelessPeers[id]; ok {
  1369  			continue
  1370  		}
  1371  		idlers.ids = append(idlers.ids, id)
  1372  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1373  	}
  1374  	if len(idlers.ids) == 0 {
  1375  		return
  1376  	}
  1377  	sort.Sort(sort.Reverse(idlers))
  1378  
  1379  	// Iterate over pending tasks and try to find a peer to retrieve with
  1380  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1381  		// If there are not enough trie tasks queued to fully assign, fill the
  1382  		// queue from the state sync scheduler. The trie sync schedules these
  1383  		// together with bytecodes, so we need to queue them combined.
  1384  		var (
  1385  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1386  			want = maxTrieRequestCount + maxCodeRequestCount
  1387  		)
  1388  		if have < want {
  1389  			paths, hashes, codes := s.healer.scheduler.Missing(want - have)
  1390  			for i, path := range paths {
  1391  				s.healer.trieTasks[path] = hashes[i]
  1392  			}
  1393  			for _, hash := range codes {
  1394  				s.healer.codeTasks[hash] = struct{}{}
  1395  			}
  1396  		}
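        		// The scheduler's Missing(n) returns up to n outstanding heal items,
        		// split into trie node (path, hash) pairs and bytecode hashes. Both
        		// kinds are buffered locally so the trienode and bytecode assigners
        		// can each drain their own queue without re-querying the scheduler.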
  1397  		// If all the heal tasks are bytecodes or already downloading, bail
  1398  		if len(s.healer.trieTasks) == 0 {
  1399  			return
  1400  		}
  1401  		// Task pending retrieval, try to find an idle peer. If no such peer
  1402  		// exists, we probably assigned tasks to all of them (or they are stateless).
  1403  		// Abort the entire assignment mechanism.
  1404  		if len(idlers.ids) == 0 {
  1405  			return
  1406  		}
  1407  		var (
  1408  			idle = idlers.ids[0]
  1409  			peer = s.peers[idle]
  1410  			cap  = idlers.caps[0]
  1411  		)
  1412  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1413  
  1414  		// Matched a pending task to an idle peer, allocate a unique request id
  1415  		var reqid uint64
  1416  		for {
  1417  			reqid = uint64(rand.Int63())
  1418  			if reqid == 0 {
  1419  				continue
  1420  			}
  1421  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1422  				continue
  1423  			}
  1424  			break
  1425  		}
  1426  		// Generate the network query and send it to the peer
  1427  		if cap > maxTrieRequestCount {
  1428  			cap = maxTrieRequestCount
  1429  		}
  1430  		cap = int(float64(cap) / s.trienodeHealThrottle)
  1431  		if cap <= 0 {
  1432  			cap = 1
  1433  		}
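        		// trienodeHealThrottle scales request sizes down when local trie node
        		// processing cannot keep up with the network, e.g. a throttle of 2.0
        		// halves the per-request item count:
        		//
        		//   cap = int(float64(cap) / 2.0) // 256 -> 128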
  1434  		var (
  1435  			hashes   = make([]common.Hash, 0, cap)
  1436  			paths    = make([]string, 0, cap)
  1437  			pathsets = make([]TrieNodePathSet, 0, cap)
  1438  		)
  1439  		for path, hash := range s.healer.trieTasks {
  1440  			delete(s.healer.trieTasks, path)
  1441  
  1442  			paths = append(paths, path)
  1443  			hashes = append(hashes, hash)
  1444  			if len(paths) >= cap {
  1445  				break
  1446  			}
  1447  		}
  1448  		// Group requests by account hash
  1449  		paths, hashes, _, pathsets = sortByAccountPath(paths, hashes)
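        		// As the helper's name suggests, sortByAccountPath groups the
        		// requested paths by owner account, so that all node paths under one
        		// account are merged into a single TrieNodePathSet as the snap
        		// protocol expects; the returned paths and hashes are reordered to match.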
  1450  		req := &trienodeHealRequest{
  1451  			peer:    idle,
  1452  			id:      reqid,
  1453  			time:    time.Now(),
  1454  			deliver: success,
  1455  			revert:  fail,
  1456  			cancel:  cancel,
  1457  			stale:   make(chan struct{}),
  1458  			paths:   paths,
  1459  			hashes:  hashes,
  1460  			task:    s.healer,
  1461  		}
  1462  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1463  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1464  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1465  			s.scheduleRevertTrienodeHealRequest(req)
  1466  		})
  1467  		s.trienodeHealReqs[reqid] = req
  1468  		delete(s.trienodeHealIdlers, idle)
  1469  
  1470  		s.pend.Add(1)
  1471  		go func(root common.Hash) {
  1472  			defer s.pend.Done()
  1473  
  1474  			// Attempt to send the remote request and revert if it fails
  1475  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1476  				log.Debug("Failed to request trienode healers", "err", err)
  1477  				s.scheduleRevertTrienodeHealRequest(req)
  1478  			}
  1479  		}(s.root)
  1480  	}
  1481  }
  1482  
  1483  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1484  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1485  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1486  	s.lock.Lock()
  1487  	defer s.lock.Unlock()
  1488  
  1489  	// Sort the peers by download capacity to use faster ones if many available
  1490  	idlers := &capacitySort{
  1491  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1492  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1493  	}
  1494  	targetTTL := s.rates.TargetTimeout()
  1495  	for id := range s.bytecodeHealIdlers {
  1496  		if _, ok := s.statelessPeers[id]; ok {
  1497  			continue
  1498  		}
  1499  		idlers.ids = append(idlers.ids, id)
  1500  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1501  	}
  1502  	if len(idlers.ids) == 0 {
  1503  		return
  1504  	}
  1505  	sort.Sort(sort.Reverse(idlers))
  1506  
  1507  	// Iterate over pending tasks and try to find a peer to retrieve with
  1508  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1509  		// If there are not enough trie tasks queued to fully assign, fill the
  1510  		// queue from the state sync scheduler. The trie sync schedules these
  1511  		// together with trie nodes, so we need to queue them combined.
  1512  		var (
  1513  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1514  			want = maxTrieRequestCount + maxCodeRequestCount
  1515  		)
  1516  		if have < want {
  1517  			paths, hashes, codes := s.healer.scheduler.Missing(want - have)
  1518  			for i, path := range paths {
  1519  				s.healer.trieTasks[path] = hashes[i]
  1520  			}
  1521  			for _, hash := range codes {
  1522  				s.healer.codeTasks[hash] = struct{}{}
  1523  			}
  1524  		}
  1525  		// If all the heal tasks are trienodes or already downloading, bail
  1526  		if len(s.healer.codeTasks) == 0 {
  1527  			return
  1528  		}
  1529  		// Task pending retrieval, try to find an idle peer. If no such peer
  1530  		// exists, we probably assigned tasks for all (or they are stateless).
  1531  		// Abort the entire assignment mechanism.
  1532  		// exists, we probably assigned tasks to all of them (or they are stateless).
  1533  			return
  1534  		}
  1535  		var (
  1536  			idle = idlers.ids[0]
  1537  			peer = s.peers[idle]
  1538  			cap  = idlers.caps[0]
  1539  		)
  1540  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1541  
  1542  		// Matched a pending task to an idle peer, allocate a unique request id
  1543  		var reqid uint64
  1544  		for {
  1545  			reqid = uint64(rand.Int63())
  1546  			if reqid == 0 {
  1547  				continue
  1548  			}
  1549  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1550  				continue
  1551  			}
  1552  			break
  1553  		}
  1554  		// Generate the network query and send it to the peer
  1555  		if cap > maxCodeRequestCount {
  1556  			cap = maxCodeRequestCount
  1557  		}
  1558  		hashes := make([]common.Hash, 0, cap)
  1559  		for hash := range s.healer.codeTasks {
  1560  			delete(s.healer.codeTasks, hash)
  1561  
  1562  			hashes = append(hashes, hash)
  1563  			if len(hashes) >= cap {
  1564  				break
  1565  			}
  1566  		}
  1567  		req := &bytecodeHealRequest{
  1568  			peer:    idle,
  1569  			id:      reqid,
  1570  			time:    time.Now(),
  1571  			deliver: success,
  1572  			revert:  fail,
  1573  			cancel:  cancel,
  1574  			stale:   make(chan struct{}),
  1575  			hashes:  hashes,
  1576  			task:    s.healer,
  1577  		}
  1578  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1579  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1580  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1581  			s.scheduleRevertBytecodeHealRequest(req)
  1582  		})
  1583  		s.bytecodeHealReqs[reqid] = req
  1584  		delete(s.bytecodeHealIdlers, idle)
  1585  
  1586  		s.pend.Add(1)
  1587  		go func() {
  1588  			defer s.pend.Done()
  1589  
  1590  			// Attempt to send the remote request and revert if it fails
  1591  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1592  				log.Debug("Failed to request bytecode healers", "err", err)
  1593  				s.scheduleRevertBytecodeHealRequest(req)
  1594  			}
  1595  		}()
  1596  	}
  1597  }
  1598  
  1599  // revertRequests locates all the currently pending requests from a particular
  1600  // peer and reverts them, rescheduling for others to fulfill.
  1601  func (s *Syncer) revertRequests(peer string) {
  1602  	// Gather the requests first, as the reverts need the lock too
  1603  	s.lock.Lock()
  1604  	var accountReqs []*accountRequest
  1605  	for _, req := range s.accountReqs {
  1606  		if req.peer == peer {
  1607  			accountReqs = append(accountReqs, req)
  1608  		}
  1609  	}
  1610  	var bytecodeReqs []*bytecodeRequest
  1611  	for _, req := range s.bytecodeReqs {
  1612  		if req.peer == peer {
  1613  			bytecodeReqs = append(bytecodeReqs, req)
  1614  		}
  1615  	}
  1616  	var storageReqs []*storageRequest
  1617  	for _, req := range s.storageReqs {
  1618  		if req.peer == peer {
  1619  			storageReqs = append(storageReqs, req)
  1620  		}
  1621  	}
  1622  	var trienodeHealReqs []*trienodeHealRequest
  1623  	for _, req := range s.trienodeHealReqs {
  1624  		if req.peer == peer {
  1625  			trienodeHealReqs = append(trienodeHealReqs, req)
  1626  		}
  1627  	}
  1628  	var bytecodeHealReqs []*bytecodeHealRequest
  1629  	for _, req := range s.bytecodeHealReqs {
  1630  		if req.peer == peer {
  1631  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1632  		}
  1633  	}
  1634  	s.lock.Unlock()
  1635  
  1636  	// Revert all the requests matching the peer
  1637  	for _, req := range accountReqs {
  1638  		s.revertAccountRequest(req)
  1639  	}
  1640  	for _, req := range bytecodeReqs {
  1641  		s.revertBytecodeRequest(req)
  1642  	}
  1643  	for _, req := range storageReqs {
  1644  		s.revertStorageRequest(req)
  1645  	}
  1646  	for _, req := range trienodeHealReqs {
  1647  		s.revertTrienodeHealRequest(req)
  1648  	}
  1649  	for _, req := range bytecodeHealReqs {
  1650  		s.revertBytecodeHealRequest(req)
  1651  	}
  1652  }
  1653  
  1654  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1655  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1656  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
  1657  	select {
  1658  	case req.revert <- req:
  1659  		// Sync event loop notified
  1660  	case <-req.cancel:
  1661  		// Sync cycle got cancelled
  1662  	case <-req.stale:
  1663  		// Request already reverted
  1664  	}
  1665  }
  1666  
  1667  // revertAccountRequest cleans up an account range request and returns all failed
  1668  // retrieval tasks to the scheduler for reassignment.
  1669  //
  1670  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1671  // On peer threads, use scheduleRevertAccountRequest.
  1672  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1673  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1674  	select {
  1675  	case <-req.stale:
  1676  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1677  		return
  1678  	default:
  1679  	}
  1680  	close(req.stale)
  1681  
  1682  	// Remove the request from the tracked set
  1683  	s.lock.Lock()
  1684  	delete(s.accountReqs, req.id)
  1685  	s.lock.Unlock()
  1686  
  1687  	// If there's a timeout timer still running, abort it and mark the account
  1688  	// task as not-pending, ready for rescheduling
  1689  	req.timeout.Stop()
  1690  	if req.task.req == req {
  1691  		req.task.req = nil
  1692  	}
  1693  }
  1694  
  1695  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1696  // and return all failed retrieval tasks to the scheduler for reassignment.
  1697  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1698  	select {
  1699  	case req.revert <- req:
  1700  		// Sync event loop notified
  1701  	case <-req.cancel:
  1702  		// Sync cycle got cancelled
  1703  	case <-req.stale:
  1704  		// Request already reverted
  1705  	}
  1706  }
  1707  
  1708  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1709  // retrieval tasks to the scheduler for reassignment.
  1710  //
  1711  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1712  // On peer threads, use scheduleRevertBytecodeRequest.
  1713  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1714  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1715  	select {
  1716  	case <-req.stale:
  1717  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1718  		return
  1719  	default:
  1720  	}
  1721  	close(req.stale)
  1722  
  1723  	// Remove the request from the tracked set
  1724  	s.lock.Lock()
  1725  	delete(s.bytecodeReqs, req.id)
  1726  	s.lock.Unlock()
  1727  
  1728  	// If there's a timeout timer still running, abort it and mark the code
  1729  	// retrievals as not-pending, ready for rescheduling
  1730  	req.timeout.Stop()
  1731  	for _, hash := range req.hashes {
  1732  		req.task.codeTasks[hash] = struct{}{}
  1733  	}
  1734  }
  1735  
  1736  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1737  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1738  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1739  	select {
  1740  	case req.revert <- req:
  1741  		// Sync event loop notified
  1742  	case <-req.cancel:
  1743  		// Sync cycle got cancelled
  1744  	case <-req.stale:
  1745  		// Request already reverted
  1746  	}
  1747  }
  1748  
  1749  // revertStorageRequest cleans up a storage range request and returns all failed
  1750  // retrieval tasks to the scheduler for reassignment.
  1751  //
  1752  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1753  // On peer threads, use scheduleRevertStorageRequest.
  1754  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1755  	log.Debug("Reverting storage request", "peer", req.peer)
  1756  	select {
  1757  	case <-req.stale:
  1758  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1759  		return
  1760  	default:
  1761  	}
  1762  	close(req.stale)
  1763  
  1764  	// Remove the request from the tracked set
  1765  	s.lock.Lock()
  1766  	delete(s.storageReqs, req.id)
  1767  	s.lock.Unlock()
  1768  
  1769  	// If there's a timeout timer still running, abort it and mark the storage
  1770  	// task as not-pending, ready for rescheduling
  1771  	req.timeout.Stop()
  1772  	if req.subTask != nil {
  1773  		req.subTask.req = nil
  1774  	} else {
  1775  		for i, account := range req.accounts {
  1776  			req.mainTask.stateTasks[account] = req.roots[i]
  1777  		}
  1778  	}
  1779  }
  1780  
  1781  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1782  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1783  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1784  	select {
  1785  	case req.revert <- req:
  1786  		// Sync event loop notified
  1787  	case <-req.cancel:
  1788  		// Sync cycle got cancelled
  1789  	case <-req.stale:
  1790  		// Request already reverted
  1791  	}
  1792  }
  1793  
  1794  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1795  // failed retrieval tasks to the scheduler for reassignment.
  1796  //
  1797  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1798  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1799  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1800  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1801  	select {
  1802  	case <-req.stale:
  1803  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1804  		return
  1805  	default:
  1806  	}
  1807  	close(req.stale)
  1808  
  1809  	// Remove the request from the tracked set
  1810  	s.lock.Lock()
  1811  	delete(s.trienodeHealReqs, req.id)
  1812  	s.lock.Unlock()
  1813  
  1814  	// If there's a timeout timer still running, abort it and mark the trie node
  1815  	// retrievals as not-pending, ready for rescheduling
  1816  	req.timeout.Stop()
  1817  	for i, path := range req.paths {
  1818  		req.task.trieTasks[path] = req.hashes[i]
  1819  	}
  1820  }
  1821  
  1822  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1823  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1824  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1825  	select {
  1826  	case req.revert <- req:
  1827  		// Sync event loop notified
  1828  	case <-req.cancel:
  1829  		// Sync cycle got cancelled
  1830  	case <-req.stale:
  1831  		// Request already reverted
  1832  	}
  1833  }
  1834  
  1835  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1836  // failed retrieval tasks to the scheduler for reassignment.
  1837  //
  1838  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1839  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1840  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1841  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1842  	select {
  1843  	case <-req.stale:
  1844  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1845  		return
  1846  	default:
  1847  	}
  1848  	close(req.stale)
  1849  
  1850  	// Remove the request from the tracked set
  1851  	s.lock.Lock()
  1852  	delete(s.bytecodeHealReqs, req.id)
  1853  	s.lock.Unlock()
  1854  
  1855  	// If there's a timeout timer still running, abort it and mark the code
  1856  	// retrievals as not-pending, ready for rescheduling
  1857  	req.timeout.Stop()
  1858  	for _, hash := range req.hashes {
  1859  		req.task.codeTasks[hash] = struct{}{}
  1860  	}
  1861  }
  1862  
  1863  // processAccountResponse integrates an already validated account range response
  1864  // into the account tasks.
  1865  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1866  	// Switch the task from pending to filling
  1867  	res.task.req = nil
  1868  	res.task.res = res
  1869  
  1870  	// Ensure that the response doesn't overflow into the subsequent task
  1871  	lastBig := res.task.Last.Big()
  1872  	for i, hash := range res.hashes {
  1873  		// Mark the range complete if the last is already included.
  1874  		// Keep iterating to delete any extra states if they exist.
  1875  		cmp := hash.Big().Cmp(lastBig)
  1876  		if cmp == 0 {
  1877  			res.cont = false
  1878  			continue
  1879  		}
  1880  		if cmp > 0 {
  1881  			// Chunk overflown, cut off excess
  1882  			res.hashes = res.hashes[:i]
  1883  			res.accounts = res.accounts[:i]
  1884  			res.cont = false // Mark range completed
  1885  			break
  1886  		}
  1887  	}
  1888  	// Iterate over all the accounts and assemble which ones need further sub-
  1889  	// filling before the entire account range can be persisted.
  1890  	res.task.needCode = make([]bool, len(res.accounts))
  1891  	res.task.needState = make([]bool, len(res.accounts))
  1892  	res.task.needHeal = make([]bool, len(res.accounts))
  1893  
  1894  	res.task.codeTasks = make(map[common.Hash]struct{})
  1895  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1896  
  1897  	resumed := make(map[common.Hash]struct{})
  1898  
  1899  	res.task.pend = 0
  1900  	for i, account := range res.accounts {
  1901  		// Check if the account is a contract with an unknown code
  1902  		if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
  1903  			if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
  1904  				res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
  1905  				res.task.needCode[i] = true
  1906  				res.task.pend++
  1907  			}
  1908  		}
  1909  		// Check if the account is a contract with an unknown storage trie
  1910  		if account.Root != types.EmptyRootHash {
  1911  			// If the storage was already retrieved in the last cycle, there's no need
  1912  			// to resync it again, regardless of whether the storage root is consistent
  1913  			// or not.
  1914  			if _, exist := res.task.stateCompleted[res.hashes[i]]; exist {
  1915  				// The leftover storage tasks are not expected, unless system is
  1916  				// very wrong.
  1917  				if _, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1918  					panic(fmt.Errorf("unexpected leftover storage tasks, owner: %x", res.hashes[i]))
  1919  				}
  1920  				// Mark the healing tag if storage root node is inconsistent, or
  1921  				// it's non-existent due to storage chunking.
  1922  				if !rawdb.HasTrieNode(s.db, res.hashes[i], nil, account.Root, s.scheme) {
  1923  					res.task.needHeal[i] = true
  1924  				}
  1925  			} else {
  1926  				// If there was a previous large state retrieval in progress,
  1927  				// don't restart it from scratch. This happens if a sync cycle
  1928  				// is interrupted and resumed later. However, *do* update the
  1929  				// previous root hash.
  1930  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1931  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1932  					for _, subtask := range subtasks {
  1933  						subtask.root = account.Root
  1934  					}
  1935  					res.task.needHeal[i] = true
  1936  					resumed[res.hashes[i]] = struct{}{}
  1937  					largeStorageResumedGauge.Inc(1)
  1938  				} else {
  1939  					// It's possible that in the hash scheme, the storage, along
  1940  					// with the trie nodes of the given root, is already present
  1941  					// in the database. Schedule the storage task anyway to simplify
  1942  					// the logic here.
  1943  					res.task.stateTasks[res.hashes[i]] = account.Root
  1944  				}
  1945  				res.task.needState[i] = true
  1946  				res.task.pend++
  1947  			}
  1948  		}
  1949  	}
  1950  	// Delete any subtasks that have been aborted but not resumed. It's essential
  1951  	// as the corresponding contract might have been self-destructed in this
  1952  	// cycle (it's no longer possible in ethereum as self-destruction is disabled
  1953  	// since the Cancun fork, but the condition is still necessary for other networks).
  1954  	//
  1955  	// Keep the leftover storage tasks if they are not covered by the responded
  1956  	// account range which should be picked up in next account wave.
  1957  	if len(res.hashes) > 0 {
  1958  		// The hash of last delivered account in the response
  1959  		last := res.hashes[len(res.hashes)-1]
  1960  		for hash := range res.task.SubTasks {
  1961  			// TODO(rjl493456442) degrade the log level before merging.
  1962  			if hash.Cmp(last) > 0 {
  1963  				log.Info("Keeping suspended storage retrieval", "account", hash)
  1964  				continue
  1965  			}
  1966  			// TODO(rjl493456442) degrade the log level before merging.
  1967  			// It should never happen in ethereum.
  1968  			if _, ok := resumed[hash]; !ok {
  1969  				log.Error("Aborting suspended storage retrieval", "account", hash)
  1970  				delete(res.task.SubTasks, hash)
  1971  				largeStorageDiscardGauge.Inc(1)
  1972  			}
  1973  		}
  1974  	}
  1975  	// If the account range contained no contracts, or all have been fully filled
  1976  	// beforehand, short circuit storage filling and forward to the next task
  1977  	if res.task.pend == 0 {
  1978  		s.forwardAccountTask(res.task)
  1979  		return
  1980  	}
  1981  	// Some accounts are incomplete, leave as is for the storage and contract
  1982  	// task assigners to pick up and fill
  1983  }
  1984  
  1985  // processBytecodeResponse integrates an already validated bytecode response
  1986  // into the account tasks.
  1987  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1988  	batch := s.db.NewBatch()
  1989  
  1990  	var (
  1991  		codes uint64
  1992  	)
  1993  	for i, hash := range res.hashes {
  1994  		code := res.codes[i]
  1995  
  1996  		// If the bytecode was not delivered, reschedule it
  1997  		if code == nil {
  1998  			res.task.codeTasks[hash] = struct{}{}
  1999  			continue
  2000  		}
  2001  		// Code was delivered, mark it as no longer needed
  2002  		for j, account := range res.task.res.accounts {
  2003  			if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
  2004  				res.task.needCode[j] = false
  2005  				res.task.pend--
  2006  			}
  2007  		}
  2008  		// Push the bytecode into a database batch
  2009  		codes++
  2010  		rawdb.WriteCode(batch, hash, code)
  2011  	}
  2012  	bytes := common.StorageSize(batch.ValueSize())
  2013  	if err := batch.Write(); err != nil {
  2014  		log.Crit("Failed to persist bytecodes", "err", err)
  2015  	}
  2016  	s.bytecodeSynced += codes
  2017  	s.bytecodeBytes += bytes
  2018  
  2019  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes)
  2020  
  2021  	// If this delivery completed the last pending task, forward the account task
  2022  	// to the next chunk
  2023  	if res.task.pend == 0 {
  2024  		s.forwardAccountTask(res.task)
  2025  		return
  2026  	}
  2027  	// Some accounts are still incomplete, leave as is for the storage and contract
  2028  	// task assigners to pick up and fill.
  2029  }
  2030  
  2031  // processStorageResponse integrates an already validated storage response
  2032  // into the account tasks.
  2033  func (s *Syncer) processStorageResponse(res *storageResponse) {
  2034  	// Switch the subtask from pending to idle
  2035  	if res.subTask != nil {
  2036  		res.subTask.req = nil
  2037  	}
  2038  	batch := ethdb.HookedBatch{
  2039  		Batch: s.db.NewBatch(),
  2040  		OnPut: func(key []byte, value []byte) {
  2041  			s.storageBytes += common.StorageSize(len(key) + len(value))
  2042  		},
  2043  	}
  2044  	var (
  2045  		slots           int
  2046  		oldStorageBytes = s.storageBytes
  2047  	)
  2048  	// Iterate over all the accounts and reconstruct their storage tries from the
  2049  	// delivered slots
  2050  	for i, account := range res.accounts {
  2051  		// If the account was not delivered, reschedule it
  2052  		if i >= len(res.hashes) {
  2053  			res.mainTask.stateTasks[account] = res.roots[i]
  2054  			continue
  2055  		}
  2056  		// State was delivered; if complete, mark it as no longer needed,
  2057  		// otherwise mark the account as needing healing
  2058  		for j, hash := range res.mainTask.res.hashes {
  2059  			if account != hash {
  2060  				continue
  2061  			}
  2062  			acc := res.mainTask.res.accounts[j]
  2063  
  2064  			// If the packet contains multiple contract storage slots, all
  2065  			// but the last are surely complete. The last contract may be
  2066  			// chunked, so check its continuation flag.
  2067  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  2068  				res.mainTask.needState[j] = false
  2069  				res.mainTask.pend--
  2070  				res.mainTask.stateCompleted[account] = struct{}{} // mark it as completed
  2071  				smallStorageGauge.Inc(1)
  2072  			}
  2073  			// If the last contract was chunked, mark it as needing healing
  2074  			// to avoid writing it out to disk prematurely.
  2075  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  2076  				res.mainTask.needHeal[j] = true
  2077  			}
  2078  			// If the last contract was chunked, we need to switch to large
  2079  			// contract handling mode
  2080  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  2081  				// If we haven't yet started a large-contract retrieval, create
  2082  				// the subtasks for it within the main account task
  2083  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  2084  					var (
  2085  						keys    = res.hashes[i]
  2086  						chunks  = uint64(storageConcurrency)
  2087  						lastKey common.Hash
  2088  					)
  2089  					if len(keys) > 0 {
  2090  						lastKey = keys[len(keys)-1]
  2091  					}
  2092  					// If the number of slots remaining is low, decrease the
  2093  					// number of chunks. Somewhere on the order of 10-15K slots
  2094  					// fit into a packet of 500KB. A key/slot pair is maximum 64
  2095  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  2096  					//
  2097  					// Chunk so that at least 2 packets are needed to fill a task.
  2098  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  2099  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  2100  							chunks = n + 1
  2101  						}
  2102  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  2103  					} else {
  2104  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  2105  					}
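        					// A worked example of the arithmetic above: maxRequestSize/64
        					// is 8192 slots per packet, so an estimated 100000 remaining
        					// slots gives n = 100000/16384 = 6 and thus 7 chunks (when
        					// below storageConcurrency).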
  2106  					r := newHashRange(lastKey, chunks)
  2107  					if chunks == 1 {
  2108  						smallStorageGauge.Inc(1)
  2109  					} else {
  2110  						largeStorageGauge.Inc(1)
  2111  					}
  2112  					// Our first task is the one that was just filled by this response.
  2113  					batch := ethdb.HookedBatch{
  2114  						Batch: s.db.NewBatch(),
  2115  						OnPut: func(key []byte, value []byte) {
  2116  							s.storageBytes += common.StorageSize(len(key) + len(value))
  2117  						},
  2118  					}
  2119  					var tr genTrie
  2120  					if s.scheme == rawdb.HashScheme {
  2121  						tr = newHashTrie(batch)
  2122  					}
  2123  					if s.scheme == rawdb.PathScheme {
  2124  						// Keep the left boundary as it's the first range.
  2125  						tr = newPathTrie(account, false, s.db, batch)
  2126  					}
  2127  					tasks = append(tasks, &storageTask{
  2128  						Next:     common.Hash{},
  2129  						Last:     r.End(),
  2130  						root:     acc.Root,
  2131  						genBatch: batch,
  2132  						genTrie:  tr,
  2133  					})
  2134  					for r.Next() {
  2135  						batch := ethdb.HookedBatch{
  2136  							Batch: s.db.NewBatch(),
  2137  							OnPut: func(key []byte, value []byte) {
  2138  								s.storageBytes += common.StorageSize(len(key) + len(value))
  2139  							},
  2140  						}
  2141  						var tr genTrie
  2142  						if s.scheme == rawdb.HashScheme {
  2143  							tr = newHashTrie(batch)
  2144  						}
  2145  						if s.scheme == rawdb.PathScheme {
  2146  							tr = newPathTrie(account, true, s.db, batch)
  2147  						}
  2148  						tasks = append(tasks, &storageTask{
  2149  							Next:     r.Start(),
  2150  							Last:     r.End(),
  2151  							root:     acc.Root,
  2152  							genBatch: batch,
  2153  							genTrie:  tr,
  2154  						})
  2155  					}
  2156  					for _, task := range tasks {
  2157  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  2158  					}
  2159  					res.mainTask.SubTasks[account] = tasks
  2160  
  2161  					// Since we've just created the sub-tasks, this response
  2162  					// is surely for the first one (zero origin)
  2163  					res.subTask = tasks[0]
  2164  				}
  2165  			}
  2166  			// If we're in large contract delivery mode, forward the subtask
  2167  			if res.subTask != nil {
  2168  				// Ensure the response doesn't overflow into the subsequent task
  2169  				last := res.subTask.Last.Big()
  2170  				// Find the first overflowing key. While at it, mark res as complete
  2171  				// if the range includes or passes 'last'
  2172  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  2173  					cmp := res.hashes[i][k].Big().Cmp(last)
  2174  					if cmp >= 0 {
  2175  						res.cont = false
  2176  					}
  2177  					return cmp > 0
  2178  				})
  2179  				if index >= 0 {
  2180  					// cut off excess
  2181  					res.hashes[i] = res.hashes[i][:index]
  2182  					res.slots[i] = res.slots[i][:index]
  2183  				}
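        				// Note: sort.Search returns the smallest index in [0, len] for
        				// which the predicate holds, so index is never negative and
        				// slicing at len(...) is a harmless no-op; the res.cont side
        				// effect inside the predicate fires once the range reaches or
        				// passes 'last'.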
  2184  				// Forward the relevant storage chunk (even if created just now)
  2185  				if res.cont {
  2186  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  2187  				} else {
  2188  					res.subTask.done = true
  2189  				}
  2190  			}
  2191  		}
  2192  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  2193  		// push them to disk. If the contract is chunked, the trie nodes will be
  2194  		// reconstructed later.
  2195  		slots += len(res.hashes[i])
  2196  
  2197  		if i < len(res.hashes)-1 || res.subTask == nil {
  2198  			// no need to make local reassignment of account: this closure does not outlive the loop
  2199  			var tr genTrie
  2200  			if s.scheme == rawdb.HashScheme {
  2201  				tr = newHashTrie(batch)
  2202  			}
  2203  			if s.scheme == rawdb.PathScheme {
  2204  				// Keep the left boundary as it's complete
  2205  				tr = newPathTrie(account, false, s.db, batch)
  2206  			}
  2207  			for j := 0; j < len(res.hashes[i]); j++ {
  2208  				tr.update(res.hashes[i][j][:], res.slots[i][j])
  2209  			}
  2210  			tr.commit(true)
  2211  		}
  2212  		// Persist the received storage segments. This flat state may be
  2213  		// outdated during the sync, but it can be fixed later during
  2214  		// snapshot generation.
  2215  		for j := 0; j < len(res.hashes[i]); j++ {
  2216  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2217  
  2218  			// If we're storing large contracts, generate the trie nodes
  2219  			// on the fly to not trash the gluing points
  2220  			if i == len(res.hashes)-1 && res.subTask != nil {
  2221  				res.subTask.genTrie.update(res.hashes[i][j][:], res.slots[i][j])
  2222  			}
  2223  		}
  2224  	}
  2225  	// Large contracts could have generated new trie nodes, flush them to disk
  2226  	if res.subTask != nil {
  2227  		if res.subTask.done {
  2228  			root := res.subTask.genTrie.commit(res.subTask.Last == common.MaxHash)
  2229  			if err := res.subTask.genBatch.Write(); err != nil {
  2230  				log.Error("Failed to persist stack slots", "err", err)
  2231  			}
  2232  			res.subTask.genBatch.Reset()
  2233  
  2234  			// If the chunk was an overflown but otherwise complete delivery
  2235  			// (the generated root matches the expected one), clear the heal request.
  2236  			accountHash := res.accounts[len(res.accounts)-1]
  2237  			if root == res.subTask.root && rawdb.HasTrieNode(s.db, accountHash, nil, root, s.scheme) {
  2238  				for i, account := range res.mainTask.res.hashes {
  2239  					if account == accountHash {
  2240  						res.mainTask.needHeal[i] = false
  2241  						skipStorageHealingGauge.Inc(1)
  2242  					}
  2243  				}
  2244  			}
  2245  		} else if res.subTask.genBatch.ValueSize() > batchSizeThreshold {
  2246  			res.subTask.genTrie.commit(false)
  2247  			if err := res.subTask.genBatch.Write(); err != nil {
  2248  				log.Error("Failed to persist stack slots", "err", err)
  2249  			}
  2250  			res.subTask.genBatch.Reset()
  2251  		}
  2252  	}
  2253  	// Flush anything written just now and update the stats
  2254  	if err := batch.Write(); err != nil {
  2255  		log.Crit("Failed to persist storage slots", "err", err)
  2256  	}
  2257  	s.storageSynced += uint64(slots)
  2258  
  2259  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2260  
  2261  	// If this delivery completed the last pending task, forward the account task
  2262  	// to the next chunk
  2263  	if res.mainTask.pend == 0 {
  2264  		s.forwardAccountTask(res.mainTask)
  2265  		return
  2266  	}
  2267  	// Some accounts are still incomplete, leave as is for the storage and contract
  2268  	// task assigners to pick up and fill.
  2269  }
  2270  
  2271  // processTrienodeHealResponse integrates an already validated trienode response
  2272  // into the healer tasks.
  2273  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2274  	var (
  2275  		start = time.Now()
  2276  		fills int
  2277  	)
  2278  	for i, hash := range res.hashes {
  2279  		node := res.nodes[i]
  2280  
  2281  		// If the trie node was not delivered, reschedule it
  2282  		if node == nil {
  2283  			res.task.trieTasks[res.paths[i]] = res.hashes[i]
  2284  			continue
  2285  		}
  2286  		fills++
  2287  
  2288  		// Push the trie node into the state syncer
  2289  		s.trienodeHealSynced++
  2290  		s.trienodeHealBytes += common.StorageSize(len(node))
  2291  
  2292  		err := s.healer.scheduler.ProcessNode(trie.NodeSyncResult{Path: res.paths[i], Data: node})
  2293  		switch err {
  2294  		case nil:
  2295  		case trie.ErrAlreadyProcessed:
  2296  			s.trienodeHealDups++
  2297  		case trie.ErrNotRequested:
  2298  			s.trienodeHealNops++
  2299  		default:
  2300  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2301  		}
  2302  	}
  2303  	s.commitHealer(false)
  2304  
  2305  	// Calculate the processing rate of one filled trie node
  2306  	rate := float64(fills) / (float64(time.Since(start)) / float64(time.Second))
  2307  
  2308  	// Update the currently measured trienode queueing and processing throughput.
  2309  	//
  2310  	// The processing rate needs to be updated uniformly, independent of whether
  2311  	// we've processed 1x100 trie nodes or 100x1, to keep the rate consistent even in
  2312  	// the face of varying network packets. As such, we cannot just measure the
  2313  	// time it took to process N trie nodes and update once, we need one update
  2314  	// per trie node.
  2315  	//
  2316  	// Naively, that would be:
  2317  	//
  2318  	//   for i:=0; i<fills; i++ {
  2319  	//     healRate = (1-measurementImpact)*oldRate + measurementImpact*newRate
  2320  	//   }
  2321  	//
  2322  	// Essentially, a recursive expansion of HR = (1-MI)*HR + MI*NR.
  2323  	//
  2324  	// We can expand that formula for the Nth item as:
  2325  	//   HR(N) = (1-MI)^N*OR + (1-MI)^(N-1)*MI*NR + (1-MI)^(N-2)*MI*NR + ... + (1-MI)^0*MI*NR
  2326  	//
  2327  	// The above is a geometric sequence that can be summed to:
  2328  	//   HR(N) = (1-MI)^N*(OR-NR) + NR
  2329  	s.trienodeHealRate = gomath.Pow(1-trienodeHealRateMeasurementImpact, float64(fills))*(s.trienodeHealRate-rate) + rate
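        	// Sanity check of the closed form: for fills == 1 it reduces to
        	//
        	//   HR(1) = (1-MI)*(OR-NR) + NR = (1-MI)*OR + MI*NR
        	//
        	// which is exactly one iteration of the naive loop above.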
  2330  
  2331  	pending := s.trienodeHealPend.Load()
  2332  	if time.Since(s.trienodeHealThrottled) > time.Second {
  2333  		// Periodically adjust the trie node throttler
  2334  		if float64(pending) > 2*s.trienodeHealRate {
  2335  			s.trienodeHealThrottle *= trienodeHealThrottleIncrease
  2336  		} else {
  2337  			s.trienodeHealThrottle /= trienodeHealThrottleDecrease
  2338  		}
  2339  		if s.trienodeHealThrottle > maxTrienodeHealThrottle {
  2340  			s.trienodeHealThrottle = maxTrienodeHealThrottle
  2341  		} else if s.trienodeHealThrottle < minTrienodeHealThrottle {
  2342  			s.trienodeHealThrottle = minTrienodeHealThrottle
  2343  		}
  2344  		s.trienodeHealThrottled = time.Now()
  2345  
  2346  		log.Debug("Updated trie node heal throttler", "rate", s.trienodeHealRate, "pending", pending, "throttle", s.trienodeHealThrottle)
  2347  	}
  2348  }
  2349  
  2350  func (s *Syncer) commitHealer(force bool) {
  2351  	if !force && s.healer.scheduler.MemSize() < ethdb.IdealBatchSize {
  2352  		return
  2353  	}
  2354  	batch := s.db.NewBatch()
  2355  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2356  		log.Crit("Failed to commit healing data", "err", err)
  2357  	}
  2358  	if err := batch.Write(); err != nil {
  2359  		log.Crit("Failed to persist healing data", "err", err)
  2360  	}
  2361  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2362  }
  2363  
  2364  // processBytecodeHealResponse integrates an already validated bytecode response
  2365  // into the healer tasks.
  2366  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2367  	for i, hash := range res.hashes {
  2368  		node := res.codes[i]
  2369  
  2370  		// If the bytecode was not delivered, reschedule it
  2371  		if node == nil {
  2372  			res.task.codeTasks[hash] = struct{}{}
  2373  			continue
  2374  		}
  2375  		// Push the bytecode into the state syncer
  2376  		s.bytecodeHealSynced++
  2377  		s.bytecodeHealBytes += common.StorageSize(len(node))
  2378  
  2379  		err := s.healer.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node})
  2380  		switch err {
  2381  		case nil:
  2382  		case trie.ErrAlreadyProcessed:
  2383  			s.bytecodeHealDups++
  2384  		case trie.ErrNotRequested:
  2385  			s.bytecodeHealNops++
  2386  		default:
  2387  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2388  		}
  2389  	}
  2390  	s.commitHealer(false)
  2391  }
  2392  
  2393  // forwardAccountTask takes a filled account task and persists anything available
  2394  // into the database, after which it forwards the next account marker so that the
  2395  // task's next chunk may be filled.
  2396  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2397  	// Remove any pending delivery
  2398  	res := task.res
  2399  	if res == nil {
  2400  		return // nothing to forward
  2401  	}
  2402  	task.res = nil
  2403  
  2404  	// Persist the received account segments. This flat state may be
  2405  	// outdated during the sync, but it can be fixed later during
  2406  	// snapshot generation.
  2407  	oldAccountBytes := s.accountBytes
  2408  
  2409  	batch := ethdb.HookedBatch{
  2410  		Batch: s.db.NewBatch(),
  2411  		OnPut: func(key []byte, value []byte) {
  2412  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2413  		},
  2414  	}
  2415  	for i, hash := range res.hashes {
  2416  		if task.needCode[i] || task.needState[i] {
  2417  			break
  2418  		}
  2419  		slim := types.SlimAccountRLP(*res.accounts[i])
  2420  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2421  
  2422  		if !task.needHeal[i] {
  2423  			// If the storage task is complete, drop it into the stack trie
  2424  			// to generate account trie nodes for it
  2425  			full, err := types.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2426  			if err != nil {
  2427  				panic(err) // Really shouldn't ever happen
  2428  			}
  2429  			task.genTrie.update(hash[:], full)
  2430  		} else {
  2431  			// If the storage task is incomplete, explicitly delete the corresponding
  2432  			// account item from the account trie to ensure that all nodes along the
  2433  			// path to the incomplete storage trie are cleaned up.
  2434  			if err := task.genTrie.delete(hash[:]); err != nil {
  2435  				panic(err) // Really shouldn't ever happen
  2436  			}
  2437  		}
  2438  	}
  2439  	// Flush anything written just now and update the stats
  2440  	if err := batch.Write(); err != nil {
  2441  		log.Crit("Failed to persist accounts", "err", err)
  2442  	}
  2443  	s.accountSynced += uint64(len(res.accounts))
  2444  
  2445  	// Task filling persisted, push the chunk marker forward to the first
  2446  	// account still missing data.
  2447  	for i, hash := range res.hashes {
  2448  		if task.needCode[i] || task.needState[i] {
  2449  			return
  2450  		}
  2451  		task.Next = incHash(hash)
  2452  
  2453  		// Remove the completion flag once the account range is pushed
  2454  		// forward. The leftover accounts will be skipped in the next
  2455  		// cycle.
  2456  		delete(task.stateCompleted, hash)
  2457  	}
  2458  	// All accounts marked as complete, track if the entire task is done
  2459  	task.done = !res.cont
  2460  
  2461  	// Error out if there is any leftover completion flag.
  2462  	if task.done && len(task.stateCompleted) != 0 {
  2463  		panic(fmt.Errorf("storage completion flags should be emptied, %d left", len(task.stateCompleted)))
  2464  	}
  2465  	// Stack trie could have generated trie nodes, push them to disk (we need to
  2466  	// flush after finalizing task.done). It's fine even if we crash and lose this
  2467  	// write, as it will only cause more data to be downloaded during healing.
  2468  	if task.done {
  2469  		task.genTrie.commit(task.Last == common.MaxHash)
  2470  		if err := task.genBatch.Write(); err != nil {
  2471  			log.Error("Failed to persist stack account", "err", err)
  2472  		}
  2473  		task.genBatch.Reset()
  2474  	} else if task.genBatch.ValueSize() > batchSizeThreshold {
  2475  		task.genTrie.commit(false)
  2476  		if err := task.genBatch.Write(); err != nil {
  2477  			log.Error("Failed to persist stack account", "err", err)
  2478  		}
  2479  		task.genBatch.Reset()
  2480  	}
  2481  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2482  }
  2483  
  2484  // OnAccounts is a callback method to invoke when a range of accounts are
  2485  // received from a remote peer.
  2486  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2487  	size := common.StorageSize(len(hashes) * common.HashLength)
  2488  	for _, account := range accounts {
  2489  		size += common.StorageSize(len(account))
  2490  	}
  2491  	for _, node := range proof {
  2492  		size += common.StorageSize(len(node))
  2493  	}
  2494  	logger := peer.Log().New("reqid", id)
  2495  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2496  
  2497  	// Whether or not the response is valid, we can mark the peer as idle and
  2498  	// notify the scheduler to assign a new task. If the response is invalid,
  2499  	// we'll drop the peer in a bit.
  2500  	defer func() {
  2501  		s.lock.Lock()
  2502  		defer s.lock.Unlock()
  2503  		if _, ok := s.peers[peer.ID()]; ok {
  2504  			s.accountIdlers[peer.ID()] = struct{}{}
  2505  		}
  2506  		select {
  2507  		case s.update <- struct{}{}:
  2508  		default:
  2509  		}
  2510  	}()
  2511  	s.lock.Lock()
  2512  	// Ensure the response is for a valid request
  2513  	req, ok := s.accountReqs[id]
  2514  	if !ok {
  2515  		// Request stale, perhaps the peer timed out but came through in the end
  2516  		logger.Warn("Unexpected account range packet")
  2517  		s.lock.Unlock()
  2518  		return nil
  2519  	}
  2520  	delete(s.accountReqs, id)
  2521  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2522  
  2523  	// Clean up the request timeout timer, we'll see how to proceed further based
  2524  	// on the actual delivered content
  2525  	if !req.timeout.Stop() {
  2526  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2527  		s.lock.Unlock()
  2528  		return nil
  2529  	}
  2530  	// Response is valid, but check if peer is signalling that it does not have
  2531  	// the requested data. For account range queries that means the state being
  2532  	// retrieved was either already pruned remotely, or the peer is not yet
  2533  	// synced to our head.
  2534  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2535  		logger.Debug("Peer rejected account range request", "root", s.root)
  2536  		s.statelessPeers[peer.ID()] = struct{}{}
  2537  		s.lock.Unlock()
  2538  
  2539  		// Signal this request as failed, and ready for rescheduling
  2540  		s.scheduleRevertAccountRequest(req)
  2541  		return nil
  2542  	}
  2543  	root := s.root
  2544  	s.lock.Unlock()
  2545  
  2546  	// Reconstruct a partial trie from the response and verify it
  2547  	keys := make([][]byte, len(hashes))
  2548  	for i, key := range hashes {
  2549  		keys[i] = common.CopyBytes(key[:])
  2550  	}
  2551  	nodes := make(trienode.ProofList, len(proof))
  2552  	for i, node := range proof {
  2553  		nodes[i] = node
  2554  	}
  2555  	cont, err := trie.VerifyRangeProof(root, req.origin[:], keys, accounts, nodes.Set())
  2556  	if err != nil {
  2557  		logger.Warn("Account range failed proof", "err", err)
  2558  		// Signal this request as failed, and ready for rescheduling
  2559  		s.scheduleRevertAccountRequest(req)
  2560  		return err
  2561  	}
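        	// The cont flag returned by the range proof signals that more accounts
        	// exist in the trie beyond the last delivered hash, i.e. the task range
        	// is not yet exhausted and a follow-up request will be needed.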
  2562  	accs := make([]*types.StateAccount, len(accounts))
  2563  	for i, account := range accounts {
  2564  		acc := new(types.StateAccount)
  2565  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2566  			panic(err) // We created these blobs, we must be able to decode them
  2567  		}
  2568  		accs[i] = acc
  2569  	}
  2570  	response := &accountResponse{
  2571  		task:     req.task,
  2572  		hashes:   hashes,
  2573  		accounts: accs,
  2574  		cont:     cont,
  2575  	}
  2576  	select {
  2577  	case req.deliver <- response:
  2578  	case <-req.cancel:
  2579  	case <-req.stale:
  2580  	}
  2581  	return nil
  2582  }
  2583  
  2584  // OnByteCodes is a callback method to invoke when a batch of contract
  2585  // bytecodes are received from a remote peer.
  2586  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2587  	s.lock.RLock()
  2588  	syncing := !s.snapped
  2589  	s.lock.RUnlock()
  2590  
  2591  	if syncing {
  2592  		return s.onByteCodes(peer, id, bytecodes)
  2593  	}
  2594  	return s.onHealByteCodes(peer, id, bytecodes)
  2595  }
  2596  
  2597  // onByteCodes is a callback method to invoke when a batch of contract
  2598  // bytecodes are received from a remote peer in the syncing phase.
  2599  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2600  	var size common.StorageSize
  2601  	for _, code := range bytecodes {
  2602  		size += common.StorageSize(len(code))
  2603  	}
  2604  	logger := peer.Log().New("reqid", id)
  2605  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2606  
  2607  	// Whether or not the response is valid, we can mark the peer as idle and
  2608  	// notify the scheduler to assign a new task. If the response is invalid,
  2609  	// we'll drop the peer in a bit.
  2610  	defer func() {
  2611  		s.lock.Lock()
  2612  		defer s.lock.Unlock()
  2613  		if _, ok := s.peers[peer.ID()]; ok {
  2614  			s.bytecodeIdlers[peer.ID()] = struct{}{}
  2615  		}
  2616  		select {
  2617  		case s.update <- struct{}{}:
  2618  		default:
  2619  		}
  2620  	}()
  2621  	s.lock.Lock()
  2622  	// Ensure the response is for a valid request
  2623  	req, ok := s.bytecodeReqs[id]
  2624  	if !ok {
  2625  		// Request stale, perhaps the peer timed out but came through in the end
  2626  		logger.Warn("Unexpected bytecode packet")
  2627  		s.lock.Unlock()
  2628  		return nil
  2629  	}
  2630  	delete(s.bytecodeReqs, id)
  2631  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2632  
  2633  	// Clean up the request timeout timer, we'll see how to proceed further based
  2634  	// on the actual delivered content
  2635  	if !req.timeout.Stop() {
  2636  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2637  		s.lock.Unlock()
  2638  		return nil
  2639  	}
  2640  
  2641  	// Response is valid, but check if peer is signalling that it does not have
  2642  	// the requested data. For bytecode queries that means the peer is not
  2643  	// yet synced.
  2644  	if len(bytecodes) == 0 {
  2645  		logger.Debug("Peer rejected bytecode request")
  2646  		s.statelessPeers[peer.ID()] = struct{}{}
  2647  		s.lock.Unlock()
  2648  
  2649  		// Signal this request as failed, and ready for rescheduling
  2650  		s.scheduleRevertBytecodeRequest(req)
  2651  		return nil
  2652  	}
  2653  	s.lock.Unlock()
  2654  
  2655  	// Cross reference the requested bytecodes with the response to find gaps
  2656  	// that the serving node is missing
  2657  	hasher := crypto.NewKeccakState()
  2658  	hash := make([]byte, 32)
  2659  
  2660  	codes := make([][]byte, len(req.hashes))
  2661  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2662  		// Find the next hash that we've been served, leaving misses with nils
  2663  		hasher.Reset()
  2664  		hasher.Write(bytecodes[i])
  2665  		hasher.Read(hash)
  2666  
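		// Both the request hashes and the response blobs are sorted the same
		// way, so a single forward scan suffices: j never rewinds, and a blob
		// that cannot match an advancing j was never requested.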
  2667  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2668  			j++
  2669  		}
  2670  		if j < len(req.hashes) {
  2671  			codes[j] = bytecodes[i]
  2672  			j++
  2673  			continue
  2674  		}
  2675  		// We've either run out of hashes or received unrequested data
  2676  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2677  		// Signal this request as failed, and ready for rescheduling
  2678  		s.scheduleRevertBytecodeRequest(req)
  2679  		return errors.New("unexpected bytecode")
  2680  	}
  2681  	// Response validated, send it to the scheduler for filling
  2682  	response := &bytecodeResponse{
  2683  		task:   req.task,
  2684  		hashes: req.hashes,
  2685  		codes:  codes,
  2686  	}
  2687  	select {
  2688  	case req.deliver <- response:
  2689  	case <-req.cancel:
  2690  	case <-req.stale:
  2691  	}
  2692  	return nil
  2693  }
  2694  
  2695  // OnStorage is a callback method to invoke when ranges of storage slots
  2696  // are received from a remote peer.
  2697  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2698  	// Gather some trace stats to aid in debugging issues
  2699  	var (
  2700  		hashCount int
  2701  		slotCount int
  2702  		size      common.StorageSize
  2703  	)
  2704  	for _, hashset := range hashes {
  2705  		size += common.StorageSize(common.HashLength * len(hashset))
  2706  		hashCount += len(hashset)
  2707  	}
  2708  	for _, slotset := range slots {
  2709  		for _, slot := range slotset {
  2710  			size += common.StorageSize(len(slot))
  2711  		}
  2712  		slotCount += len(slotset)
  2713  	}
  2714  	for _, node := range proof {
  2715  		size += common.StorageSize(len(node))
  2716  	}
  2717  	logger := peer.Log().New("reqid", id)
  2718  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2719  
  2720  	// Whether or not the response is valid, we can mark the peer as idle and
  2721  	// notify the scheduler to assign a new task. If the response is invalid,
  2722  	// we'll drop the peer in a bit.
  2723  	defer func() {
  2724  		s.lock.Lock()
  2725  		defer s.lock.Unlock()
  2726  		if _, ok := s.peers[peer.ID()]; ok {
  2727  			s.storageIdlers[peer.ID()] = struct{}{}
  2728  		}
  2729  		select {
  2730  		case s.update <- struct{}{}:
  2731  		default:
  2732  		}
  2733  	}()
  2734  	s.lock.Lock()
  2735  	// Ensure the response is for a valid request
  2736  	req, ok := s.storageReqs[id]
  2737  	if !ok {
  2738  		// Request stale, perhaps the peer timed out but came through in the end
  2739  		logger.Warn("Unexpected storage ranges packet")
  2740  		s.lock.Unlock()
  2741  		return nil
  2742  	}
  2743  	delete(s.storageReqs, id)
  2744  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2745  
  2746  	// Clean up the request timeout timer, we'll see how to proceed further based
  2747  	// on the actual delivered content
  2748  	if !req.timeout.Stop() {
  2749  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2750  		s.lock.Unlock()
  2751  		return nil
  2752  	}
  2753  
  2754  	// Reject the response if the hash sets and slot sets don't match, or if the
  2755  	// peer sent more data than requested.
  2756  	if len(hashes) != len(slots) {
  2757  		s.lock.Unlock()
  2758  		s.scheduleRevertStorageRequest(req) // reschedule request
  2759  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2760  		return errors.New("hash and slot set size mismatch")
  2761  	}
  2762  	if len(hashes) > len(req.accounts) {
  2763  		s.lock.Unlock()
  2764  		s.scheduleRevertStorageRequest(req) // reschedule request
  2765  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2766  		return errors.New("hash set larger than requested")
  2767  	}
  2768  	// Response is valid, but check if peer is signalling that it does not have
  2769  	// the requested data. For storage range queries that means the state being
  2770  	// retrieved was either already pruned remotely, or the peer is not yet
  2771  	// synced to our head.
  2772  	if len(hashes) == 0 && len(proof) == 0 {
  2773  		logger.Debug("Peer rejected storage request")
  2774  		s.statelessPeers[peer.ID()] = struct{}{}
  2775  		s.lock.Unlock()
  2776  		s.scheduleRevertStorageRequest(req) // reschedule request
  2777  		return nil
  2778  	}
  2779  	s.lock.Unlock()
  2780  
  2781  	// Reconstruct the partial tries from the response and verify them
  2782  	var cont bool
  2783  
  2784  	// If a proof was attached while the response is empty, it indicates that the
  2785  	// requested range specified with 'origin' is empty. Construct an empty state
  2786  	// response locally to finalize the range.
  2787  	if len(hashes) == 0 && len(proof) > 0 {
  2788  		hashes = append(hashes, []common.Hash{})
  2789  		slots = append(slots, [][]byte{})
  2790  	}
  2791  	for i := 0; i < len(hashes); i++ {
  2792  		// Convert the keys and proofs into an internal format
  2793  		keys := make([][]byte, len(hashes[i]))
  2794  		for j, key := range hashes[i] {
  2795  			keys[j] = common.CopyBytes(key[:])
  2796  		}
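		// Only the last hash set in a response may be incomplete; the attached
		// proof, if any, belongs to it alone, so earlier sets get no nodes.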
  2797  		nodes := make(trienode.ProofList, 0, len(proof))
  2798  		if i == len(hashes)-1 {
  2799  			for _, node := range proof {
  2800  				nodes = append(nodes, node)
  2801  			}
  2802  		}
  2803  		var err error
  2804  		if len(nodes) == 0 {
  2805  			// No proof has been attached, the response must cover the entire key
  2806  			// space and hash to the origin root.
  2807  			_, err = trie.VerifyRangeProof(req.roots[i], nil, keys, slots[i], nil)
  2808  			if err != nil {
  2809  				s.scheduleRevertStorageRequest(req) // reschedule request
  2810  				logger.Warn("Storage slots failed proof", "err", err)
  2811  				return err
  2812  			}
  2813  		} else {
  2814  			// A proof was attached, the response is only partial, check that the
  2815  			// returned data is indeed part of the storage trie
  2816  			proofdb := nodes.Set()
  2817  
  2818  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], keys, slots[i], proofdb)
  2819  			if err != nil {
  2820  				s.scheduleRevertStorageRequest(req) // reschedule request
  2821  				logger.Warn("Storage range failed proof", "err", err)
  2822  				return err
  2823  			}
  2824  		}
  2825  	}
  2826  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2827  	response := &storageResponse{
  2828  		mainTask: req.mainTask,
  2829  		subTask:  req.subTask,
  2830  		accounts: req.accounts,
  2831  		roots:    req.roots,
  2832  		hashes:   hashes,
  2833  		slots:    slots,
  2834  		cont:     cont,
  2835  	}
  2836  	select {
  2837  	case req.deliver <- response:
  2838  	case <-req.cancel:
  2839  	case <-req.stale:
  2840  	}
  2841  	return nil
  2842  }
  2843  
  2844  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2845  // is received from a remote peer.
  2846  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2847  	var size common.StorageSize
  2848  	for _, node := range trienodes {
  2849  		size += common.StorageSize(len(node))
  2850  	}
  2851  	logger := peer.Log().New("reqid", id)
  2852  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2853  
  2854  	// Whether or not the response is valid, we can mark the peer as idle and
  2855  	// notify the scheduler to assign a new task. If the response is invalid,
  2856  	// we'll drop the peer in a bit.
  2857  	defer func() {
  2858  		s.lock.Lock()
  2859  		defer s.lock.Unlock()
  2860  		if _, ok := s.peers[peer.ID()]; ok {
  2861  			s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2862  		}
  2863  		select {
  2864  		case s.update <- struct{}{}:
  2865  		default:
  2866  		}
  2867  	}()
  2868  	s.lock.Lock()
  2869  	// Ensure the response is for a valid request
  2870  	req, ok := s.trienodeHealReqs[id]
  2871  	if !ok {
  2872  		// Request stale, perhaps the peer timed out but came through in the end
  2873  		logger.Warn("Unexpected trienode heal packet")
  2874  		s.lock.Unlock()
  2875  		return nil
  2876  	}
  2877  	delete(s.trienodeHealReqs, id)
  2878  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2879  
  2880  	// Clean up the request timeout timer, we'll see how to proceed further based
  2881  	// on the actual delivered content
  2882  	if !req.timeout.Stop() {
  2883  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2884  		s.lock.Unlock()
  2885  		return nil
  2886  	}
  2887  
  2888  	// Response is valid, but check if peer is signalling that it does not have
  2889  	// the requested data. For trienode heal queries that means the peer is not
  2890  	// yet synced.
  2891  	if len(trienodes) == 0 {
  2892  		logger.Debug("Peer rejected trienode heal request")
  2893  		s.statelessPeers[peer.ID()] = struct{}{}
  2894  		s.lock.Unlock()
  2895  
  2896  		// Signal this request as failed, and ready for rescheduling
  2897  		s.scheduleRevertTrienodeHealRequest(req)
  2898  		return nil
  2899  	}
  2900  	s.lock.Unlock()
  2901  
  2902  	// Cross reference the requested trienodes with the response to find gaps
  2903  	// that the serving node is missing
  2904  	var (
  2905  		hasher = crypto.NewKeccakState()
  2906  		hash   = make([]byte, 32)
  2907  		nodes  = make([][]byte, len(req.hashes))
  2908  		fills  uint64
  2909  	)
  2910  	for i, j := 0, 0; i < len(trienodes); i++ {
  2911  		// Find the next hash that we've been served, leaving misses with nils
  2912  		hasher.Reset()
  2913  		hasher.Write(trienodes[i])
  2914  		hasher.Read(hash)
  2915  
  2916  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2917  			j++
  2918  		}
  2919  		if j < len(req.hashes) {
  2920  			nodes[j] = trienodes[i]
  2921  			fills++
  2922  			j++
  2923  			continue
  2924  		}
  2925  		// We've either run out of hashes or received unrequested data
  2926  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2927  
  2928  		// Signal this request as failed, and ready for rescheduling
  2929  		s.scheduleRevertTrienodeHealRequest(req)
  2930  		return errors.New("unexpected healing trienode")
  2931  	}
  2932  	// Response validated, send it to the scheduler for filling
  2933  	s.trienodeHealPend.Add(fills)
  2934  	defer func() {
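		// Adding ^(fills - 1) is two's-complement arithmetic for subtracting
		// fills from the unsigned pending counter once delivery completes.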
  2935  		s.trienodeHealPend.Add(^(fills - 1))
  2936  	}()
  2937  	response := &trienodeHealResponse{
  2938  		paths:  req.paths,
  2939  		task:   req.task,
  2940  		hashes: req.hashes,
  2941  		nodes:  nodes,
  2942  	}
  2943  	select {
  2944  	case req.deliver <- response:
  2945  	case <-req.cancel:
  2946  	case <-req.stale:
  2947  	}
  2948  	return nil
  2949  }
  2950  
  2951  // onHealByteCodes is a callback method to invoke when a batch of contract
  2952  // bytecodes is received from a remote peer in the healing phase.
  2953  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2954  	var size common.StorageSize
  2955  	for _, code := range bytecodes {
  2956  		size += common.StorageSize(len(code))
  2957  	}
  2958  	logger := peer.Log().New("reqid", id)
  2959  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2960  
  2961  	// Whether or not the response is valid, we can mark the peer as idle and
  2962  	// notify the scheduler to assign a new task. If the response is invalid,
  2963  	// we'll drop the peer in a bit.
  2964  	defer func() {
  2965  		s.lock.Lock()
  2966  		defer s.lock.Unlock()
  2967  		if _, ok := s.peers[peer.ID()]; ok {
  2968  			s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2969  		}
  2970  		select {
  2971  		case s.update <- struct{}{}:
  2972  		default:
  2973  		}
  2974  	}()
  2975  	s.lock.Lock()
  2976  	// Ensure the response is for a valid request
  2977  	req, ok := s.bytecodeHealReqs[id]
  2978  	if !ok {
  2979  		// Request stale, perhaps the peer timed out but came through in the end
  2980  		logger.Warn("Unexpected bytecode heal packet")
  2981  		s.lock.Unlock()
  2982  		return nil
  2983  	}
  2984  	delete(s.bytecodeHealReqs, id)
  2985  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2986  
  2987  	// Clean up the request timeout timer, we'll see how to proceed further based
  2988  	// on the actual delivered content
  2989  	if !req.timeout.Stop() {
  2990  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2991  		s.lock.Unlock()
  2992  		return nil
  2993  	}
  2994  
  2995  	// Response is valid, but check if peer is signalling that it does not have
  2996  	// the requested data. For bytecode queries that means the peer is not
  2997  	// yet synced.
  2998  	if len(bytecodes) == 0 {
  2999  		logger.Debug("Peer rejected bytecode heal request")
  3000  		s.statelessPeers[peer.ID()] = struct{}{}
  3001  		s.lock.Unlock()
  3002  
  3003  		// Signal this request as failed, and ready for rescheduling
  3004  		s.scheduleRevertBytecodeHealRequest(req)
  3005  		return nil
  3006  	}
  3007  	s.lock.Unlock()
  3008  
  3009  	// Cross reference the requested bytecodes with the response to find gaps
  3010  	// that the serving node is missing
  3011  	hasher := crypto.NewKeccakState()
  3012  	hash := make([]byte, 32)
  3013  
  3014  	codes := make([][]byte, len(req.hashes))
  3015  	for i, j := 0, 0; i < len(bytecodes); i++ {
  3016  		// Find the next hash that we've been served, leaving misses with nils
  3017  		hasher.Reset()
  3018  		hasher.Write(bytecodes[i])
  3019  		hasher.Read(hash)
  3020  
  3021  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  3022  			j++
  3023  		}
  3024  		if j < len(req.hashes) {
  3025  			codes[j] = bytecodes[i]
  3026  			j++
  3027  			continue
  3028  		}
  3029  		// We've either run out of hashes or received unrequested data
  3030  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  3031  		// Signal this request as failed, and ready for rescheduling
  3032  		s.scheduleRevertBytecodeHealRequest(req)
  3033  		return errors.New("unexpected healing bytecode")
  3034  	}
  3035  	// Response validated, send it to the scheduler for filling
  3036  	response := &bytecodeHealResponse{
  3037  		task:   req.task,
  3038  		hashes: req.hashes,
  3039  		codes:  codes,
  3040  	}
  3041  	select {
  3042  	case req.deliver <- response:
  3043  	case <-req.cancel:
  3044  	case <-req.stale:
  3045  	}
  3046  	return nil
  3047  }
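
// The same forward-scan matching appears in onByteCodes, OnTrieNodes and
// onHealByteCodes above. A minimal generic sketch (hypothetical helper) of
// the pattern: responses must preserve the request order, entries may be
// omitted, and any blob that cannot match an advancing cursor was never
// requested in the first place.
func matchOrdered(want []common.Hash, blobs [][]byte) ([][]byte, error) {
	hasher := crypto.NewKeccakState()
	hash := make([]byte, 32)

	filled := make([][]byte, len(want))
	for i, j := 0, 0; i < len(blobs); i++ {
		hasher.Reset()
		hasher.Write(blobs[i])
		hasher.Read(hash)

		for j < len(want) && !bytes.Equal(hash, want[j][:]) {
			j++ // skip hashes the peer chose not to serve
		}
		if j == len(want) {
			return nil, errors.New("unrequested blob in response")
		}
		filled[j] = blobs[i]
		j++
	}
	return filled, nil // nil entries mark misses left to reschedule
}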
  3048  
  3049  // onHealState is a callback method to invoke when a flat state (account
  3050  // or storage slot) is downloaded during the healing stage. The flat states
  3051  // can be persisted blindly and fixed up later during snapshot generation.
  3052  // Note it's not concurrency-safe; callers must synchronize externally.
  3053  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
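	// A single-element path addresses an account leaf, while a two-element
	// path addresses a storage slot inside the account given by paths[0].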
  3054  	if len(paths) == 1 {
  3055  		var account types.StateAccount
  3056  		if err := rlp.DecodeBytes(value, &account); err != nil {
  3057  			return nil // Returning the error here would drop the remote peer
  3058  		}
  3059  		blob := types.SlimAccountRLP(account)
  3060  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  3061  		s.accountHealed += 1
  3062  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  3063  	}
  3064  	if len(paths) == 2 {
  3065  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  3066  		s.storageHealed += 1
  3067  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  3068  	}
  3069  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  3070  		s.stateWriter.Write() // It's fine to ignore the error here
  3071  		s.stateWriter.Reset()
  3072  	}
  3073  	return nil
  3074  }
  3075  
  3076  // hashSpace is the total size of the 256-bit hash space for accounts.
  3077  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  3078  
  3079  // report calculates various status reports and provides them to the user.
  3080  func (s *Syncer) report(force bool) {
  3081  	if len(s.tasks) > 0 {
  3082  		s.reportSyncProgress(force)
  3083  		return
  3084  	}
  3085  	s.reportHealProgress(force)
  3086  }
  3087  
  3088  // reportSyncProgress calculates various status reports and provides them to the user.
  3089  func (s *Syncer) reportSyncProgress(force bool) {
  3090  	// Don't report all the events, just occasionally
  3091  	if !force && time.Since(s.logTime) < 8*time.Second {
  3092  		return
  3093  	}
  3094  	// Don't report anything until we have made meaningful progress
  3095  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  3096  	if synced == 0 {
  3097  		return
  3098  	}
  3099  	accountGaps := new(big.Int)
  3100  	for _, task := range s.tasks {
  3101  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  3102  	}
  3103  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  3104  	if accountFills.BitLen() == 0 {
  3105  		return
  3106  	}
  3107  	s.logTime = time.Now()
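	// Extrapolate the total state size: the bytes synced so far are scaled up
	// by the inverse of the fraction of the account hash space covered.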
  3108  	estBytes := float64(new(big.Int).Div(
  3109  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  3110  		accountFills,
  3111  	).Uint64())
  3112  	// Don't report anything until we have made meaningful progress
  3113  	if estBytes < 1.0 {
  3114  		return
  3115  	}
  3116  	elapsed := time.Since(s.startTime)
  3117  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  3118  
  3119  	// Create a mega progress report
  3120  	var (
  3121  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  3122  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  3123  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  3124  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  3125  	)
  3126  	log.Info("Syncing: state download in progress", "synced", progress, "state", synced,
  3127  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  3128  }
  3129  
  3130  // reportHealProgress calculates various status reports and provides them to the user.
  3131  func (s *Syncer) reportHealProgress(force bool) {
  3132  	// Don't report all the events, just occasionally
  3133  	if !force && time.Since(s.logTime) < 8*time.Second {
  3134  		return
  3135  	}
  3136  	s.logTime = time.Now()
  3137  
  3138  	// Create a mega progress report
  3139  	var (
  3140  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  3141  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  3142  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  3143  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  3144  	)
  3145  	log.Info("Syncing: state healing in progress", "accounts", accounts, "slots", storage,
  3146  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  3147  }
  3148  
  3149  // estimateRemainingSlots tries to determine roughly how many slots are left in
  3150  // a contract storage, based on the number of keys and the last hash. This method
  3151  // assumes that the hashes are lexicographically ordered and evenly distributed.
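// For example, if 128 slots were found with the last hash at the midpoint of
// the hash space, the extrapolated total is roughly 256 slots, so about 128
// are still expected to the right of it.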
  3152  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  3153  	if last == (common.Hash{}) {
  3154  		return 0, errors.New("last hash empty")
  3155  	}
  3156  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  3157  	space.Div(space, last.Big())
  3158  	if !space.IsUint64() {
  3159  		// Gigantic slot space estimate, probably due to too few or malicious slots
  3160  		return 0, errors.New("too few slots for estimation")
  3161  	}
  3162  	return space.Uint64() - uint64(hashes), nil
  3163  }
  3164  
  3165  // capacitySort implements sort.Interface, allowing sorting by peer message
  3166  // throughput. Note that callers should use sort.Reverse to get the desired
  3167  // effect of highest capacity being at the front.
  3168  type capacitySort struct {
  3169  	ids  []string
  3170  	caps []int
  3171  }
  3172  
  3173  func (s *capacitySort) Len() int {
  3174  	return len(s.ids)
  3175  }
  3176  
  3177  func (s *capacitySort) Less(i, j int) bool {
  3178  	return s.caps[i] < s.caps[j]
  3179  }
  3180  
  3181  func (s *capacitySort) Swap(i, j int) {
  3182  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  3183  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  3184  }
  3185  
  3186  // healRequestSort implements sort.Interface, allowing trienode heal requests
  3187  // to be sorted, which is a prerequisite for merging storage requests.
  3188  type healRequestSort struct {
  3189  	paths     []string
  3190  	hashes    []common.Hash
  3191  	syncPaths []trie.SyncPath
  3192  }
  3193  
  3194  func (t *healRequestSort) Len() int {
  3195  	return len(t.hashes)
  3196  }
  3197  
  3198  func (t *healRequestSort) Less(i, j int) bool {
  3199  	a := t.syncPaths[i]
  3200  	b := t.syncPaths[j]
  3201  	switch bytes.Compare(a[0], b[0]) {
  3202  	case -1:
  3203  		return true
  3204  	case 1:
  3205  		return false
  3206  	}
  3207  	// identical first part
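	// Shorter paths sort first, so an account-only request lands immediately
	// before its own storage paths; Merge relies on this adjacency to fold them.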
  3208  	if len(a) < len(b) {
  3209  		return true
  3210  	}
  3211  	if len(b) < len(a) {
  3212  		return false
  3213  	}
  3214  	if len(a) == 2 {
  3215  		return bytes.Compare(a[1], b[1]) < 0
  3216  	}
  3217  	return false
  3218  }
  3219  
  3220  func (t *healRequestSort) Swap(i, j int) {
  3221  	t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
  3222  	t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
  3223  	t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
  3224  }
  3225  
  3226  // Merge merges the pathsets, so that several storage requests concerning the
  3227  // same account are merged into one, to reduce bandwidth.
  3228  // Note: this operation is only meaningful if t has been sorted first.
  3229  func (t *healRequestSort) Merge() []TrieNodePathSet {
  3230  	var result []TrieNodePathSet
  3231  	for _, path := range t.syncPaths {
  3232  		pathset := TrieNodePathSet(path)
  3233  		if len(path) == 1 {
  3234  			// It's an account reference.
  3235  			result = append(result, pathset)
  3236  		} else {
  3237  			// It's a storage reference.
  3238  			end := len(result) - 1
  3239  			if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
  3240  				// The account doesn't match last, create a new entry.
  3241  				result = append(result, pathset)
  3242  			} else {
  3243  				// It's the same account as the previous one, add to the storage
  3244  				// paths of that request.
  3245  				result[end] = append(result[end], pathset[1])
  3246  			}
  3247  		}
  3248  	}
  3249  	return result
  3250  }
  3251  
  3252  // sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
  3253  // the TrieNodePathSets and merges paths which belong to the same account path.
  3254  func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
  3255  	syncPaths := make([]trie.SyncPath, len(paths))
  3256  	for i, path := range paths {
  3257  		syncPaths[i] = trie.NewSyncPath([]byte(path))
  3258  	}
  3259  	n := &healRequestSort{paths, hashes, syncPaths}
  3260  	sort.Sort(n)
  3261  	pathsets := n.Merge()
  3262  	return n.paths, n.hashes, n.syncPaths, pathsets
  3263  }
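
// A minimal sketch (hypothetical byte values, living in this package since
// healRequestSort is unexported) of the contraction performed by Merge: two
// storage paths under the same account fold into a single TrieNodePathSet,
// while the unrelated third path stays separate.
func exampleMerge() []TrieNodePathSet {
	acct := []byte{0x01, 0x02}
	t := &healRequestSort{
		syncPaths: []trie.SyncPath{
			{acct, []byte{0x0a}},         // storage node under acct
			{acct, []byte{0x0b}},         // another node, same account
			{[]byte{0x03}, []byte{0x0c}}, // storage node under another account
		},
	}
	// Yields [[acct, 0x0a, 0x0b], [0x03, 0x0c]]: two pathsets, not three.
	return t.Merge()
}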