github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/eth/protocols/snap/sync.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"math/rand"
    26  	"sort"
    27  	"sync"
    28  	"time"
    29  
    30  	"golang.org/x/crypto/sha3"
    31  
    32  	"github.com/scroll-tech/go-ethereum/common"
    33  	"github.com/scroll-tech/go-ethereum/common/math"
    34  	"github.com/scroll-tech/go-ethereum/core/rawdb"
    35  	"github.com/scroll-tech/go-ethereum/core/state"
    36  	"github.com/scroll-tech/go-ethereum/core/state/snapshot"
    37  	"github.com/scroll-tech/go-ethereum/core/types"
    38  	"github.com/scroll-tech/go-ethereum/crypto"
    39  	"github.com/scroll-tech/go-ethereum/crypto/codehash"
    40  	"github.com/scroll-tech/go-ethereum/ethdb"
    41  	"github.com/scroll-tech/go-ethereum/event"
    42  	"github.com/scroll-tech/go-ethereum/light"
    43  	"github.com/scroll-tech/go-ethereum/log"
    44  	"github.com/scroll-tech/go-ethereum/p2p/msgrate"
    45  	"github.com/scroll-tech/go-ethereum/rlp"
    46  	"github.com/scroll-tech/go-ethereum/trie"
    47  )
    48  
    49  var (
    50  	// emptyRoot is the known root hash of an empty trie.
    51  	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
    52  
    53  	// emptyKeccakCodeHash is the known keccak hash of the empty EVM bytecode.
    54  	emptyKeccakCodeHash = codehash.EmptyKeccakCodeHash
    55  
    56  	// emptyPoseidonCodeHash is the known poseidon hash of the empty EVM bytecode.
    57  	emptyPoseidonCodeHash = codehash.EmptyPoseidonCodeHash
    58  )
    59  
    60  const (
    61  	// minRequestSize is the minimum number of bytes to request from a remote peer.
    62  	// This number is used as the low cap for account and storage range requests.
    63  	// Bytecode and trienode are limited inherently by item count (1).
    64  	minRequestSize = 64 * 1024
    65  
    66  	// maxRequestSize is the maximum number of bytes to request from a remote peer.
    67  	// This number is used as the high cap for account and storage range requests.
    68  	// Bytecode and trienode are limited more explicitly by the caps below.
    69  	maxRequestSize = 512 * 1024
    70  
    71  	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
    72  	// single query. If this number is too low, we're not filling responses fully
    73  	// and waste round trip times. If it's too high, we're capping responses and
    74  	// waste bandwidth.
    75  	//
     76  // Deployed bytecodes are currently capped at 24KB, so the minimum request
    77  	// size should be maxRequestSize / 24K. Assuming that most contracts do not
    78  	// come close to that, requesting 4x should be a good approximation.
    79  	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
    80  
    81  	// maxTrieRequestCount is the maximum number of trie node blobs to request in
    82  	// a single query. If this number is too low, we're not filling responses fully
    83  	// and waste round trip times. If it's too high, we're capping responses and
    84  	// waste bandwidth.
    85  	maxTrieRequestCount = maxRequestSize / 512
    86  )
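         // Editorial note (an added worked example, not part of the upstream
         // source): with the caps above, Go's truncating integer division gives
         //
         //	maxCodeRequestCount = 512*1024/(24*1024)*4 = 21*4 = 84 bytecodes per query
         //	maxTrieRequestCount = 512*1024/512         = 1024 trie nodes per query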
    87  
    88  var (
    89  	// accountConcurrency is the number of chunks to split the account trie into
    90  	// to allow concurrent retrievals.
    91  	accountConcurrency = 16
    92  
     93  // storageConcurrency is the number of chunks to split a large contract
    94  	// storage trie into to allow concurrent retrievals.
    95  	storageConcurrency = 16
    96  )
    97  
    98  // ErrCancelled is returned from snap syncing if the operation was prematurely
    99  // terminated.
   100  var ErrCancelled = errors.New("sync cancelled")
   101  
   102  // accountRequest tracks a pending account range request to ensure responses are
   103  // to actual requests and to validate any security constraints.
   104  //
   105  // Concurrency note: account requests and responses are handled concurrently from
   106  // the main runloop to allow Merkle proof verifications on the peer's thread and
   107  // to drop on invalid response. The request struct must contain all the data to
   108  // construct the response without accessing runloop internals (i.e. task). That
   109  // is only included to allow the runloop to match a response to the task being
   110  // synced without having yet another set of maps.
   111  type accountRequest struct {
   112  	peer string    // Peer to which this request is assigned
   113  	id   uint64    // Request ID of this request
   114  	time time.Time // Timestamp when the request was sent
   115  
   116  	deliver chan *accountResponse // Channel to deliver successful response on
   117  	revert  chan *accountRequest  // Channel to deliver request failure on
   118  	cancel  chan struct{}         // Channel to track sync cancellation
   119  	timeout *time.Timer           // Timer to track delivery timeout
   120  	stale   chan struct{}         // Channel to signal the request was dropped
   121  
   122  	origin common.Hash // First account requested to allow continuation checks
   123  	limit  common.Hash // Last account requested to allow non-overlapping chunking
   124  
   125  	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
   126  }
   127  
   128  // accountResponse is an already Merkle-verified remote response to an account
   129  // range request. It contains the subtrie for the requested account range and
   130  // the database that's going to be filled with the internal nodes on commit.
   131  type accountResponse struct {
   132  	task *accountTask // Task which this request is filling
   133  
   134  	hashes   []common.Hash         // Account hashes in the returned range
   135  	accounts []*types.StateAccount // Expanded accounts in the returned range
   136  
   137  	cont bool // Whether the account range has a continuation
   138  }
   139  
   140  // bytecodeRequest tracks a pending bytecode request to ensure responses are to
   141  // actual requests and to validate any security constraints.
   142  //
   143  // Concurrency note: bytecode requests and responses are handled concurrently from
   144  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   145  // to drop on invalid response. The request struct must contain all the data to
   146  // construct the response without accessing runloop internals (i.e. task). That
   147  // is only included to allow the runloop to match a response to the task being
   148  // synced without having yet another set of maps.
   149  type bytecodeRequest struct {
   150  	peer string    // Peer to which this request is assigned
   151  	id   uint64    // Request ID of this request
   152  	time time.Time // Timestamp when the request was sent
   153  
   154  	deliver chan *bytecodeResponse // Channel to deliver successful response on
   155  	revert  chan *bytecodeRequest  // Channel to deliver request failure on
   156  	cancel  chan struct{}          // Channel to track sync cancellation
   157  	timeout *time.Timer            // Timer to track delivery timeout
   158  	stale   chan struct{}          // Channel to signal the request was dropped
   159  
   160  	hashes []common.Hash // Bytecode hashes to validate responses
   161  	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
   162  }
   163  
   164  // bytecodeResponse is an already verified remote response to a bytecode request.
   165  type bytecodeResponse struct {
   166  	task *accountTask // Task which this request is filling
   167  
   168  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   169  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   170  }
   171  
   172  // storageRequest tracks a pending storage ranges request to ensure responses are
   173  // to actual requests and to validate any security constraints.
   174  //
   175  // Concurrency note: storage requests and responses are handled concurrently from
    176  // the main runloop to allow Merkle proof verifications on the peer's thread and
   177  // to drop on invalid response. The request struct must contain all the data to
   178  // construct the response without accessing runloop internals (i.e. tasks). That
   179  // is only included to allow the runloop to match a response to the task being
   180  // synced without having yet another set of maps.
   181  type storageRequest struct {
   182  	peer string    // Peer to which this request is assigned
   183  	id   uint64    // Request ID of this request
   184  	time time.Time // Timestamp when the request was sent
   185  
   186  	deliver chan *storageResponse // Channel to deliver successful response on
   187  	revert  chan *storageRequest  // Channel to deliver request failure on
   188  	cancel  chan struct{}         // Channel to track sync cancellation
   189  	timeout *time.Timer           // Timer to track delivery timeout
   190  	stale   chan struct{}         // Channel to signal the request was dropped
   191  
   192  	accounts []common.Hash // Account hashes to validate responses
   193  	roots    []common.Hash // Storage roots to validate responses
   194  
   195  	origin common.Hash // First storage slot requested to allow continuation checks
   196  	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
   197  
   198  	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
   199  	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
   200  }
   201  
   202  // storageResponse is an already Merkle-verified remote response to a storage
   203  // range request. It contains the subtries for the requested storage ranges and
    204  // the databases that are going to be filled with the internal nodes on commit.
   205  type storageResponse struct {
   206  	mainTask *accountTask // Task which this response belongs to
   207  	subTask  *storageTask // Task which this response is filling
   208  
   209  	accounts []common.Hash // Account hashes requested, may be only partially filled
   210  	roots    []common.Hash // Storage roots requested, may be only partially filled
   211  
   212  	hashes [][]common.Hash // Storage slot hashes in the returned range
   213  	slots  [][][]byte      // Storage slot values in the returned range
   214  
   215  	cont bool // Whether the last storage range has a continuation
   216  }
   217  
   218  // trienodeHealRequest tracks a pending state trie request to ensure responses
   219  // are to actual requests and to validate any security constraints.
   220  //
   221  // Concurrency note: trie node requests and responses are handled concurrently from
   222  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   223  // to drop on invalid response. The request struct must contain all the data to
   224  // construct the response without accessing runloop internals (i.e. task). That
   225  // is only included to allow the runloop to match a response to the task being
   226  // synced without having yet another set of maps.
   227  type trienodeHealRequest struct {
   228  	peer string    // Peer to which this request is assigned
   229  	id   uint64    // Request ID of this request
   230  	time time.Time // Timestamp when the request was sent
   231  
   232  	deliver chan *trienodeHealResponse // Channel to deliver successful response on
   233  	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
   234  	cancel  chan struct{}              // Channel to track sync cancellation
   235  	timeout *time.Timer                // Timer to track delivery timeout
   236  	stale   chan struct{}              // Channel to signal the request was dropped
   237  
   238  	hashes []common.Hash   // Trie node hashes to validate responses
   239  	paths  []trie.SyncPath // Trie node paths requested for rescheduling
   240  
   241  	task *healTask // Task which this request is filling (only access fields through the runloop!!)
   242  }
   243  
   244  // trienodeHealResponse is an already verified remote response to a trie node request.
   245  type trienodeHealResponse struct {
   246  	task *healTask // Task which this request is filling
   247  
   248  	hashes []common.Hash   // Hashes of the trie nodes to avoid double hashing
   249  	paths  []trie.SyncPath // Trie node paths requested for rescheduling missing ones
   250  	nodes  [][]byte        // Actual trie nodes to store into the database (nil = missing)
   251  }
   252  
   253  // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
   254  // actual requests and to validate any security constraints.
   255  //
   256  // Concurrency note: bytecode requests and responses are handled concurrently from
   257  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   258  // to drop on invalid response. The request struct must contain all the data to
   259  // construct the response without accessing runloop internals (i.e. task). That
   260  // is only included to allow the runloop to match a response to the task being
   261  // synced without having yet another set of maps.
   262  type bytecodeHealRequest struct {
   263  	peer string    // Peer to which this request is assigned
   264  	id   uint64    // Request ID of this request
   265  	time time.Time // Timestamp when the request was sent
   266  
   267  	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
   268  	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
   269  	cancel  chan struct{}              // Channel to track sync cancellation
   270  	timeout *time.Timer                // Timer to track delivery timeout
   271  	stale   chan struct{}              // Channel to signal the request was dropped
   272  
   273  	hashes []common.Hash // Bytecode hashes to validate responses
   274  	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
   275  }
   276  
   277  // bytecodeHealResponse is an already verified remote response to a bytecode request.
   278  type bytecodeHealResponse struct {
   279  	task *healTask // Task which this request is filling
   280  
   281  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   282  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   283  }
   284  
   285  // accountTask represents the sync task for a chunk of the account snapshot.
   286  type accountTask struct {
   287  	// These fields get serialized to leveldb on shutdown
   288  	Next     common.Hash                    // Next account to sync in this interval
   289  	Last     common.Hash                    // Last account to sync in this interval
   290  	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
   291  
   292  	// These fields are internals used during runtime
   293  	req  *accountRequest  // Pending request to fill this task
    294  	res  *accountResponse // Validated response filling this task
   295  	pend int              // Number of pending subtasks for this round
   296  
   297  	needCode  []bool // Flags whether the filling accounts need code retrieval
   298  	needState []bool // Flags whether the filling accounts need storage retrieval
    299  	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
   300  
   301  	codeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval
   302  	stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
   303  
   304  	genBatch ethdb.Batch     // Batch used by the node generator
   305  	genTrie  *trie.StackTrie // Node generator from storage slots
   306  
   307  	done bool // Flag whether the task can be removed
   308  }
   309  
   310  // storageTask represents the sync task for a chunk of the storage snapshot.
   311  type storageTask struct {
   312  	Next common.Hash // Next account to sync in this interval
   313  	Last common.Hash // Last account to sync in this interval
   314  
   315  	// These fields are internals used during runtime
   316  	root common.Hash     // Storage root hash for this instance
   317  	req  *storageRequest // Pending request to fill this task
   318  
   319  	genBatch ethdb.Batch     // Batch used by the node generator
   320  	genTrie  *trie.StackTrie // Node generator from storage slots
   321  
   322  	done bool // Flag whether the task can be removed
   323  }
   324  
   325  // healTask represents the sync task for healing the snap-synced chunk boundaries.
   326  type healTask struct {
   327  	scheduler *trie.Sync // State trie sync scheduler defining the tasks
   328  
   329  	trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval
   330  	codeTasks map[common.Hash]struct{}      // Set of byte code tasks currently queued for retrieval
   331  }
   332  
   333  // syncProgress is a database entry to allow suspending and resuming a snapshot state
    334  // sync. Unlike full and fast sync, there is no way to restart a suspended
   335  // snap sync without prior knowledge of the suspension point.
   336  type syncProgress struct {
   337  	Tasks []*accountTask // The suspended account tasks (contract tasks within)
   338  
   339  	// Status report during syncing phase
   340  	AccountSynced  uint64             // Number of accounts downloaded
   341  	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   342  	BytecodeSynced uint64             // Number of bytecodes downloaded
   343  	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   344  	StorageSynced  uint64             // Number of storage slots downloaded
   345  	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   346  
   347  	// Status report during healing phase
   348  	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
   349  	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   350  	TrienodeHealDups   uint64             // Number of state trie nodes already processed
   351  	TrienodeHealNops   uint64             // Number of state trie nodes not requested
   352  	BytecodeHealSynced uint64             // Number of bytecodes downloaded
   353  	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   354  	BytecodeHealDups   uint64             // Number of bytecodes already processed
   355  	BytecodeHealNops   uint64             // Number of bytecodes not requested
   356  }
   357  
   358  // SyncPeer abstracts out the methods required for a peer to be synced against
   359  // with the goal of allowing the construction of mock peers without the full
   360  // blown networking.
   361  type SyncPeer interface {
   362  	// ID retrieves the peer's unique identifier.
   363  	ID() string
   364  
   365  	// RequestAccountRange fetches a batch of accounts rooted in a specific account
   366  	// trie, starting with the origin.
   367  	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
   368  
   369  	// RequestStorageRanges fetches a batch of storage slots belonging to one or
    370  	// more accounts. If slots from only one account are requested, an origin marker
   371  	// may also be used to retrieve from there.
   372  	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
   373  
   374  	// RequestByteCodes fetches a batch of bytecodes by hash.
   375  	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
   376  
   377  	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
    378  	// a specific state trie.
   379  	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
   380  
   381  	// Log retrieves the peer's own contextual logger.
   382  	Log() log.Logger
   383  }
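         // Editorial sketch (not part of the upstream file): a minimal no-op
         // implementation of SyncPeer, of the kind mock peers are built from in
         // tests. A real mock would additionally record the requests it receives
         // and feed canned responses back into the syncer's delivery methods.
         type nopSyncPeer struct{ id string }
         
         func (p *nopSyncPeer) ID() string { return p.id }
         
         func (p *nopSyncPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
         	return nil // a real mock would respond with an account range here
         }
         
         func (p *nopSyncPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
         	return nil
         }
         
         func (p *nopSyncPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
         	return nil
         }
         
         func (p *nopSyncPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
         	return nil
         }
         
         func (p *nopSyncPeer) Log() log.Logger { return log.New("peer", p.id) }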
   384  
   385  // Syncer is an Ethereum account and storage trie syncer based on snapshots and
    386  // the snap protocol. Its purpose is to download all the accounts and storage
   387  // slots from remote peers and reassemble chunks of the state trie, on top of
   388  // which a state sync can be run to fix any gaps / overlaps.
   389  //
   390  // Every network request has a variety of failure events:
   391  //   - The peer disconnects after task assignment, failing to send the request
   392  //   - The peer disconnects after sending the request, before delivering on it
   393  //   - The peer remains connected, but does not deliver a response in time
   394  //   - The peer delivers a stale response after a previous timeout
   395  //   - The peer delivers a refusal to serve the requested state
   396  type Syncer struct {
   397  	db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
   398  
   399  	root    common.Hash    // Current state trie root being synced
   400  	tasks   []*accountTask // Current account task set being synced
   401  	snapped bool           // Flag to signal that snap phase is done
   402  	healer  *healTask      // Current state healing task being executed
   403  	update  chan struct{}  // Notification channel for possible sync progression
   404  
   405  	peers    map[string]SyncPeer // Currently active peers to download from
   406  	peerJoin *event.Feed         // Event feed to react to peers joining
   407  	peerDrop *event.Feed         // Event feed to react to peers dropping
   408  	rates    *msgrate.Trackers   // Message throughput rates for peers
   409  
   410  	// Request tracking during syncing phase
   411  	statelessPeers map[string]struct{} // Peers that failed to deliver state data
   412  	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
   413  	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   414  	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests
   415  
   416  	accountReqs  map[uint64]*accountRequest  // Account requests currently running
   417  	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
   418  	storageReqs  map[uint64]*storageRequest  // Storage requests currently running
   419  
   420  	accountSynced  uint64             // Number of accounts downloaded
   421  	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   422  	bytecodeSynced uint64             // Number of bytecodes downloaded
   423  	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   424  	storageSynced  uint64             // Number of storage slots downloaded
   425  	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   426  
   427  	// Request tracking during healing phase
   428  	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
   429  	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   430  
   431  	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
   432  	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running
   433  
   434  	trienodeHealSynced uint64             // Number of state trie nodes downloaded
   435  	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   436  	trienodeHealDups   uint64             // Number of state trie nodes already processed
   437  	trienodeHealNops   uint64             // Number of state trie nodes not requested
   438  	bytecodeHealSynced uint64             // Number of bytecodes downloaded
   439  	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   440  	bytecodeHealDups   uint64             // Number of bytecodes already processed
   441  	bytecodeHealNops   uint64             // Number of bytecodes not requested
   442  
   443  	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
   444  	accountHealed      uint64             // Number of accounts downloaded during the healing stage
   445  	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
   446  	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
   447  	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
   448  
   449  	startTime time.Time // Time instance when snapshot sync started
   450  	logTime   time.Time // Time instance when status was last reported
   451  
   452  	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
   453  	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
   454  }
   455  
   456  // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
   457  // snap protocol.
   458  func NewSyncer(db ethdb.KeyValueStore) *Syncer {
   459  	return &Syncer{
   460  		db: db,
   461  
   462  		peers:    make(map[string]SyncPeer),
   463  		peerJoin: new(event.Feed),
   464  		peerDrop: new(event.Feed),
   465  		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
   466  		update:   make(chan struct{}, 1),
   467  
   468  		accountIdlers:  make(map[string]struct{}),
   469  		storageIdlers:  make(map[string]struct{}),
   470  		bytecodeIdlers: make(map[string]struct{}),
   471  
   472  		accountReqs:  make(map[uint64]*accountRequest),
   473  		storageReqs:  make(map[uint64]*storageRequest),
   474  		bytecodeReqs: make(map[uint64]*bytecodeRequest),
   475  
   476  		trienodeHealIdlers: make(map[string]struct{}),
   477  		bytecodeHealIdlers: make(map[string]struct{}),
   478  
   479  		trienodeHealReqs: make(map[uint64]*trienodeHealRequest),
   480  		bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest),
   481  		stateWriter:      db.NewBatch(),
   482  	}
   483  }
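         // Editorial sketch (not part of the upstream file): the intended call
         // pattern for the API above. The driver constructs a syncer over a
         // key-value store, registers peers as they connect, and blocks in Sync
         // until the state under the pivot root is complete or the cancel channel
         // is closed. rawdb.NewMemoryDatabase and pivotRoot are stand-ins used
         // purely for illustration.
         //
         //	syncer := NewSyncer(rawdb.NewMemoryDatabase())
         //	_ = syncer.Register(peer) // peer implements SyncPeer
         //
         //	cancel := make(chan struct{})
         //	if err := syncer.Sync(pivotRoot, cancel); err != nil {
         //		log.Error("Snap sync failed", "err", err)
         //	}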
   484  
   485  // Register injects a new data source into the syncer's peerset.
   486  func (s *Syncer) Register(peer SyncPeer) error {
   487  	// Make sure the peer is not registered yet
   488  	id := peer.ID()
   489  
   490  	s.lock.Lock()
   491  	if _, ok := s.peers[id]; ok {
   492  		log.Error("Snap peer already registered", "id", id)
   493  
   494  		s.lock.Unlock()
   495  		return errors.New("already registered")
   496  	}
   497  	s.peers[id] = peer
   498  	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
   499  
   500  	// Mark the peer as idle, even if no sync is running
   501  	s.accountIdlers[id] = struct{}{}
   502  	s.storageIdlers[id] = struct{}{}
   503  	s.bytecodeIdlers[id] = struct{}{}
   504  	s.trienodeHealIdlers[id] = struct{}{}
   505  	s.bytecodeHealIdlers[id] = struct{}{}
   506  	s.lock.Unlock()
   507  
   508  	// Notify any active syncs that a new peer can be assigned data
   509  	s.peerJoin.Send(id)
   510  	return nil
   511  }
   512  
    513  // Unregister removes a data source from the syncer's peerset.
   514  func (s *Syncer) Unregister(id string) error {
   515  	// Remove all traces of the peer from the registry
   516  	s.lock.Lock()
   517  	if _, ok := s.peers[id]; !ok {
   518  		log.Error("Snap peer not registered", "id", id)
   519  
   520  		s.lock.Unlock()
   521  		return errors.New("not registered")
   522  	}
   523  	delete(s.peers, id)
   524  	s.rates.Untrack(id)
   525  
   526  	// Remove status markers, even if no sync is running
   527  	delete(s.statelessPeers, id)
   528  
   529  	delete(s.accountIdlers, id)
   530  	delete(s.storageIdlers, id)
   531  	delete(s.bytecodeIdlers, id)
   532  	delete(s.trienodeHealIdlers, id)
   533  	delete(s.bytecodeHealIdlers, id)
   534  	s.lock.Unlock()
   535  
   536  	// Notify any active syncs that pending requests need to be reverted
   537  	s.peerDrop.Send(id)
   538  	return nil
   539  }
   540  
    541  // Sync starts (or resumes a previous) sync cycle to iterate over a state trie
   542  // with the given root and reconstruct the nodes based on the snapshot leaves.
    543  // Previously downloaded segments will not be redownloaded or fixed; rather, any
   544  // errors will be healed after the leaves are fully accumulated.
   545  func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
   546  	// Move the trie root from any previous value, revert stateless markers for
   547  	// any peers and initialize the syncer if it was not yet run
   548  	s.lock.Lock()
   549  	s.root = root
   550  	s.healer = &healTask{
   551  		scheduler: state.NewStateSync(root, s.db, nil, s.onHealState),
   552  		trieTasks: make(map[common.Hash]trie.SyncPath),
   553  		codeTasks: make(map[common.Hash]struct{}),
   554  	}
   555  	s.statelessPeers = make(map[string]struct{})
   556  	s.lock.Unlock()
   557  
   558  	if s.startTime == (time.Time{}) {
   559  		s.startTime = time.Now()
   560  	}
   561  	// Retrieve the previous sync status from LevelDB and abort if already synced
   562  	s.loadSyncStatus()
   563  	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   564  		log.Debug("Snapshot sync already completed")
   565  		return nil
   566  	}
   567  	defer func() { // Persist any progress, independent of failure
   568  		for _, task := range s.tasks {
   569  			s.forwardAccountTask(task)
   570  		}
   571  		s.cleanAccountTasks()
   572  		s.saveSyncStatus()
   573  	}()
   574  
   575  	log.Debug("Starting snapshot sync cycle", "root", root)
   576  
   577  	// Flush out the last committed raw states
   578  	defer func() {
   579  		if s.stateWriter.ValueSize() > 0 {
   580  			s.stateWriter.Write()
   581  			s.stateWriter.Reset()
   582  		}
   583  	}()
   584  	defer s.report(true)
   585  
   586  	// Whether sync completed or not, disregard any future packets
   587  	defer func() {
   588  		log.Debug("Terminating snapshot sync cycle", "root", root)
   589  		s.lock.Lock()
   590  		s.accountReqs = make(map[uint64]*accountRequest)
   591  		s.storageReqs = make(map[uint64]*storageRequest)
   592  		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
   593  		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
   594  		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
   595  		s.lock.Unlock()
   596  	}()
   597  	// Keep scheduling sync tasks
   598  	peerJoin := make(chan string, 16)
   599  	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
   600  	defer peerJoinSub.Unsubscribe()
   601  
   602  	peerDrop := make(chan string, 16)
   603  	peerDropSub := s.peerDrop.Subscribe(peerDrop)
   604  	defer peerDropSub.Unsubscribe()
   605  
   606  	// Create a set of unique channels for this sync cycle. We need these to be
   607  	// ephemeral so a data race doesn't accidentally deliver something stale on
   608  	// a persistent channel across syncs (yup, this happened)
   609  	var (
   610  		accountReqFails      = make(chan *accountRequest)
   611  		storageReqFails      = make(chan *storageRequest)
   612  		bytecodeReqFails     = make(chan *bytecodeRequest)
   613  		accountResps         = make(chan *accountResponse)
   614  		storageResps         = make(chan *storageResponse)
   615  		bytecodeResps        = make(chan *bytecodeResponse)
   616  		trienodeHealReqFails = make(chan *trienodeHealRequest)
   617  		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
   618  		trienodeHealResps    = make(chan *trienodeHealResponse)
   619  		bytecodeHealResps    = make(chan *bytecodeHealResponse)
   620  	)
   621  	for {
   622  		// Remove all completed tasks and terminate sync if everything's done
   623  		s.cleanStorageTasks()
   624  		s.cleanAccountTasks()
   625  		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   626  			return nil
   627  		}
   628  		// Assign all the data retrieval tasks to any free peers
   629  		s.assignAccountTasks(accountResps, accountReqFails, cancel)
   630  		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
   631  		s.assignStorageTasks(storageResps, storageReqFails, cancel)
   632  
   633  		if len(s.tasks) == 0 {
   634  			// Sync phase done, run heal phase
   635  			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
   636  			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
   637  		}
   638  		// Wait for something to happen
   639  		select {
   640  		case <-s.update:
   641  			// Something happened (new peer, delivery, timeout), recheck tasks
   642  		case <-peerJoin:
   643  			// A new peer joined, try to schedule it new tasks
   644  		case id := <-peerDrop:
   645  			s.revertRequests(id)
   646  		case <-cancel:
   647  			return ErrCancelled
   648  
   649  		case req := <-accountReqFails:
   650  			s.revertAccountRequest(req)
   651  		case req := <-bytecodeReqFails:
   652  			s.revertBytecodeRequest(req)
   653  		case req := <-storageReqFails:
   654  			s.revertStorageRequest(req)
   655  		case req := <-trienodeHealReqFails:
   656  			s.revertTrienodeHealRequest(req)
   657  		case req := <-bytecodeHealReqFails:
   658  			s.revertBytecodeHealRequest(req)
   659  
   660  		case res := <-accountResps:
   661  			s.processAccountResponse(res)
   662  		case res := <-bytecodeResps:
   663  			s.processBytecodeResponse(res)
   664  		case res := <-storageResps:
   665  			s.processStorageResponse(res)
   666  		case res := <-trienodeHealResps:
   667  			s.processTrienodeHealResponse(res)
   668  		case res := <-bytecodeHealResps:
   669  			s.processBytecodeHealResponse(res)
   670  		}
   671  		// Report stats if something meaningful happened
   672  		s.report(false)
   673  	}
   674  }
   675  
   676  // loadSyncStatus retrieves a previously aborted sync status from the database,
   677  // or generates a fresh one if none is available.
   678  func (s *Syncer) loadSyncStatus() {
   679  	var progress syncProgress
   680  
   681  	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
   682  		if err := json.Unmarshal(status, &progress); err != nil {
   683  			log.Error("Failed to decode snap sync status", "err", err)
   684  		} else {
   685  			for _, task := range progress.Tasks {
   686  				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
   687  			}
   688  			s.tasks = progress.Tasks
   689  			for _, task := range s.tasks {
   690  				task.genBatch = ethdb.HookedBatch{
   691  					Batch: s.db.NewBatch(),
   692  					OnPut: func(key []byte, value []byte) {
   693  						s.accountBytes += common.StorageSize(len(key) + len(value))
   694  					},
   695  				}
   696  				task.genTrie = trie.NewStackTrie(task.genBatch)
   697  
   698  				for _, subtasks := range task.SubTasks {
   699  					for _, subtask := range subtasks {
   700  						subtask.genBatch = ethdb.HookedBatch{
   701  							Batch: s.db.NewBatch(),
   702  							OnPut: func(key []byte, value []byte) {
   703  								s.storageBytes += common.StorageSize(len(key) + len(value))
   704  							},
   705  						}
   706  						subtask.genTrie = trie.NewStackTrie(subtask.genBatch)
   707  					}
   708  				}
   709  			}
   710  			s.snapped = len(s.tasks) == 0
   711  
   712  			s.accountSynced = progress.AccountSynced
   713  			s.accountBytes = progress.AccountBytes
   714  			s.bytecodeSynced = progress.BytecodeSynced
   715  			s.bytecodeBytes = progress.BytecodeBytes
   716  			s.storageSynced = progress.StorageSynced
   717  			s.storageBytes = progress.StorageBytes
   718  
   719  			s.trienodeHealSynced = progress.TrienodeHealSynced
   720  			s.trienodeHealBytes = progress.TrienodeHealBytes
   721  			s.bytecodeHealSynced = progress.BytecodeHealSynced
   722  			s.bytecodeHealBytes = progress.BytecodeHealBytes
   723  			return
   724  		}
   725  	}
    726  	// Either we've failed to decode the previous state, or there was none.
   727  	// Start a fresh sync by chunking up the account range and scheduling
   728  	// them for retrieval.
   729  	s.tasks = nil
   730  	s.accountSynced, s.accountBytes = 0, 0
   731  	s.bytecodeSynced, s.bytecodeBytes = 0, 0
   732  	s.storageSynced, s.storageBytes = 0, 0
   733  	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
   734  	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
   735  
   736  	var next common.Hash
   737  	step := new(big.Int).Sub(
   738  		new(big.Int).Div(
   739  			new(big.Int).Exp(common.Big2, common.Big256, nil),
   740  			big.NewInt(int64(accountConcurrency)),
   741  		), common.Big1,
   742  	)
   743  	for i := 0; i < accountConcurrency; i++ {
   744  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
   745  		if i == accountConcurrency-1 {
   746  			// Make sure we don't overflow if the step is not a proper divisor
   747  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   748  		}
   749  		batch := ethdb.HookedBatch{
   750  			Batch: s.db.NewBatch(),
   751  			OnPut: func(key []byte, value []byte) {
   752  				s.accountBytes += common.StorageSize(len(key) + len(value))
   753  			},
   754  		}
   755  		s.tasks = append(s.tasks, &accountTask{
   756  			Next:     next,
   757  			Last:     last,
   758  			SubTasks: make(map[common.Hash][]*storageTask),
   759  			genBatch: batch,
   760  			genTrie:  trie.NewStackTrie(batch),
   761  		})
   762  		log.Debug("Created account sync task", "from", next, "last", last)
   763  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
   764  	}
   765  }
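         // Editorial note (an added worked example, not part of the upstream
         // source): with accountConcurrency = 16, step is 2^256/16 - 1, so each
         // fresh task covers one sixteenth of the account hash space:
         //
         //	task  0: 0x0000...0000 -> 0x0fff...ffff
         //	task  1: 0x1000...0000 -> 0x1fff...ffff
         //	...
         //	task 15: 0xf000...0000 -> 0xffff...ffff (forced, to absorb rounding)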
   766  
   767  // saveSyncStatus marshals the remaining sync tasks into leveldb.
   768  func (s *Syncer) saveSyncStatus() {
   769  	// Serialize any partial progress to disk before spinning down
   770  	for _, task := range s.tasks {
   771  		if err := task.genBatch.Write(); err != nil {
   772  			log.Error("Failed to persist account slots", "err", err)
   773  		}
   774  		for _, subtasks := range task.SubTasks {
   775  			for _, subtask := range subtasks {
   776  				if err := subtask.genBatch.Write(); err != nil {
   777  					log.Error("Failed to persist storage slots", "err", err)
   778  				}
   779  			}
   780  		}
   781  	}
   782  	// Store the actual progress markers
   783  	progress := &syncProgress{
   784  		Tasks:              s.tasks,
   785  		AccountSynced:      s.accountSynced,
   786  		AccountBytes:       s.accountBytes,
   787  		BytecodeSynced:     s.bytecodeSynced,
   788  		BytecodeBytes:      s.bytecodeBytes,
   789  		StorageSynced:      s.storageSynced,
   790  		StorageBytes:       s.storageBytes,
   791  		TrienodeHealSynced: s.trienodeHealSynced,
   792  		TrienodeHealBytes:  s.trienodeHealBytes,
   793  		BytecodeHealSynced: s.bytecodeHealSynced,
   794  		BytecodeHealBytes:  s.bytecodeHealBytes,
   795  	}
   796  	status, err := json.Marshal(progress)
   797  	if err != nil {
   798  		panic(err) // This can only fail during implementation
   799  	}
   800  	rawdb.WriteSnapshotSyncStatus(s.db, status)
   801  }
   802  
   803  // cleanAccountTasks removes account range retrieval tasks that have already been
   804  // completed.
   805  func (s *Syncer) cleanAccountTasks() {
   806  	// If the sync was already done before, don't even bother
   807  	if len(s.tasks) == 0 {
   808  		return
   809  	}
   810  	// Sync wasn't finished previously, check for any task that can be finalized
   811  	for i := 0; i < len(s.tasks); i++ {
   812  		if s.tasks[i].done {
   813  			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
   814  			i--
   815  		}
   816  	}
    817  	// If everything was just finalized, generate the account trie and start healing
   818  	if len(s.tasks) == 0 {
   819  		s.lock.Lock()
   820  		s.snapped = true
   821  		s.lock.Unlock()
   822  
   823  		// Push the final sync report
   824  		s.reportSyncProgress(true)
   825  	}
   826  }
   827  
   828  // cleanStorageTasks iterates over all the account tasks and storage sub-tasks
   829  // within, cleaning any that have been completed.
   830  func (s *Syncer) cleanStorageTasks() {
   831  	for _, task := range s.tasks {
   832  		for account, subtasks := range task.SubTasks {
   833  			// Remove storage range retrieval tasks that completed
   834  			for j := 0; j < len(subtasks); j++ {
   835  				if subtasks[j].done {
   836  					subtasks = append(subtasks[:j], subtasks[j+1:]...)
   837  					j--
   838  				}
   839  			}
   840  			if len(subtasks) > 0 {
   841  				task.SubTasks[account] = subtasks
   842  				continue
   843  			}
   844  			// If all storage chunks are done, mark the account as done too
   845  			for j, hash := range task.res.hashes {
   846  				if hash == account {
   847  					task.needState[j] = false
   848  				}
   849  			}
   850  			delete(task.SubTasks, account)
   851  			task.pend--
   852  
   853  			// If this was the last pending task, forward the account task
   854  			if task.pend == 0 {
   855  				s.forwardAccountTask(task)
   856  			}
   857  		}
   858  	}
   859  }
   860  
   861  // assignAccountTasks attempts to match idle peers to pending account range
   862  // retrievals.
   863  func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
   864  	s.lock.Lock()
   865  	defer s.lock.Unlock()
   866  
   867  	// Sort the peers by download capacity to use faster ones if many available
   868  	idlers := &capacitySort{
   869  		ids:  make([]string, 0, len(s.accountIdlers)),
   870  		caps: make([]int, 0, len(s.accountIdlers)),
   871  	}
   872  	targetTTL := s.rates.TargetTimeout()
   873  	for id := range s.accountIdlers {
   874  		if _, ok := s.statelessPeers[id]; ok {
   875  			continue
   876  		}
   877  		idlers.ids = append(idlers.ids, id)
   878  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
   879  	}
   880  	if len(idlers.ids) == 0 {
   881  		return
   882  	}
   883  	sort.Sort(sort.Reverse(idlers))
   884  
   885  	// Iterate over all the tasks and try to find a pending one
   886  	for _, task := range s.tasks {
   887  		// Skip any tasks already filling
   888  		if task.req != nil || task.res != nil {
   889  			continue
   890  		}
   891  		// Task pending retrieval, try to find an idle peer. If no such peer
   892  		// exists, we probably assigned tasks for all (or they are stateless).
   893  		// Abort the entire assignment mechanism.
   894  		if len(idlers.ids) == 0 {
   895  			return
   896  		}
   897  		var (
   898  			idle = idlers.ids[0]
   899  			peer = s.peers[idle]
   900  			cap  = idlers.caps[0]
   901  		)
   902  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
   903  
   904  		// Matched a pending task to an idle peer, allocate a unique request id
   905  		var reqid uint64
   906  		for {
   907  			reqid = uint64(rand.Int63())
   908  			if reqid == 0 {
   909  				continue
   910  			}
   911  			if _, ok := s.accountReqs[reqid]; ok {
   912  				continue
   913  			}
   914  			break
   915  		}
   916  		// Generate the network query and send it to the peer
   917  		req := &accountRequest{
   918  			peer:    idle,
   919  			id:      reqid,
   920  			time:    time.Now(),
   921  			deliver: success,
   922  			revert:  fail,
   923  			cancel:  cancel,
   924  			stale:   make(chan struct{}),
   925  			origin:  task.Next,
   926  			limit:   task.Last,
   927  			task:    task,
   928  		}
   929  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
   930  			peer.Log().Debug("Account range request timed out", "reqid", reqid)
   931  			s.rates.Update(idle, AccountRangeMsg, 0, 0)
   932  			s.scheduleRevertAccountRequest(req)
   933  		})
   934  		s.accountReqs[reqid] = req
   935  		delete(s.accountIdlers, idle)
   936  
   937  		s.pend.Add(1)
   938  		go func(root common.Hash) {
   939  			defer s.pend.Done()
   940  
   941  			// Attempt to send the remote request and revert if it fails
   942  			if cap > maxRequestSize {
   943  				cap = maxRequestSize
   944  			}
   945  			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
   946  				cap = minRequestSize
   947  			}
   948  			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
   949  				peer.Log().Debug("Failed to request account range", "err", err)
   950  				s.scheduleRevertAccountRequest(req)
   951  			}
   952  		}(s.root)
   953  
   954  		// Inject the request into the task to block further assignments
   955  		task.req = req
   956  	}
   957  }
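         // Editorial note (not part of the excerpt above): capacitySort is
         // defined further down in this file. Conceptually it is a sort.Interface
         // over the two parallel slices, ordered ascending by capacity, which is
         // why the assignment loops wrap it in sort.Reverse to hand tasks to the
         // fastest peers first. A sketch of the idea:
         //
         //	func (s *capacitySort) Len() int           { return len(s.ids) }
         //	func (s *capacitySort) Less(i, j int) bool { return s.caps[i] < s.caps[j] }
         //	func (s *capacitySort) Swap(i, j int) {
         //		s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
         //		s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
         //	}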
   958  
   959  // assignBytecodeTasks attempts to match idle peers to pending code retrievals.
   960  func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
   961  	s.lock.Lock()
   962  	defer s.lock.Unlock()
   963  
   964  	// Sort the peers by download capacity to use faster ones if many available
   965  	idlers := &capacitySort{
   966  		ids:  make([]string, 0, len(s.bytecodeIdlers)),
   967  		caps: make([]int, 0, len(s.bytecodeIdlers)),
   968  	}
   969  	targetTTL := s.rates.TargetTimeout()
   970  	for id := range s.bytecodeIdlers {
   971  		if _, ok := s.statelessPeers[id]; ok {
   972  			continue
   973  		}
   974  		idlers.ids = append(idlers.ids, id)
   975  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
   976  	}
   977  	if len(idlers.ids) == 0 {
   978  		return
   979  	}
   980  	sort.Sort(sort.Reverse(idlers))
   981  
   982  	// Iterate over all the tasks and try to find a pending one
   983  	for _, task := range s.tasks {
   984  		// Skip any tasks not in the bytecode retrieval phase
   985  		if task.res == nil {
   986  			continue
   987  		}
   988  		// Skip tasks that are already retrieving (or done with) all codes
   989  		if len(task.codeTasks) == 0 {
   990  			continue
   991  		}
   992  		// Task pending retrieval, try to find an idle peer. If no such peer
   993  		// exists, we probably assigned tasks for all (or they are stateless).
   994  		// Abort the entire assignment mechanism.
   995  		if len(idlers.ids) == 0 {
   996  			return
   997  		}
   998  		var (
   999  			idle = idlers.ids[0]
  1000  			peer = s.peers[idle]
  1001  			cap  = idlers.caps[0]
  1002  		)
  1003  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1004  
  1005  		// Matched a pending task to an idle peer, allocate a unique request id
  1006  		var reqid uint64
  1007  		for {
  1008  			reqid = uint64(rand.Int63())
  1009  			if reqid == 0 {
  1010  				continue
  1011  			}
  1012  			if _, ok := s.bytecodeReqs[reqid]; ok {
  1013  				continue
  1014  			}
  1015  			break
  1016  		}
  1017  		// Generate the network query and send it to the peer
  1018  		if cap > maxCodeRequestCount {
  1019  			cap = maxCodeRequestCount
  1020  		}
  1021  		hashes := make([]common.Hash, 0, cap)
  1022  		for hash := range task.codeTasks {
  1023  			delete(task.codeTasks, hash)
  1024  			hashes = append(hashes, hash)
  1025  			if len(hashes) >= cap {
  1026  				break
  1027  			}
  1028  		}
  1029  		req := &bytecodeRequest{
  1030  			peer:    idle,
  1031  			id:      reqid,
  1032  			time:    time.Now(),
  1033  			deliver: success,
  1034  			revert:  fail,
  1035  			cancel:  cancel,
  1036  			stale:   make(chan struct{}),
  1037  			hashes:  hashes,
  1038  			task:    task,
  1039  		}
  1040  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1041  			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
  1042  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1043  			s.scheduleRevertBytecodeRequest(req)
  1044  		})
  1045  		s.bytecodeReqs[reqid] = req
  1046  		delete(s.bytecodeIdlers, idle)
  1047  
  1048  		s.pend.Add(1)
  1049  		go func() {
  1050  			defer s.pend.Done()
  1051  
  1052  			// Attempt to send the remote request and revert if it fails
  1053  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1054  				log.Debug("Failed to request bytecodes", "err", err)
  1055  				s.scheduleRevertBytecodeRequest(req)
  1056  			}
  1057  		}()
  1058  	}
  1059  }
  1060  
  1061  // assignStorageTasks attempts to match idle peers to pending storage range
  1062  // retrievals.
  1063  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1064  	s.lock.Lock()
  1065  	defer s.lock.Unlock()
  1066  
  1067  	// Sort the peers by download capacity to use faster ones if many available
  1068  	idlers := &capacitySort{
  1069  		ids:  make([]string, 0, len(s.storageIdlers)),
  1070  		caps: make([]int, 0, len(s.storageIdlers)),
  1071  	}
  1072  	targetTTL := s.rates.TargetTimeout()
  1073  	for id := range s.storageIdlers {
  1074  		if _, ok := s.statelessPeers[id]; ok {
  1075  			continue
  1076  		}
  1077  		idlers.ids = append(idlers.ids, id)
  1078  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1079  	}
  1080  	if len(idlers.ids) == 0 {
  1081  		return
  1082  	}
  1083  	sort.Sort(sort.Reverse(idlers))
  1084  
  1085  	// Iterate over all the tasks and try to find a pending one
  1086  	for _, task := range s.tasks {
  1087  		// Skip any tasks not in the storage retrieval phase
  1088  		if task.res == nil {
  1089  			continue
  1090  		}
  1091  		// Skip tasks that are already retrieving (or done with) all small states
  1092  		if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
  1093  			continue
  1094  		}
  1095  		// Task pending retrieval, try to find an idle peer. If no such peer
  1096  		// exists, we probably assigned tasks for all (or they are stateless).
  1097  		// Abort the entire assignment mechanism.
  1098  		if len(idlers.ids) == 0 {
  1099  			return
  1100  		}
  1101  		var (
  1102  			idle = idlers.ids[0]
  1103  			peer = s.peers[idle]
  1104  			cap  = idlers.caps[0]
  1105  		)
  1106  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1107  
  1108  		// Matched a pending task to an idle peer, allocate a unique request id
  1109  		var reqid uint64
  1110  		for {
  1111  			reqid = uint64(rand.Int63())
  1112  			if reqid == 0 {
  1113  				continue
  1114  			}
  1115  			if _, ok := s.storageReqs[reqid]; ok {
  1116  				continue
  1117  			}
  1118  			break
  1119  		}
  1120  		// Generate the network query and send it to the peer. If there are
  1121  		// large contract tasks pending, complete those before diving into
  1122  		// even more new contracts.
  1123  		if cap > maxRequestSize {
  1124  			cap = maxRequestSize
  1125  		}
  1126  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1127  			cap = minRequestSize
  1128  		}
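         		// Editorial note (not part of the upstream source): the division
         		// by 1024 below is a sizing heuristic, budgeting roughly 1KiB of
         		// response data per requested account so the batch of (account,
         		// root) pairs stays within the byte cap computed above.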
  1129  		storageSets := cap / 1024
  1130  
  1131  		var (
  1132  			accounts = make([]common.Hash, 0, storageSets)
  1133  			roots    = make([]common.Hash, 0, storageSets)
  1134  			subtask  *storageTask
  1135  		)
  1136  		for account, subtasks := range task.SubTasks {
  1137  			for _, st := range subtasks {
  1138  				// Skip any subtasks already filling
  1139  				if st.req != nil {
  1140  					continue
  1141  				}
  1142  				// Found an incomplete storage chunk, schedule it
  1143  				accounts = append(accounts, account)
  1144  				roots = append(roots, st.root)
  1145  				subtask = st
  1146  				break // Large contract chunks are downloaded individually
  1147  			}
  1148  			if subtask != nil {
  1149  				break // Large contract chunks are downloaded individually
  1150  			}
  1151  		}
  1152  		if subtask == nil {
   1153  			// No large contract required retrieval, but small ones are available
   1154  			for account, root := range task.stateTasks {
   1155  				delete(task.stateTasks, account)
   1156  
   1157  				accounts = append(accounts, account)
  1158  				roots = append(roots, root)
  1159  
  1160  				if len(accounts) >= storageSets {
  1161  					break
  1162  				}
  1163  			}
  1164  		}
  1165  		// If nothing was found, it means this task is actually already fully
  1166  		// retrieving, but large contracts are hard to detect. Skip to the next.
  1167  		if len(accounts) == 0 {
  1168  			continue
  1169  		}
  1170  		req := &storageRequest{
  1171  			peer:     idle,
  1172  			id:       reqid,
  1173  			time:     time.Now(),
  1174  			deliver:  success,
  1175  			revert:   fail,
  1176  			cancel:   cancel,
  1177  			stale:    make(chan struct{}),
  1178  			accounts: accounts,
  1179  			roots:    roots,
  1180  			mainTask: task,
  1181  			subTask:  subtask,
  1182  		}
  1183  		if subtask != nil {
  1184  			req.origin = subtask.Next
  1185  			req.limit = subtask.Last
  1186  		}
  1187  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1188  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1189  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1190  			s.scheduleRevertStorageRequest(req)
  1191  		})
  1192  		s.storageReqs[reqid] = req
  1193  		delete(s.storageIdlers, idle)
  1194  
  1195  		s.pend.Add(1)
  1196  		go func(root common.Hash) {
  1197  			defer s.pend.Done()
  1198  
  1199  			// Attempt to send the remote request and revert if it fails
  1200  			var origin, limit []byte
  1201  			if subtask != nil {
  1202  				origin, limit = req.origin[:], req.limit[:]
  1203  			}
  1204  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1205  				log.Debug("Failed to request storage", "err", err)
  1206  				s.scheduleRevertStorageRequest(req)
  1207  			}
  1208  		}(s.root)
  1209  
  1210  		// Inject the request into the subtask to block further assignments
  1211  		if subtask != nil {
  1212  			subtask.req = req
  1213  		}
  1214  	}
  1215  }
  1216  
  1217  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1218  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1219  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1220  	s.lock.Lock()
  1221  	defer s.lock.Unlock()
  1222  
  1223  	// Sort the peers by download capacity to use faster ones if many available
  1224  	idlers := &capacitySort{
  1225  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1226  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1227  	}
  1228  	targetTTL := s.rates.TargetTimeout()
  1229  	for id := range s.trienodeHealIdlers {
  1230  		if _, ok := s.statelessPeers[id]; ok {
  1231  			continue
  1232  		}
  1233  		idlers.ids = append(idlers.ids, id)
  1234  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1235  	}
  1236  	if len(idlers.ids) == 0 {
  1237  		return
  1238  	}
  1239  	sort.Sort(sort.Reverse(idlers))
  1240  
  1241  	// Iterate over pending tasks and try to find a peer to retrieve with
  1242  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1243  		// If there are not enough trie tasks queued to fully assign, fill the
  1244  		// queue from the state sync scheduler. The trie syncer schedules these
  1245  		// together with bytecodes, so we need to queue them combined.
  1246  		var (
  1247  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1248  			want = maxTrieRequestCount + maxCodeRequestCount
  1249  		)
  1250  		if have < want {
  1251  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1252  			for i, hash := range nodes {
  1253  				s.healer.trieTasks[hash] = paths[i]
  1254  			}
  1255  			for _, hash := range codes {
  1256  				s.healer.codeTasks[hash] = struct{}{}
  1257  			}
  1258  		}
  1259  		// If all the heal tasks are bytecodes or already downloading, bail
  1260  		if len(s.healer.trieTasks) == 0 {
  1261  			return
  1262  		}
  1263  		// Task pending retrieval, try to find an idle peer. If no such peer
  1264  		// exists, we probably assigned tasks for all (or they are stateless).
  1265  		// Abort the entire assignment mechanism.
  1266  		if len(idlers.ids) == 0 {
  1267  			return
  1268  		}
  1269  		var (
  1270  			idle = idlers.ids[0]
  1271  			peer = s.peers[idle]
  1272  			cap  = idlers.caps[0]
  1273  		)
  1274  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1275  
  1276  		// Matched a pending task to an idle peer, allocate a unique request id
  1277  		var reqid uint64
  1278  		for {
  1279  			reqid = uint64(rand.Int63())
  1280  			if reqid == 0 {
  1281  				continue
  1282  			}
  1283  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1284  				continue
  1285  			}
  1286  			break
  1287  		}
  1288  		// Generate the network query and send it to the peer
  1289  		if cap > maxTrieRequestCount {
  1290  			cap = maxTrieRequestCount
  1291  		}
  1292  		var (
  1293  			hashes   = make([]common.Hash, 0, cap)
  1294  			paths    = make([]trie.SyncPath, 0, cap)
  1295  			pathsets = make([]TrieNodePathSet, 0, cap)
  1296  		)
  1297  		for hash, pathset := range s.healer.trieTasks {
  1298  			delete(s.healer.trieTasks, hash)
  1299  
  1300  			hashes = append(hashes, hash)
  1301  			paths = append(paths, pathset)
  1302  			pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash
  1303  
  1304  			if len(hashes) >= cap {
  1305  				break
  1306  			}
  1307  		}
  1308  		req := &trienodeHealRequest{
  1309  			peer:    idle,
  1310  			id:      reqid,
  1311  			time:    time.Now(),
  1312  			deliver: success,
  1313  			revert:  fail,
  1314  			cancel:  cancel,
  1315  			stale:   make(chan struct{}),
  1316  			hashes:  hashes,
  1317  			paths:   paths,
  1318  			task:    s.healer,
  1319  		}
  1320  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1321  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1322  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1323  			s.scheduleRevertTrienodeHealRequest(req)
  1324  		})
  1325  		s.trienodeHealReqs[reqid] = req
  1326  		delete(s.trienodeHealIdlers, idle)
  1327  
  1328  		s.pend.Add(1)
  1329  		go func(root common.Hash) {
  1330  			defer s.pend.Done()
  1331  
  1332  			// Attempt to send the remote request and revert if it fails
  1333  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1334  				log.Debug("Failed to request trienode healers", "err", err)
  1335  				s.scheduleRevertTrienodeHealRequest(req)
  1336  			}
  1337  		}(s.root)
  1338  	}
  1339  }
  1340  
  1341  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1342  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1343  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1344  	s.lock.Lock()
  1345  	defer s.lock.Unlock()
  1346  
  1347  	// Sort the peers by download capacity to use faster ones if many available
  1348  	idlers := &capacitySort{
  1349  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1350  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1351  	}
  1352  	targetTTL := s.rates.TargetTimeout()
  1353  	for id := range s.bytecodeHealIdlers {
  1354  		if _, ok := s.statelessPeers[id]; ok {
  1355  			continue
  1356  		}
  1357  		idlers.ids = append(idlers.ids, id)
  1358  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1359  	}
  1360  	if len(idlers.ids) == 0 {
  1361  		return
  1362  	}
  1363  	sort.Sort(sort.Reverse(idlers))
  1364  
  1365  	// Iterate over pending tasks and try to find a peer to retrieve with
  1366  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1367  		// If there are not enough bytecode tasks queued to fully assign, fill the
  1368  		// queue from the state sync scheduler. The trie syncer schedules these
  1369  		// together with trie nodes, so we need to queue them combined.
  1370  		var (
  1371  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1372  			want = maxTrieRequestCount + maxCodeRequestCount
  1373  		)
  1374  		if have < want {
  1375  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1376  			for i, hash := range nodes {
  1377  				s.healer.trieTasks[hash] = paths[i]
  1378  			}
  1379  			for _, hash := range codes {
  1380  				s.healer.codeTasks[hash] = struct{}{}
  1381  			}
  1382  		}
  1383  		// If all the heal tasks are trienodes or already downloading, bail
  1384  		if len(s.healer.codeTasks) == 0 {
  1385  			return
  1386  		}
  1387  		// Task pending retrieval, try to find an idle peer. If no such peer
  1388  		// exists, we probably assigned tasks for all (or they are stateless).
  1389  		// Abort the entire assignment mechanism.
  1390  		if len(idlers.ids) == 0 {
  1391  			return
  1392  		}
  1393  		var (
  1394  			idle = idlers.ids[0]
  1395  			peer = s.peers[idle]
  1396  			cap  = idlers.caps[0]
  1397  		)
  1398  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1399  
  1400  		// Matched a pending task to an idle peer, allocate a unique request id
  1401  		var reqid uint64
  1402  		for {
  1403  			reqid = uint64(rand.Int63())
  1404  			if reqid == 0 {
  1405  				continue
  1406  			}
  1407  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1408  				continue
  1409  			}
  1410  			break
  1411  		}
  1412  		// Generate the network query and send it to the peer
  1413  		if cap > maxCodeRequestCount {
  1414  			cap = maxCodeRequestCount
  1415  		}
  1416  		hashes := make([]common.Hash, 0, cap)
  1417  		for hash := range s.healer.codeTasks {
  1418  			delete(s.healer.codeTasks, hash)
  1419  
  1420  			hashes = append(hashes, hash)
  1421  			if len(hashes) >= cap {
  1422  				break
  1423  			}
  1424  		}
  1425  		req := &bytecodeHealRequest{
  1426  			peer:    idle,
  1427  			id:      reqid,
  1428  			time:    time.Now(),
  1429  			deliver: success,
  1430  			revert:  fail,
  1431  			cancel:  cancel,
  1432  			stale:   make(chan struct{}),
  1433  			hashes:  hashes,
  1434  			task:    s.healer,
  1435  		}
  1436  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1437  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1438  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1439  			s.scheduleRevertBytecodeHealRequest(req)
  1440  		})
  1441  		s.bytecodeHealReqs[reqid] = req
  1442  		delete(s.bytecodeHealIdlers, idle)
  1443  
  1444  		s.pend.Add(1)
  1445  		go func() {
  1446  			defer s.pend.Done()
  1447  
  1448  			// Attempt to send the remote request and revert if it fails
  1449  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1450  				log.Debug("Failed to request bytecode healers", "err", err)
  1451  				s.scheduleRevertBytecodeHealRequest(req)
  1452  			}
  1453  		}()
  1454  	}
  1455  }
  1456  
  1457  // revertRequests locates all the currently pending requests from a particular
  1458  // peer and reverts them, rescheduling for others to fulfill.
  1459  func (s *Syncer) revertRequests(peer string) {
  1460  	// Gather the requests first; the reverts need the lock too
  1461  	s.lock.Lock()
  1462  	var accountReqs []*accountRequest
  1463  	for _, req := range s.accountReqs {
  1464  		if req.peer == peer {
  1465  			accountReqs = append(accountReqs, req)
  1466  		}
  1467  	}
  1468  	var bytecodeReqs []*bytecodeRequest
  1469  	for _, req := range s.bytecodeReqs {
  1470  		if req.peer == peer {
  1471  			bytecodeReqs = append(bytecodeReqs, req)
  1472  		}
  1473  	}
  1474  	var storageReqs []*storageRequest
  1475  	for _, req := range s.storageReqs {
  1476  		if req.peer == peer {
  1477  			storageReqs = append(storageReqs, req)
  1478  		}
  1479  	}
  1480  	var trienodeHealReqs []*trienodeHealRequest
  1481  	for _, req := range s.trienodeHealReqs {
  1482  		if req.peer == peer {
  1483  			trienodeHealReqs = append(trienodeHealReqs, req)
  1484  		}
  1485  	}
  1486  	var bytecodeHealReqs []*bytecodeHealRequest
  1487  	for _, req := range s.bytecodeHealReqs {
  1488  		if req.peer == peer {
  1489  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1490  		}
  1491  	}
  1492  	s.lock.Unlock()
  1493  
  1494  	// Revert all the requests matching the peer
  1495  	for _, req := range accountReqs {
  1496  		s.revertAccountRequest(req)
  1497  	}
  1498  	for _, req := range bytecodeReqs {
  1499  		s.revertBytecodeRequest(req)
  1500  	}
  1501  	for _, req := range storageReqs {
  1502  		s.revertStorageRequest(req)
  1503  	}
  1504  	for _, req := range trienodeHealReqs {
  1505  		s.revertTrienodeHealRequest(req)
  1506  	}
  1507  	for _, req := range bytecodeHealReqs {
  1508  		s.revertBytecodeHealRequest(req)
  1509  	}
  1510  }
  1511  
  1512  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1513  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1514  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
  1515  	select {
  1516  	case req.revert <- req:
  1517  		// Sync event loop notified
  1518  	case <-req.cancel:
  1519  		// Sync cycle got cancelled
  1520  	case <-req.stale:
  1521  		// Request already reverted
  1522  	}
  1523  }
  1524  
  1525  // revertAccountRequest cleans up an account range request and returns all failed
  1526  // retrieval tasks to the scheduler for reassignment.
  1527  //
  1528  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1529  // On peer threads, use scheduleRevertAccountRequest.
  1530  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1531  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1532  	select {
  1533  	case <-req.stale:
  1534  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1535  		return
  1536  	default:
  1537  	}
  1538  	close(req.stale)
  1539  
  1540  	// Remove the request from the tracked set
  1541  	s.lock.Lock()
  1542  	delete(s.accountReqs, req.id)
  1543  	s.lock.Unlock()
  1544  
  1545  	// If there's a timeout timer still running, abort it and mark the account
  1546  	// task as not-pending, ready for rescheduling
  1547  	req.timeout.Stop()
  1548  	if req.task.req == req {
  1549  		req.task.req = nil
  1550  	}
  1551  }
  1552  
  1553  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1554  // and return all failed retrieval tasks to the scheduler for reassignment.
  1555  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1556  	select {
  1557  	case req.revert <- req:
  1558  		// Sync event loop notified
  1559  	case <-req.cancel:
  1560  		// Sync cycle got cancelled
  1561  	case <-req.stale:
  1562  		// Request already reverted
  1563  	}
  1564  }
  1565  
  1566  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1567  // retrieval tasks to the scheduler for reassignment.
  1568  //
  1569  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1570  // On peer threads, use scheduleRevertBytecodeRequest.
  1571  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1572  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1573  	select {
  1574  	case <-req.stale:
  1575  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1576  		return
  1577  	default:
  1578  	}
  1579  	close(req.stale)
  1580  
  1581  	// Remove the request from the tracked set
  1582  	s.lock.Lock()
  1583  	delete(s.bytecodeReqs, req.id)
  1584  	s.lock.Unlock()
  1585  
  1586  	// If there's a timeout timer still running, abort it and mark the code
  1587  	// retrievals as not-pending, ready for rescheduling
  1588  	req.timeout.Stop()
  1589  	for _, hash := range req.hashes {
  1590  		req.task.codeTasks[hash] = struct{}{}
  1591  	}
  1592  }
  1593  
  1594  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1595  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1596  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1597  	select {
  1598  	case req.revert <- req:
  1599  		// Sync event loop notified
  1600  	case <-req.cancel:
  1601  		// Sync cycle got cancelled
  1602  	case <-req.stale:
  1603  		// Request already reverted
  1604  	}
  1605  }
  1606  
  1607  // revertStorageRequest cleans up a storage range request and returns all failed
  1608  // retrieval tasks to the scheduler for reassignment.
  1609  //
  1610  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1611  // On peer threads, use scheduleRevertStorageRequest.
  1612  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1613  	log.Debug("Reverting storage request", "peer", req.peer)
  1614  	select {
  1615  	case <-req.stale:
  1616  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1617  		return
  1618  	default:
  1619  	}
  1620  	close(req.stale)
  1621  
  1622  	// Remove the request from the tracked set
  1623  	s.lock.Lock()
  1624  	delete(s.storageReqs, req.id)
  1625  	s.lock.Unlock()
  1626  
  1627  	// If there's a timeout timer still running, abort it and mark the storage
  1628  	// task as not-pending, ready for rescheduling
  1629  	req.timeout.Stop()
  1630  	if req.subTask != nil {
  1631  		req.subTask.req = nil
  1632  	} else {
  1633  		for i, account := range req.accounts {
  1634  			req.mainTask.stateTasks[account] = req.roots[i]
  1635  		}
  1636  	}
  1637  }
  1638  
  1639  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1640  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1641  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1642  	select {
  1643  	case req.revert <- req:
  1644  		// Sync event loop notified
  1645  	case <-req.cancel:
  1646  		// Sync cycle got cancelled
  1647  	case <-req.stale:
  1648  		// Request already reverted
  1649  	}
  1650  }
  1651  
  1652  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1653  // failed retrieval tasks to the scheduler for reassignment.
  1654  //
  1655  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1656  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1657  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1658  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1659  	select {
  1660  	case <-req.stale:
  1661  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1662  		return
  1663  	default:
  1664  	}
  1665  	close(req.stale)
  1666  
  1667  	// Remove the request from the tracked set
  1668  	s.lock.Lock()
  1669  	delete(s.trienodeHealReqs, req.id)
  1670  	s.lock.Unlock()
  1671  
  1672  	// If there's a timeout timer still running, abort it and mark the trie node
  1673  	// retrievals as not-pending, ready for rescheduling
  1674  	req.timeout.Stop()
  1675  	for i, hash := range req.hashes {
  1676  		req.task.trieTasks[hash] = req.paths[i]
  1677  	}
  1678  }
  1679  
  1680  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1681  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1682  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1683  	select {
  1684  	case req.revert <- req:
  1685  		// Sync event loop notified
  1686  	case <-req.cancel:
  1687  		// Sync cycle got cancelled
  1688  	case <-req.stale:
  1689  		// Request already reverted
  1690  	}
  1691  }
  1692  
  1693  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1694  // failed retrieval tasks to the scheduler for reassignment.
  1695  //
  1696  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1697  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1698  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1699  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1700  	select {
  1701  	case <-req.stale:
  1702  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1703  		return
  1704  	default:
  1705  	}
  1706  	close(req.stale)
  1707  
  1708  	// Remove the request from the tracked set
  1709  	s.lock.Lock()
  1710  	delete(s.bytecodeHealReqs, req.id)
  1711  	s.lock.Unlock()
  1712  
  1713  	// If there's a timeout timer still running, abort it and mark the code
  1714  	// retrievals as not-pending, ready for rescheduling
  1715  	req.timeout.Stop()
  1716  	for _, hash := range req.hashes {
  1717  		req.task.codeTasks[hash] = struct{}{}
  1718  	}
  1719  }
  1720  
  1721  // processAccountResponse integrates an already validated account range response
  1722  // into the account tasks.
  1723  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1724  	// Switch the task from pending to filling
  1725  	res.task.req = nil
  1726  	res.task.res = res
  1727  
  1728  	// Ensure that the response doesn't overflow into the subsequent task
  1729  	last := res.task.Last.Big()
  1730  	for i, hash := range res.hashes {
  1731  		// Mark the range complete if the last element is already included.
  1732  		// Keep iterating to delete any extra states beyond it.
  1733  		cmp := hash.Big().Cmp(last)
  1734  		if cmp == 0 {
  1735  			res.cont = false
  1736  			continue
  1737  		}
  1738  		if cmp > 0 {
  1739  			// Chunk overflown, cut off excess
  1740  			res.hashes = res.hashes[:i]
  1741  			res.accounts = res.accounts[:i]
  1742  			res.cont = false // Mark range completed
  1743  			break
  1744  		}
  1745  	}
  1746  	// Iterate over all the accounts and assemble which ones need further sub-
  1747  	// filling before the entire account range can be persisted.
  1748  	res.task.needCode = make([]bool, len(res.accounts))
  1749  	res.task.needState = make([]bool, len(res.accounts))
  1750  	res.task.needHeal = make([]bool, len(res.accounts))
  1751  
  1752  	res.task.codeTasks = make(map[common.Hash]struct{})
  1753  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1754  
  1755  	resumed := make(map[common.Hash]struct{})
  1756  
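        	// pend counts the number of outstanding code and state retrievals for
        	// this chunk; once it drops to zero, the task is forwarded to its next
        	// account range (see forwardAccountTask).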
  1757  	res.task.pend = 0
  1758  	for i, account := range res.accounts {
  1759  		// Check if the account is a contract with an unknown code
  1760  		if !bytes.Equal(account.KeccakCodeHash, emptyKeccakCodeHash[:]) {
  1761  			if code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.KeccakCodeHash)); code == nil {
  1762  				res.task.codeTasks[common.BytesToHash(account.KeccakCodeHash)] = struct{}{}
  1763  				res.task.needCode[i] = true
  1764  				res.task.pend++
  1765  			}
  1766  		}
  1767  		// Check if the account is a contract with an unknown storage trie
  1768  		if account.Root != emptyRoot {
  1769  			if node, err := s.db.Get(account.Root[:]); err != nil || node == nil {
  1770  				// If there was a previous large state retrieval in progress,
  1771  				// don't restart it from scratch. This happens if a sync cycle
  1772  				// is interrupted and resumed later. However, *do* update the
  1773  				// previous root hash.
  1774  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1775  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1776  					for _, subtask := range subtasks {
  1777  						subtask.root = account.Root
  1778  					}
  1779  					res.task.needHeal[i] = true
  1780  					resumed[res.hashes[i]] = struct{}{}
  1781  				} else {
  1782  					res.task.stateTasks[res.hashes[i]] = account.Root
  1783  				}
  1784  				res.task.needState[i] = true
  1785  				res.task.pend++
  1786  			}
  1787  		}
  1788  	}
  1789  	// Delete any subtasks that have been aborted but not resumed. This may undo
  1790  // some progress if a new peer gives us fewer accounts than an old one, but for
  1791  	// now we have to live with that.
  1792  	for hash := range res.task.SubTasks {
  1793  		if _, ok := resumed[hash]; !ok {
  1794  			log.Debug("Aborting suspended storage retrieval", "account", hash)
  1795  			delete(res.task.SubTasks, hash)
  1796  		}
  1797  	}
  1798  	// If the account range contained no contracts, or all have been fully filled
  1799  	// beforehand, short circuit storage filling and forward to the next task
  1800  	if res.task.pend == 0 {
  1801  		s.forwardAccountTask(res.task)
  1802  		return
  1803  	}
  1804  	// Some accounts are incomplete, leave as is for the storage and contract
  1805  	// task assigners to pick up and fill.
  1806  }
  1807  
  1808  // processBytecodeResponse integrates an already validated bytecode response
  1809  // into the account tasks.
  1810  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1811  	batch := s.db.NewBatch()
  1812  
  1813  	var (
  1814  		codes uint64
  1815  	)
  1816  	for i, hash := range res.hashes {
  1817  		code := res.codes[i]
  1818  
  1819  		// If the bytecode was not delivered, reschedule it
  1820  		if code == nil {
  1821  			res.task.codeTasks[hash] = struct{}{}
  1822  			continue
  1823  		}
  1824  		// Code was delivered, mark it not needed any more
  1825  		for j, account := range res.task.res.accounts {
  1826  			if res.task.needCode[j] && hash == common.BytesToHash(account.KeccakCodeHash) {
  1827  				res.task.needCode[j] = false
  1828  				res.task.pend--
  1829  			}
  1830  		}
  1831  		// Push the bytecode into a database batch
  1832  		codes++
  1833  		rawdb.WriteCode(batch, hash, code)
  1834  	}
  1835  	bytes := common.StorageSize(batch.ValueSize())
  1836  	if err := batch.Write(); err != nil {
  1837  		log.Crit("Failed to persist bytecodes", "err", err)
  1838  	}
  1839  	s.bytecodeSynced += codes
  1840  	s.bytecodeBytes += bytes
  1841  
  1842  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes)
  1843  
  1844  	// If this delivery completed the last pending task, forward the account task
  1845  	// to the next chunk
  1846  	if res.task.pend == 0 {
  1847  		s.forwardAccountTask(res.task)
  1848  		return
  1849  	}
  1850  	// Some accounts are still incomplete, leave as is for the storage and contract
  1851  	// task assigners to pick up and fill.
  1852  }
  1853  
  1854  // processStorageResponse integrates an already validated storage response
  1855  // into the account tasks.
  1856  func (s *Syncer) processStorageResponse(res *storageResponse) {
  1857  	// Switch the subtask from pending to idle
  1858  	if res.subTask != nil {
  1859  		res.subTask.req = nil
  1860  	}
  1861  	batch := ethdb.HookedBatch{
  1862  		Batch: s.db.NewBatch(),
  1863  		OnPut: func(key []byte, value []byte) {
  1864  			s.storageBytes += common.StorageSize(len(key) + len(value))
  1865  		},
  1866  	}
  1867  	var (
  1868  		slots           int
  1869  		oldStorageBytes = s.storageBytes
  1870  	)
  1871  	// Iterate over all the accounts and reconstruct their storage tries from the
  1872  	// delivered slots
  1873  	for i, account := range res.accounts {
  1874  		// If the account was not delivered, reschedule it
  1875  		if i >= len(res.hashes) {
  1876  			res.mainTask.stateTasks[account] = res.roots[i]
  1877  			continue
  1878  		}
  1879  		// State was delivered; if complete, mark it as no longer needed,
  1880  		// otherwise mark the account as needing healing
  1881  		for j, hash := range res.mainTask.res.hashes {
  1882  			if account != hash {
  1883  				continue
  1884  			}
  1885  			acc := res.mainTask.res.accounts[j]
  1886  
  1887  			// If the packet contains multiple contract storage slots, all
  1888  			// but the last are surely complete. The last contract may be
  1889  			// chunked, so check its continuation flag.
  1890  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  1891  				res.mainTask.needState[j] = false
  1892  				res.mainTask.pend--
  1893  			}
  1894  			// If the last contract was chunked, mark it as needing healing
  1895  			// to avoid writing it out to disk prematurely.
  1896  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  1897  				res.mainTask.needHeal[j] = true
  1898  			}
  1899  			// If the last contract was chunked, we need to switch to large
  1900  			// contract handling mode
  1901  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  1902  				// If we haven't yet started a large-contract retrieval, create
  1903  				// the subtasks for it within the main account task
  1904  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  1905  					var (
  1906  						keys    = res.hashes[i]
  1907  						chunks  = uint64(storageConcurrency)
  1908  						lastKey common.Hash
  1909  					)
  1910  					if len(keys) > 0 {
  1911  						lastKey = keys[len(keys)-1]
  1912  					}
  1913  					// If the number of slots remaining is low, decrease the
  1914  					// number of chunks. Somewhere on the order of 10-15K slots
  1915  					// fit into a packet of 500KB. A key/slot pair is maximum 64
  1916  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  1917  					//
  1918  					// Chunk so that at least 2 packets are needed to fill a task.
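        					//
        					// As a rough worked example: with maxRequestSize = 512KiB,
        					// one packet holds pessimistically 512*1024/64 = 8192 slots,
        					// so a two-packet task is 16384 slots. An estimated 100K
        					// remaining slots would then yield 100000/16384 = 6, i.e.
        					// 7 chunks, capped by storageConcurrency above.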
  1919  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  1920  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  1921  							chunks = n + 1
  1922  						}
  1923  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  1924  					} else {
  1925  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  1926  					}
  1927  					r := newHashRange(lastKey, chunks)
  1928  
  1929  					// Our first task is the one that was just filled by this response.
  1930  					batch := ethdb.HookedBatch{
  1931  						Batch: s.db.NewBatch(),
  1932  						OnPut: func(key []byte, value []byte) {
  1933  							s.storageBytes += common.StorageSize(len(key) + len(value))
  1934  						},
  1935  					}
  1936  					tasks = append(tasks, &storageTask{
  1937  						Next:     common.Hash{},
  1938  						Last:     r.End(),
  1939  						root:     acc.Root,
  1940  						genBatch: batch,
  1941  						genTrie:  trie.NewStackTrie(batch),
  1942  					})
  1943  					for r.Next() {
  1944  						batch := ethdb.HookedBatch{
  1945  							Batch: s.db.NewBatch(),
  1946  							OnPut: func(key []byte, value []byte) {
  1947  								s.storageBytes += common.StorageSize(len(key) + len(value))
  1948  							},
  1949  						}
  1950  						tasks = append(tasks, &storageTask{
  1951  							Next:     r.Start(),
  1952  							Last:     r.End(),
  1953  							root:     acc.Root,
  1954  							genBatch: batch,
  1955  							genTrie:  trie.NewStackTrie(batch),
  1956  						})
  1957  					}
  1958  					for _, task := range tasks {
  1959  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  1960  					}
  1961  					res.mainTask.SubTasks[account] = tasks
  1962  
  1963  					// Since we've just created the sub-tasks, this response
  1964  					// is surely for the first one (zero origin)
  1965  					res.subTask = tasks[0]
  1966  				}
  1967  			}
  1968  			// If we're in large contract delivery mode, forward the subtask
  1969  			if res.subTask != nil {
  1970  				// Ensure the response doesn't overflow into the subsequent task
  1971  				last := res.subTask.Last.Big()
  1972  				// Find the first overflowing key. While at it, mark res as complete
  1973  				// if the range includes or passes 'last'
  1974  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  1975  					cmp := res.hashes[i][k].Big().Cmp(last)
  1976  					if cmp >= 0 {
  1977  						res.cont = false
  1978  					}
  1979  					return cmp > 0
  1980  				})
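        				// sort.Search returns len(res.hashes[i]) if nothing overflows,
        				// making the slicing below a no-op when the response stays
        				// within the chunk.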
  1981  				if index >= 0 {
  1982  					// cut off excess
  1983  					res.hashes[i] = res.hashes[i][:index]
  1984  					res.slots[i] = res.slots[i][:index]
  1985  				}
  1986  				// Forward the relevant storage chunk (even if created just now)
  1987  				if res.cont {
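        					// Advance the chunk cursor just past the last delivered slot,
        					// so the next request resumes exactly where this one ended.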
  1988  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  1989  				} else {
  1990  					res.subTask.done = true
  1991  				}
  1992  			}
  1993  		}
  1994  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  1995  		// push them to disk. If the contract is chunked, the trie nodes will be
  1996  		// reconstructed later.
  1997  		slots += len(res.hashes[i])
  1998  
  1999  		if i < len(res.hashes)-1 || res.subTask == nil {
  2000  			tr := trie.NewStackTrie(batch)
  2001  			for j := 0; j < len(res.hashes[i]); j++ {
  2002  				tr.Update(res.hashes[i][j][:], res.slots[i][j])
  2003  			}
  2004  			tr.Commit()
  2005  		}
  2006  		// Persist the received storage segments. This flat state may be
  2007  		// outdated during the sync, but it can be fixed later during
  2008  		// snapshot generation.
  2009  		for j := 0; j < len(res.hashes[i]); j++ {
  2010  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2011  
  2012  			// If we're storing large contracts, generate the trie nodes
  2013  			// on the fly to not trash the gluing points
  2014  			if i == len(res.hashes)-1 && res.subTask != nil {
  2015  				res.subTask.genTrie.Update(res.hashes[i][j][:], res.slots[i][j])
  2016  			}
  2017  		}
  2018  	}
  2019  	// Large contracts could have generated new trie nodes, flush them to disk
  2020  	if res.subTask != nil {
  2021  		if res.subTask.done {
  2022  			if root, err := res.subTask.genTrie.Commit(); err != nil {
  2023  				log.Error("Failed to commit stack slots", "err", err)
  2024  			} else if root == res.subTask.root {
  2025  			// If the chunk's root matches despite the overflow, the delivery was complete; clear the heal request
  2026  				for i, account := range res.mainTask.res.hashes {
  2027  					if account == res.accounts[len(res.accounts)-1] {
  2028  						res.mainTask.needHeal[i] = false
  2029  					}
  2030  				}
  2031  			}
  2032  		}
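        		// Flush the generated trie nodes in chunks to keep memory bounded,
        		// or force a final flush once the subtask is done.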
  2033  		if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
  2034  			if err := res.subTask.genBatch.Write(); err != nil {
  2035  				log.Error("Failed to persist stack slots", "err", err)
  2036  			}
  2037  			res.subTask.genBatch.Reset()
  2038  		}
  2039  	}
  2040  	// Flush anything written just now and update the stats
  2041  	if err := batch.Write(); err != nil {
  2042  		log.Crit("Failed to persist storage slots", "err", err)
  2043  	}
  2044  	s.storageSynced += uint64(slots)
  2045  
  2046  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2047  
  2048  	// If this delivery completed the last pending task, forward the account task
  2049  	// to the next chunk
  2050  	if res.mainTask.pend == 0 {
  2051  		s.forwardAccountTask(res.mainTask)
  2052  		return
  2053  	}
  2054  	// Some accounts are still incomplete, leave as is for the storage and contract
  2055  	// task assigners to pick up and fill.
  2056  }
  2057  
  2058  // processTrienodeHealResponse integrates an already validated trienode response
  2059  // into the healer tasks.
  2060  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2061  	for i, hash := range res.hashes {
  2062  		node := res.nodes[i]
  2063  
  2064  		// If the trie node was not delivered, reschedule it
  2065  		if node == nil {
  2066  			res.task.trieTasks[hash] = res.paths[i]
  2067  			continue
  2068  		}
  2069  		// Push the trie node into the state syncer
  2070  		s.trienodeHealSynced++
  2071  		s.trienodeHealBytes += common.StorageSize(len(node))
  2072  
  2073  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2074  		switch err {
  2075  		case nil:
  2076  		case trie.ErrAlreadyProcessed:
  2077  			s.trienodeHealDups++
  2078  		case trie.ErrNotRequested:
  2079  			s.trienodeHealNops++
  2080  		default:
  2081  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2082  		}
  2083  	}
  2084  	batch := s.db.NewBatch()
  2085  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2086  		log.Error("Failed to commit healing data", "err", err)
  2087  	}
  2088  	if err := batch.Write(); err != nil {
  2089  		log.Crit("Failed to persist healing data", "err", err)
  2090  	}
  2091  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2092  }
  2093  
  2094  // processBytecodeHealResponse integrates an already validated bytecode response
  2095  // into the healer tasks.
  2096  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2097  	for i, hash := range res.hashes {
  2098  		node := res.codes[i]
  2099  
  2100  		// If the trie node was not delivered, reschedule it
  2101  		if node == nil {
  2102  			res.task.codeTasks[hash] = struct{}{}
  2103  			continue
  2104  		}
  2105  		// Push the trie node into the state syncer
  2106  		s.bytecodeHealSynced++
  2107  		s.bytecodeHealBytes += common.StorageSize(len(node))
  2108  
  2109  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2110  		switch err {
  2111  		case nil:
  2112  		case trie.ErrAlreadyProcessed:
  2113  			s.bytecodeHealDups++
  2114  		case trie.ErrNotRequested:
  2115  			s.bytecodeHealNops++
  2116  		default:
  2117  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2118  		}
  2119  	}
  2120  	batch := s.db.NewBatch()
  2121  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2122  		log.Error("Failed to commit healing data", "err", err)
  2123  	}
  2124  	if err := batch.Write(); err != nil {
  2125  		log.Crit("Failed to persist healing data", "err", err)
  2126  	}
  2127  	log.Debug("Persisted set of healing data", "type", "bytecode", "bytes", common.StorageSize(batch.ValueSize()))
  2128  }
  2129  
  2130  // forwardAccountTask takes a filled account task and persists anything available
  2131  // into the database, after which it forwards the next account marker so that the
  2132  // task's next chunk may be filled.
  2133  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2134  	// Remove any pending delivery
  2135  	res := task.res
  2136  	if res == nil {
  2137  		return // nothing to forward
  2138  	}
  2139  	task.res = nil
  2140  
  2141  	// Persist the received account segments. This flat state may be
  2142  	// outdated during the sync, but it can be fixed later during
  2143  	// snapshot generation.
  2144  	oldAccountBytes := s.accountBytes
  2145  
  2146  	batch := ethdb.HookedBatch{
  2147  		Batch: s.db.NewBatch(),
  2148  		OnPut: func(key []byte, value []byte) {
  2149  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2150  		},
  2151  	}
  2152  	for i, hash := range res.hashes {
  2153  		if task.needCode[i] || task.needState[i] {
  2154  			break
  2155  		}
  2156  		slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].KeccakCodeHash, res.accounts[i].PoseidonCodeHash, res.accounts[i].CodeSize)
  2157  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2158  
  2159  		// If the task is complete, drop it into the stack trie to generate
  2160  		// account trie nodes for it
  2161  		if !task.needHeal[i] {
  2162  			full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2163  			if err != nil {
  2164  				panic(err) // Really shouldn't ever happen
  2165  			}
  2166  			task.genTrie.Update(hash[:], full)
  2167  		}
  2168  	}
  2169  	// Flush anything written just now and update the stats
  2170  	if err := batch.Write(); err != nil {
  2171  		log.Crit("Failed to persist accounts", "err", err)
  2172  	}
  2173  	s.accountSynced += uint64(len(res.accounts))
  2174  
  2175  	// Task filling persisted, push the chunk marker forward to the first
  2176  	// account still missing data.
  2177  	for i, hash := range res.hashes {
  2178  		if task.needCode[i] || task.needState[i] {
  2179  			return
  2180  		}
  2181  		task.Next = incHash(hash)
  2182  	}
  2183  	// All accounts marked as complete, track if the entire task is done
  2184  	task.done = !res.cont
  2185  
  2186  	// Stack trie could have generated trie nodes, push them to disk (we need to
  2187  	// flush after finalizing task.done). It's fine even if we crash and lose this
  2188  	// write, as it will only cause more data to be downloaded during heal.
  2189  	if task.done {
  2190  		if _, err := task.genTrie.Commit(); err != nil {
  2191  			log.Error("Failed to commit stack account", "err", err)
  2192  		}
  2193  	}
  2194  	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
  2195  		if err := task.genBatch.Write(); err != nil {
  2196  			log.Error("Failed to persist stack account", "err", err)
  2197  		}
  2198  		task.genBatch.Reset()
  2199  	}
  2200  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2201  }
  2202  
  2203  // OnAccounts is a callback method to invoke when a range of accounts are
  2204  // received from a remote peer.
  2205  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2206  	size := common.StorageSize(len(hashes) * common.HashLength)
  2207  	for _, account := range accounts {
  2208  		size += common.StorageSize(len(account))
  2209  	}
  2210  	for _, node := range proof {
  2211  		size += common.StorageSize(len(node))
  2212  	}
  2213  	logger := peer.Log().New("reqid", id)
  2214  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2215  
  2216  	// Whether or not the response is valid, we can mark the peer as idle and
  2217  	// notify the scheduler to assign a new task. If the response is invalid,
  2218  	// we'll drop the peer in a bit.
  2219  	s.lock.Lock()
  2220  	if _, ok := s.peers[peer.ID()]; ok {
  2221  		s.accountIdlers[peer.ID()] = struct{}{}
  2222  	}
  2223  	select {
  2224  	case s.update <- struct{}{}:
  2225  	default:
  2226  	}
  2227  	// Ensure the response is for a valid request
  2228  	req, ok := s.accountReqs[id]
  2229  	if !ok {
  2230  		// Request stale, perhaps the peer timed out but came through in the end
  2231  		logger.Warn("Unexpected account range packet")
  2232  		s.lock.Unlock()
  2233  		return nil
  2234  	}
  2235  	delete(s.accountReqs, id)
  2236  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2237  
  2238  	// Clean up the request timeout timer, we'll see how to proceed further based
  2239  	// on the actual delivered content
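        	//
        	// Note that for a time.AfterFunc timer, Stop reports whether it prevented
        	// the callback from running: a false return means the timeout handler has
        	// already fired, so the revert/reschedule path owns this request.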
  2240  	if !req.timeout.Stop() {
  2241  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2242  		s.lock.Unlock()
  2243  		return nil
  2244  	}
  2245  	// Response is valid, but check if peer is signalling that it does not have
  2246  	// the requested data. For account range queries that means the state being
  2247  	// retrieved was either already pruned remotely, or the peer is not yet
  2248  	// synced to our head.
  2249  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2250  		logger.Debug("Peer rejected account range request", "root", s.root)
  2251  		s.statelessPeers[peer.ID()] = struct{}{}
  2252  		s.lock.Unlock()
  2253  
  2254  		// Signal this request as failed, and ready for rescheduling
  2255  		s.scheduleRevertAccountRequest(req)
  2256  		return nil
  2257  	}
  2258  	root := s.root
  2259  	s.lock.Unlock()
  2260  
  2261  	// Reconstruct a partial trie from the response and verify it
  2262  	keys := make([][]byte, len(hashes))
  2263  	for i, key := range hashes {
  2264  		keys[i] = common.CopyBytes(key[:])
  2265  	}
  2266  	nodes := make(light.NodeList, len(proof))
  2267  	for i, node := range proof {
  2268  		nodes[i] = node
  2269  	}
  2270  	proofdb := nodes.NodeSet()
  2271  
  2272  	var end []byte
  2273  	if len(keys) > 0 {
  2274  		end = keys[len(keys)-1]
  2275  	}
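        	// VerifyRangeProof checks that the returned accounts are consecutive
        	// leaves of the trie rooted at 'root', bounded by req.origin and 'end';
        	// 'cont' reports whether further accounts exist beyond the last one.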
  2276  	cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
  2277  	if err != nil {
  2278  		logger.Warn("Account range failed proof", "err", err)
  2279  		// Signal this request as failed, and ready for rescheduling
  2280  		s.scheduleRevertAccountRequest(req)
  2281  		return err
  2282  	}
  2283  	accs := make([]*types.StateAccount, len(accounts))
  2284  	for i, account := range accounts {
  2285  		acc := new(types.StateAccount)
  2286  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2287  			panic(err) // We created these blobs, we must be able to decode them
  2288  		}
  2289  		accs[i] = acc
  2290  	}
  2291  	response := &accountResponse{
  2292  		task:     req.task,
  2293  		hashes:   hashes,
  2294  		accounts: accs,
  2295  		cont:     cont,
  2296  	}
  2297  	select {
  2298  	case req.deliver <- response:
  2299  	case <-req.cancel:
  2300  	case <-req.stale:
  2301  	}
  2302  	return nil
  2303  }
  2304  
  2305  // OnByteCodes is a callback method to invoke when a batch of contract
  2306  // bytecodes are received from a remote peer.
  2307  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2308  	s.lock.RLock()
  2309  	syncing := !s.snapped
  2310  	s.lock.RUnlock()
  2311  
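        	// Before the initial snap phase completes, bytecode packets answer sync
        	// requests; afterwards (s.snapped) they belong to the healing stage.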
  2312  	if syncing {
  2313  		return s.onByteCodes(peer, id, bytecodes)
  2314  	}
  2315  	return s.onHealByteCodes(peer, id, bytecodes)
  2316  }
  2317  
  2318  // onByteCodes is a callback method to invoke when a batch of contract
  2319  // bytecodes are received from a remote peer in the syncing phase.
  2320  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2321  	var size common.StorageSize
  2322  	for _, code := range bytecodes {
  2323  		size += common.StorageSize(len(code))
  2324  	}
  2325  	logger := peer.Log().New("reqid", id)
  2326  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2327  
  2328  	// Whether or not the response is valid, we can mark the peer as idle and
  2329  	// notify the scheduler to assign a new task. If the response is invalid,
  2330  	// we'll drop the peer in a bit.
  2331  	s.lock.Lock()
  2332  	if _, ok := s.peers[peer.ID()]; ok {
  2333  		s.bytecodeIdlers[peer.ID()] = struct{}{}
  2334  	}
  2335  	select {
  2336  	case s.update <- struct{}{}:
  2337  	default:
  2338  	}
  2339  	// Ensure the response is for a valid request
  2340  	req, ok := s.bytecodeReqs[id]
  2341  	if !ok {
  2342  		// Request stale, perhaps the peer timed out but came through in the end
  2343  		logger.Warn("Unexpected bytecode packet")
  2344  		s.lock.Unlock()
  2345  		return nil
  2346  	}
  2347  	delete(s.bytecodeReqs, id)
  2348  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2349  
  2350  	// Clean up the request timeout timer, we'll see how to proceed further based
  2351  	// on the actual delivered content
  2352  	if !req.timeout.Stop() {
  2353  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2354  		s.lock.Unlock()
  2355  		return nil
  2356  	}
  2357  
  2358  	// Response is valid, but check if peer is signalling that it does not have
  2359  	// the requested data. For bytecode range queries that means the peer is not
  2360  	// yet synced.
  2361  	if len(bytecodes) == 0 {
  2362  		logger.Debug("Peer rejected bytecode request")
  2363  		s.statelessPeers[peer.ID()] = struct{}{}
  2364  		s.lock.Unlock()
  2365  
  2366  		// Signal this request as failed, and ready for rescheduling
  2367  		s.scheduleRevertBytecodeRequest(req)
  2368  		return nil
  2369  	}
  2370  	s.lock.Unlock()
  2371  
  2372  	// Cross reference the requested bytecodes with the response to find gaps
  2373  	// that the serving node is missing
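        	//
        	// The reply must be an ordered subsequence of the request: e.g. for
        	// req.hashes = [A, B, C, D], a reply carrying only the code for B and D
        	// yields codes = [nil, codeB, nil, codeD], and the nil gaps are
        	// rescheduled by the response processor.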
  2374  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2375  	hash := make([]byte, 32)
  2376  
  2377  	codes := make([][]byte, len(req.hashes))
  2378  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2379  		// Find the next hash that we've been served, leaving misses with nils
  2380  		hasher.Reset()
  2381  		hasher.Write(bytecodes[i])
  2382  		hasher.Read(hash)
  2383  
  2384  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2385  			j++
  2386  		}
  2387  		if j < len(req.hashes) {
  2388  			codes[j] = bytecodes[i]
  2389  			j++
  2390  			continue
  2391  		}
  2392  		// We've either run out of hashes or got unrequested data
  2393  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2394  		// Signal this request as failed, and ready for rescheduling
  2395  		s.scheduleRevertBytecodeRequest(req)
  2396  		return errors.New("unexpected bytecode")
  2397  	}
  2398  	// Response validated, send it to the scheduler for filling
  2399  	response := &bytecodeResponse{
  2400  		task:   req.task,
  2401  		hashes: req.hashes,
  2402  		codes:  codes,
  2403  	}
  2404  	select {
  2405  	case req.deliver <- response:
  2406  	case <-req.cancel:
  2407  	case <-req.stale:
  2408  	}
  2409  	return nil
  2410  }
  2411  
  2412  // OnStorage is a callback method to invoke when ranges of storage slots
  2413  // are received from a remote peer.
  2414  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2415  	// Gather some trace stats to aid in debugging issues
  2416  	var (
  2417  		hashCount int
  2418  		slotCount int
  2419  		size      common.StorageSize
  2420  	)
  2421  	for _, hashset := range hashes {
  2422  		size += common.StorageSize(common.HashLength * len(hashset))
  2423  		hashCount += len(hashset)
  2424  	}
  2425  	for _, slotset := range slots {
  2426  		for _, slot := range slotset {
  2427  			size += common.StorageSize(len(slot))
  2428  		}
  2429  		slotCount += len(slotset)
  2430  	}
  2431  	for _, node := range proof {
  2432  		size += common.StorageSize(len(node))
  2433  	}
  2434  	logger := peer.Log().New("reqid", id)
  2435  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2436  
  2437  	// Whether or not the response is valid, we can mark the peer as idle and
  2438  	// notify the scheduler to assign a new task. If the response is invalid,
  2439  	// we'll drop the peer in a bit.
  2440  	s.lock.Lock()
  2441  	if _, ok := s.peers[peer.ID()]; ok {
  2442  		s.storageIdlers[peer.ID()] = struct{}{}
  2443  	}
  2444  	select {
  2445  	case s.update <- struct{}{}:
  2446  	default:
  2447  	}
  2448  	// Ensure the response is for a valid request
  2449  	req, ok := s.storageReqs[id]
  2450  	if !ok {
  2451  		// Request stale, perhaps the peer timed out but came through in the end
  2452  		logger.Warn("Unexpected storage ranges packet")
  2453  		s.lock.Unlock()
  2454  		return nil
  2455  	}
  2456  	delete(s.storageReqs, id)
  2457  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2458  
  2459  	// Clean up the request timeout timer, we'll see how to proceed further based
  2460  	// on the actual delivered content
  2461  	if !req.timeout.Stop() {
  2462  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2463  		s.lock.Unlock()
  2464  		return nil
  2465  	}
  2466  
  2467  	// Reject the response if the hash sets and slot sets don't match, or if the
  2468  	// peer sent more data than requested.
  2469  	if len(hashes) != len(slots) {
  2470  		s.lock.Unlock()
  2471  		s.scheduleRevertStorageRequest(req) // reschedule request
  2472  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2473  		return errors.New("hash and slot set size mismatch")
  2474  	}
  2475  	if len(hashes) > len(req.accounts) {
  2476  		s.lock.Unlock()
  2477  		s.scheduleRevertStorageRequest(req) // reschedule request
  2478  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2479  		return errors.New("hash set larger than requested")
  2480  	}
  2481  	// Response is valid, but check if peer is signalling that it does not have
  2482  	// the requested data. For storage range queries that means the state being
  2483  	// retrieved was either already pruned remotely, or the peer is not yet
  2484  	// synced to our head.
  2485  	if len(hashes) == 0 {
  2486  		logger.Debug("Peer rejected storage request")
  2487  		s.statelessPeers[peer.ID()] = struct{}{}
  2488  		s.lock.Unlock()
  2489  		s.scheduleRevertStorageRequest(req) // reschedule request
  2490  		return nil
  2491  	}
  2492  	s.lock.Unlock()
  2493  
  2494  	// Reconstruct the partial tries from the response and verify them
  2495  	var cont bool
  2496  
  2497  	for i := 0; i < len(hashes); i++ {
  2498  		// Convert the keys and proofs into an internal format
  2499  		keys := make([][]byte, len(hashes[i]))
  2500  		for j, key := range hashes[i] {
  2501  			keys[j] = common.CopyBytes(key[:])
  2502  		}
  2503  		nodes := make(light.NodeList, 0, len(proof))
  2504  		if i == len(hashes)-1 {
  2505  			for _, node := range proof {
  2506  				nodes = append(nodes, node)
  2507  			}
  2508  		}
  2509  		var err error
  2510  		if len(nodes) == 0 {
  2511  			// No proof has been attached, the response must cover the entire key
  2512  			// space and hash to the origin root.
  2513  			_, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
  2514  			if err != nil {
  2515  				s.scheduleRevertStorageRequest(req) // reschedule request
  2516  				logger.Warn("Storage slots failed proof", "err", err)
  2517  				return err
  2518  			}
  2519  		} else {
  2520  			// A proof was attached, the response is only partial, check that the
  2521  			// returned data is indeed part of the storage trie
  2522  			proofdb := nodes.NodeSet()
  2523  
  2524  			var end []byte
  2525  			if len(keys) > 0 {
  2526  				end = keys[len(keys)-1]
  2527  			}
  2528  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
  2529  			if err != nil {
  2530  				s.scheduleRevertStorageRequest(req) // reschedule request
  2531  				logger.Warn("Storage range failed proof", "err", err)
  2532  				return err
  2533  			}
  2534  		}
  2535  	}
  2536  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2537  	response := &storageResponse{
  2538  		mainTask: req.mainTask,
  2539  		subTask:  req.subTask,
  2540  		accounts: req.accounts,
  2541  		roots:    req.roots,
  2542  		hashes:   hashes,
  2543  		slots:    slots,
  2544  		cont:     cont,
  2545  	}
  2546  	select {
  2547  	case req.deliver <- response:
  2548  	case <-req.cancel:
  2549  	case <-req.stale:
  2550  	}
  2551  	return nil
  2552  }
  2553  
  2554  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2555  // are received from a remote peer.
  2556  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2557  	var size common.StorageSize
  2558  	for _, node := range trienodes {
  2559  		size += common.StorageSize(len(node))
  2560  	}
  2561  	logger := peer.Log().New("reqid", id)
  2562  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2563  
  2564  	// Whether or not the response is valid, we can mark the peer as idle and
  2565  	// notify the scheduler to assign a new task. If the response is invalid,
  2566  	// we'll drop the peer in a bit.
  2567  	s.lock.Lock()
  2568  	if _, ok := s.peers[peer.ID()]; ok {
  2569  		s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2570  	}
  2571  	select {
  2572  	case s.update <- struct{}{}:
  2573  	default:
  2574  	}
  2575  	// Ensure the response is for a valid request
  2576  	req, ok := s.trienodeHealReqs[id]
  2577  	if !ok {
  2578  		// Request stale, perhaps the peer timed out but came through in the end
  2579  		logger.Warn("Unexpected trienode heal packet")
  2580  		s.lock.Unlock()
  2581  		return nil
  2582  	}
  2583  	delete(s.trienodeHealReqs, id)
  2584  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2585  
  2586  	// Clean up the request timeout timer, we'll see how to proceed further based
  2587  	// on the actual delivered content
  2588  	if !req.timeout.Stop() {
  2589  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2590  		s.lock.Unlock()
  2591  		return nil
  2592  	}
  2593  
  2594  	// Response is valid, but check if peer is signalling that it does not have
  2595  	// the requested data. For trienode heal queries that means the peer is not
  2596  	// yet synced.
  2597  	if len(trienodes) == 0 {
  2598  		logger.Debug("Peer rejected trienode heal request")
  2599  		s.statelessPeers[peer.ID()] = struct{}{}
  2600  		s.lock.Unlock()
  2601  
  2602  		// Signal this request as failed, and ready for rescheduling
  2603  		s.scheduleRevertTrienodeHealRequest(req)
  2604  		return nil
  2605  	}
  2606  	s.lock.Unlock()
  2607  
  2608  	// Cross reference the requested trienodes with the response to find gaps
  2609  	// that the serving node is missing
  2610  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2611  	hash := make([]byte, 32)
  2612  
  2613  	nodes := make([][]byte, len(req.hashes))
  2614  	for i, j := 0, 0; i < len(trienodes); i++ {
  2615  		// Find the next hash that we've been served, leaving misses with nils
  2616  		hasher.Reset()
  2617  		hasher.Write(trienodes[i])
  2618  		hasher.Read(hash)
  2619  
  2620  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2621  			j++
  2622  		}
  2623  		if j < len(req.hashes) {
  2624  			nodes[j] = trienodes[i]
  2625  			j++
  2626  			continue
  2627  		}
  2628  		// We've either run out of hashes or got unrequested data
  2629  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2630  		// Signal this request as failed, and ready for rescheduling
  2631  		s.scheduleRevertTrienodeHealRequest(req)
  2632  		return errors.New("unexpected healing trienode")
  2633  	}
  2634  	// Response validated, send it to the scheduler for filling
  2635  	response := &trienodeHealResponse{
  2636  		task:   req.task,
  2637  		hashes: req.hashes,
  2638  		paths:  req.paths,
  2639  		nodes:  nodes,
  2640  	}
  2641  	select {
  2642  	case req.deliver <- response:
  2643  	case <-req.cancel:
  2644  	case <-req.stale:
  2645  	}
  2646  	return nil
  2647  }
  2648  
  2649  // onHealByteCodes is a callback method to invoke when a batch of contract
  2650  // bytecodes is received from a remote peer in the healing phase.
  2651  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2652  	var size common.StorageSize
  2653  	for _, code := range bytecodes {
  2654  		size += common.StorageSize(len(code))
  2655  	}
  2656  	logger := peer.Log().New("reqid", id)
  2657  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2658  
  2659  	// Whether or not the response is valid, we can mark the peer as idle and
  2660  	// notify the scheduler to assign a new task. If the response is invalid,
  2661  	// we'll drop the peer in a bit.
  2662  	s.lock.Lock()
  2663  	if _, ok := s.peers[peer.ID()]; ok {
  2664  		s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2665  	}
  2666  	select {
  2667  	case s.update <- struct{}{}:
  2668  	default:
  2669  	}
  2670  	// Ensure the response is for a valid request
  2671  	req, ok := s.bytecodeHealReqs[id]
  2672  	if !ok {
  2673  		// Request stale, perhaps the peer timed out but came through in the end
  2674  		logger.Warn("Unexpected bytecode heal packet")
  2675  		s.lock.Unlock()
  2676  		return nil
  2677  	}
  2678  	delete(s.bytecodeHealReqs, id)
  2679  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2680  
  2681  	// Clean up the request timeout timer, we'll see how to proceed further based
  2682  	// on the actual delivered content
  2683  	if !req.timeout.Stop() {
  2684  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2685  		s.lock.Unlock()
  2686  		return nil
  2687  	}
  2688  
  2689  	// Response is valid, but check if peer is signalling that it does not have
  2690  	// the requested data. For bytecode queries that means the peer is not
  2691  	// yet synced.
  2692  	if len(bytecodes) == 0 {
  2693  		logger.Debug("Peer rejected bytecode heal request")
  2694  		s.statelessPeers[peer.ID()] = struct{}{}
  2695  		s.lock.Unlock()
  2696  
  2697  		// Signal this request as failed, and ready for rescheduling
  2698  		s.scheduleRevertBytecodeHealRequest(req)
  2699  		return nil
  2700  	}
  2701  	s.lock.Unlock()
  2702  
  2703  	// Cross reference the requested bytecodes with the response to find gaps
  2704  	// that the serving node is missing
  2705  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2706  	hash := make([]byte, 32)
  2707  
  2708  	codes := make([][]byte, len(req.hashes))
  2709  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2710  		// Find the next hash that we've been served, leaving misses with nils
  2711  		hasher.Reset()
  2712  		hasher.Write(bytecodes[i])
  2713  		hasher.Read(hash)
  2714  
  2715  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2716  			j++
  2717  		}
  2718  		if j < len(req.hashes) {
  2719  			codes[j] = bytecodes[i]
  2720  			j++
  2721  			continue
  2722  		}
  2723  		// We've either run out of hashes, or been served unrequested data
  2724  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  2725  		// Signal this request as failed, and ready for rescheduling
  2726  		s.scheduleRevertBytecodeHealRequest(req)
  2727  		return errors.New("unexpected healing bytecode")
  2728  	}
  2729  	// Response validated, send it to the scheduler for filling
  2730  	response := &bytecodeHealResponse{
  2731  		task:   req.task,
  2732  		hashes: req.hashes,
  2733  		codes:  codes,
  2734  	}
  2735  	select {
  2736  	case req.deliver <- response:
  2737  	case <-req.cancel:
  2738  	case <-req.stale:
  2739  	}
  2740  	return nil
  2741  }
  2742  
  2743  // onHealState is a callback method to invoke when a flat state (account
  2744  // or storage slot) is downloaded during the healing stage. The flat states
  2745  // can be persisted blindly and fixed up later in the generation stage.
  2746  // Note this method is not concurrency safe; callers must synchronize externally.
  2747  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
  2748  	if len(paths) == 1 {
  2749  		var account types.StateAccount
  2750  		if err := rlp.DecodeBytes(value, &account); err != nil {
  2751  			return nil // Returning an error here would needlessly drop the remote peer
  2752  		}
  2753  		blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.KeccakCodeHash, account.PoseidonCodeHash, account.CodeSize)
  2754  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  2755  		s.accountHealed += 1
  2756  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  2757  	}
  2758  	if len(paths) == 2 {
  2759  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  2760  		s.storageHealed += 1
  2761  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  2762  	}
  2763  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  2764  		s.stateWriter.Write() // It's fine to ignore the error here
  2765  		s.stateWriter.Reset()
  2766  	}
  2767  	return nil
  2768  }
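        // For illustration (inferred from the path handling above): a one-element
        // path [accountHash] carries a full account RLP that is re-encoded into the
        // slim snapshot format before being written, while a two-element path
        // [accountHash, slotHash] carries a raw storage slot value that is written
        // out verbatim. Anything inconsistent is simply skipped here and repaired
        // later during snapshot generation.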
  2769  
  2770  // hashSpace is the total size of the 256 bit hash space for accounts.
  2771  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  2772  
  2773  // report calculates various status reports and provides them to the user.
  2774  func (s *Syncer) report(force bool) {
  2775  	if len(s.tasks) > 0 {
  2776  		s.reportSyncProgress(force)
  2777  		return
  2778  	}
  2779  	s.reportHealProgress(force)
  2780  }
  2781  
  2782  // reportSyncProgress calculates various status reports and provides them to the user.
  2783  func (s *Syncer) reportSyncProgress(force bool) {
  2784  	// Don't report all the events, just occasionally
  2785  	if !force && time.Since(s.logTime) < 8*time.Second {
  2786  		return
  2787  	}
  2788  	// Don't report anything until we have a meaningful progress
  2789  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  2790  	if synced == 0 {
  2791  		return
  2792  	}
  2793  	accountGaps := new(big.Int)
  2794  	for _, task := range s.tasks {
  2795  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  2796  	}
  2797  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  2798  	if accountFills.BitLen() == 0 {
  2799  		return
  2800  	}
  2801  	s.logTime = time.Now()
  2802  	estBytes := float64(new(big.Int).Div(
  2803  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  2804  		accountFills,
  2805  	).Uint64())
  2806  
  2807  	elapsed := time.Since(s.startTime)
  2808  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  2809  
  2810  	// Create a mega progress report
  2811  	var (
  2812  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  2813  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  2814  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  2815  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  2816  	)
  2817  	log.Info("State sync in progress", "synced", progress, "state", synced,
  2818  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  2819  }
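        // A worked example of the estimate above (illustrative numbers): if 100 MiB
        // of state has been downloaded while the still-pending account ranges cover
        // 3/4 of the hash space, then accountFills is 1/4 of hashSpace, so
        // estBytes = 100 MiB * 4 = 400 MiB and progress = 100/400 = 25.00%. With
        // 10 minutes elapsed, estTime is 40 minutes and the logged ETA is 30 minutes.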
  2820  
  2821  // reportHealProgress calculates various status reports and provides them to the user.
  2822  func (s *Syncer) reportHealProgress(force bool) {
  2823  	// Don't report all the events, just occasionally
  2824  	if !force && time.Since(s.logTime) < 8*time.Second {
  2825  		return
  2826  	}
  2827  	s.logTime = time.Now()
  2828  
  2829  	// Create a mega progress report
  2830  	var (
  2831  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  2832  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  2833  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  2834  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  2835  	)
  2836  	log.Info("State heal in progress", "accounts", accounts, "slots", storage,
  2837  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  2838  }
  2839  
  2840  // estimateRemainingSlots tries to determine roughly how many slots are left in
  2841  // a contract's storage, based on the number of keys and the last hash. This method
  2842  // assumes that the hashes are lexicographically ordered and evenly distributed.
  2843  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  2844  	if last == (common.Hash{}) {
  2845  		return 0, errors.New("last hash empty")
  2846  	}
  2847  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  2848  	space.Div(space, last.Big())
  2849  	if !space.IsUint64() {
  2850  		// Gigantic address space probably due to too few or malicious slots
  2851  		return 0, errors.New("too few slots for estimation")
  2852  	}
  2853  	return space.Uint64() - uint64(hashes), nil
  2854  }
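        // A worked example (illustrative numbers): if 64 slot hashes have been
        // retrieved and the last one sits at 1/4 of the hash space, a uniform
        // distribution suggests roughly 64 * 4 = 256 slots in total, i.e. about
        // 256 - 64 = 192 slots remaining.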
  2855  
  2856  // capacitySort implements sort.Interface, allowing peers to be sorted by message
  2857  // throughput. Note that callers should use sort.Reverse to get the desired effect
  2858  // of the highest capacity being at the front.
  2859  type capacitySort struct {
  2860  	ids  []string
  2861  	caps []int
  2862  }
  2863  
  2864  func (s *capacitySort) Len() int {
  2865  	return len(s.ids)
  2866  }
  2867  
  2868  func (s *capacitySort) Less(i, j int) bool {
  2869  	return s.caps[i] < s.caps[j]
  2870  }
  2871  
  2872  func (s *capacitySort) Swap(i, j int) {
  2873  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  2874  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  2875  }
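        // Illustrative usage, not part of the original file: to assign work to the
        // fastest peers first, sort in descending capacity order via sort.Reverse.
        //
        //	ids, caps := []string{"a", "b", "c"}, []int{10, 30, 20}
        //	sort.Sort(sort.Reverse(&capacitySort{ids: ids, caps: caps}))
        //	// ids is now ["b", "c", "a"], caps is now [30, 20, 10]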