github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/eth/protocols/snap/sync.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"math/rand"
    26  	"sort"
    27  	"sync"
    28  	"time"
    29  
    30  	"golang.org/x/crypto/sha3"
    31  
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/common/math"
    34  	"github.com/ethereum/go-ethereum/core/rawdb"
    35  	"github.com/ethereum/go-ethereum/core/state"
    36  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    37  	"github.com/ethereum/go-ethereum/core/types"
    38  	"github.com/ethereum/go-ethereum/crypto"
    39  	"github.com/ethereum/go-ethereum/ethdb"
    40  	"github.com/ethereum/go-ethereum/event"
    41  	"github.com/ethereum/go-ethereum/light"
    42  	"github.com/ethereum/go-ethereum/log"
    43  	"github.com/ethereum/go-ethereum/p2p/msgrate"
    44  	"github.com/ethereum/go-ethereum/rlp"
    45  	"github.com/ethereum/go-ethereum/trie"
    46  )
    47  
    48  var (
    49  	// emptyRoot is the known root hash of an empty trie.
    50  	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
    51  
    52  	// emptyCode is the known hash of the empty EVM bytecode.
    53  	emptyCode = crypto.Keccak256Hash(nil)
    54  )
    55  
    56  const (
    57  	// minRequestSize is the minimum number of bytes to request from a remote peer.
    58  	// This number is used as the low cap for account and storage range requests.
    59  	// Bytecode and trienode are limited inherently by item count (1).
    60  	minRequestSize = 64 * 1024
    61  
    62  	// maxRequestSize is the maximum number of bytes to request from a remote peer.
    63  	// This number is used as the high cap for account and storage range requests.
    64  	// Bytecode and trienode are limited more explicitly by the caps below.
    65  	maxRequestSize = 512 * 1024
    66  
    67  	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
    68  	// single query. If this number is too low, we're not filling responses fully
    69  	// and waste round trip times. If it's too high, we're capping responses and
    70  	// waste bandwidth.
    71  	//
     72  // Deployed bytecodes are currently capped at 24KB, so the minimum request
    73  	// size should be maxRequestSize / 24K. Assuming that most contracts do not
    74  	// come close to that, requesting 4x should be a good approximation.
    75  	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
    76  
    77  	// maxTrieRequestCount is the maximum number of trie node blobs to request in
    78  	// a single query. If this number is too low, we're not filling responses fully
    79  	// and waste round trip times. If it's too high, we're capping responses and
    80  	// waste bandwidth.
    81  	maxTrieRequestCount = maxRequestSize / 512
    82  )
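
         // For the defaults above, these caps evaluate (using Go's integer division)
         // to maxCodeRequestCount = 512*1024/(24*1024)*4 = 21*4 = 84 bytecodes per
         // query, and maxTrieRequestCount = 512*1024/512 = 1024 trie nodes per query.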
    83  
    84  var (
    85  	// accountConcurrency is the number of chunks to split the account trie into
    86  	// to allow concurrent retrievals.
    87  	accountConcurrency = 16
    88  
     89  // storageConcurrency is the number of chunks to split a large contract
    90  	// storage trie into to allow concurrent retrievals.
    91  	storageConcurrency = 16
    92  )
    93  
    94  // ErrCancelled is returned from snap syncing if the operation was prematurely
    95  // terminated.
    96  var ErrCancelled = errors.New("sync cancelled")
    97  
    98  // accountRequest tracks a pending account range request to ensure responses are
    99  // to actual requests and to validate any security constraints.
   100  //
   101  // Concurrency note: account requests and responses are handled concurrently from
   102  // the main runloop to allow Merkle proof verifications on the peer's thread and
   103  // to drop on invalid response. The request struct must contain all the data to
   104  // construct the response without accessing runloop internals (i.e. task). That
   105  // is only included to allow the runloop to match a response to the task being
   106  // synced without having yet another set of maps.
   107  type accountRequest struct {
   108  	peer string    // Peer to which this request is assigned
   109  	id   uint64    // Request ID of this request
   110  	time time.Time // Timestamp when the request was sent
   111  
   112  	deliver chan *accountResponse // Channel to deliver successful response on
   113  	revert  chan *accountRequest  // Channel to deliver request failure on
   114  	cancel  chan struct{}         // Channel to track sync cancellation
   115  	timeout *time.Timer           // Timer to track delivery timeout
   116  	stale   chan struct{}         // Channel to signal the request was dropped
   117  
   118  	origin common.Hash // First account requested to allow continuation checks
   119  	limit  common.Hash // Last account requested to allow non-overlapping chunking
   120  
   121  	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
   122  }
   123  
   124  // accountResponse is an already Merkle-verified remote response to an account
   125  // range request. It contains the subtrie for the requested account range and
   126  // the database that's going to be filled with the internal nodes on commit.
   127  type accountResponse struct {
   128  	task *accountTask // Task which this request is filling
   129  
   130  	hashes   []common.Hash         // Account hashes in the returned range
   131  	accounts []*types.StateAccount // Expanded accounts in the returned range
   132  
   133  	cont bool // Whether the account range has a continuation
   134  }
   135  
   136  // bytecodeRequest tracks a pending bytecode request to ensure responses are to
   137  // actual requests and to validate any security constraints.
   138  //
   139  // Concurrency note: bytecode requests and responses are handled concurrently from
   140  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   141  // to drop on invalid response. The request struct must contain all the data to
   142  // construct the response without accessing runloop internals (i.e. task). That
   143  // is only included to allow the runloop to match a response to the task being
   144  // synced without having yet another set of maps.
   145  type bytecodeRequest struct {
   146  	peer string    // Peer to which this request is assigned
   147  	id   uint64    // Request ID of this request
   148  	time time.Time // Timestamp when the request was sent
   149  
   150  	deliver chan *bytecodeResponse // Channel to deliver successful response on
   151  	revert  chan *bytecodeRequest  // Channel to deliver request failure on
   152  	cancel  chan struct{}          // Channel to track sync cancellation
   153  	timeout *time.Timer            // Timer to track delivery timeout
   154  	stale   chan struct{}          // Channel to signal the request was dropped
   155  
   156  	hashes []common.Hash // Bytecode hashes to validate responses
   157  	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
   158  }
   159  
   160  // bytecodeResponse is an already verified remote response to a bytecode request.
   161  type bytecodeResponse struct {
   162  	task *accountTask // Task which this request is filling
   163  
   164  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   165  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   166  }
   167  
   168  // storageRequest tracks a pending storage ranges request to ensure responses are
   169  // to actual requests and to validate any security constraints.
   170  //
   171  // Concurrency note: storage requests and responses are handled concurrently from
   172  // the main runloop to allow Merkle proof verifications on the peer's thread and
   173  // to drop on invalid response. The request struct must contain all the data to
   174  // construct the response without accessing runloop internals (i.e. tasks). That
   175  // is only included to allow the runloop to match a response to the task being
   176  // synced without having yet another set of maps.
   177  type storageRequest struct {
   178  	peer string    // Peer to which this request is assigned
   179  	id   uint64    // Request ID of this request
   180  	time time.Time // Timestamp when the request was sent
   181  
   182  	deliver chan *storageResponse // Channel to deliver successful response on
   183  	revert  chan *storageRequest  // Channel to deliver request failure on
   184  	cancel  chan struct{}         // Channel to track sync cancellation
   185  	timeout *time.Timer           // Timer to track delivery timeout
   186  	stale   chan struct{}         // Channel to signal the request was dropped
   187  
   188  	accounts []common.Hash // Account hashes to validate responses
   189  	roots    []common.Hash // Storage roots to validate responses
   190  
   191  	origin common.Hash // First storage slot requested to allow continuation checks
   192  	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
   193  
   194  	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
   195  	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
   196  }
   197  
   198  // storageResponse is an already Merkle-verified remote response to a storage
   199  // range request. It contains the subtries for the requested storage ranges and
    200  // the databases that are going to be filled with the internal nodes on commit.
   201  type storageResponse struct {
   202  	mainTask *accountTask // Task which this response belongs to
   203  	subTask  *storageTask // Task which this response is filling
   204  
   205  	accounts []common.Hash // Account hashes requested, may be only partially filled
   206  	roots    []common.Hash // Storage roots requested, may be only partially filled
   207  
   208  	hashes [][]common.Hash // Storage slot hashes in the returned range
   209  	slots  [][][]byte      // Storage slot values in the returned range
   210  
   211  	cont bool // Whether the last storage range has a continuation
   212  }
   213  
   214  // trienodeHealRequest tracks a pending state trie request to ensure responses
   215  // are to actual requests and to validate any security constraints.
   216  //
   217  // Concurrency note: trie node requests and responses are handled concurrently from
   218  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   219  // to drop on invalid response. The request struct must contain all the data to
   220  // construct the response without accessing runloop internals (i.e. task). That
   221  // is only included to allow the runloop to match a response to the task being
   222  // synced without having yet another set of maps.
   223  type trienodeHealRequest struct {
   224  	peer string    // Peer to which this request is assigned
   225  	id   uint64    // Request ID of this request
   226  	time time.Time // Timestamp when the request was sent
   227  
   228  	deliver chan *trienodeHealResponse // Channel to deliver successful response on
   229  	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
   230  	cancel  chan struct{}              // Channel to track sync cancellation
   231  	timeout *time.Timer                // Timer to track delivery timeout
   232  	stale   chan struct{}              // Channel to signal the request was dropped
   233  
   234  	hashes []common.Hash   // Trie node hashes to validate responses
   235  	paths  []trie.SyncPath // Trie node paths requested for rescheduling
   236  
   237  	task *healTask // Task which this request is filling (only access fields through the runloop!!)
   238  }
   239  
   240  // trienodeHealResponse is an already verified remote response to a trie node request.
   241  type trienodeHealResponse struct {
   242  	task *healTask // Task which this request is filling
   243  
   244  	hashes []common.Hash   // Hashes of the trie nodes to avoid double hashing
   245  	paths  []trie.SyncPath // Trie node paths requested for rescheduling missing ones
   246  	nodes  [][]byte        // Actual trie nodes to store into the database (nil = missing)
   247  }
   248  
   249  // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
   250  // actual requests and to validate any security constraints.
   251  //
   252  // Concurrency note: bytecode requests and responses are handled concurrently from
   253  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   254  // to drop on invalid response. The request struct must contain all the data to
   255  // construct the response without accessing runloop internals (i.e. task). That
   256  // is only included to allow the runloop to match a response to the task being
   257  // synced without having yet another set of maps.
   258  type bytecodeHealRequest struct {
   259  	peer string    // Peer to which this request is assigned
   260  	id   uint64    // Request ID of this request
   261  	time time.Time // Timestamp when the request was sent
   262  
   263  	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
   264  	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
   265  	cancel  chan struct{}              // Channel to track sync cancellation
   266  	timeout *time.Timer                // Timer to track delivery timeout
   267  	stale   chan struct{}              // Channel to signal the request was dropped
   268  
   269  	hashes []common.Hash // Bytecode hashes to validate responses
   270  	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
   271  }
   272  
   273  // bytecodeHealResponse is an already verified remote response to a bytecode request.
   274  type bytecodeHealResponse struct {
   275  	task *healTask // Task which this request is filling
   276  
   277  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   278  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   279  }
   280  
   281  // accountTask represents the sync task for a chunk of the account snapshot.
   282  type accountTask struct {
   283  	// These fields get serialized to leveldb on shutdown
   284  	Next     common.Hash                    // Next account to sync in this interval
   285  	Last     common.Hash                    // Last account to sync in this interval
   286  	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
   287  
   288  	// These fields are internals used during runtime
   289  	req  *accountRequest  // Pending request to fill this task
    290  	res  *accountResponse // Validated response filling this task
   291  	pend int              // Number of pending subtasks for this round
   292  
   293  	needCode  []bool // Flags whether the filling accounts need code retrieval
   294  	needState []bool // Flags whether the filling accounts need storage retrieval
    295  	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
   296  
   297  	codeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval
   298  	stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
   299  
   300  	genBatch ethdb.Batch     // Batch used by the node generator
   301  	genTrie  *trie.StackTrie // Node generator from storage slots
   302  
   303  	done bool // Flag whether the task can be removed
   304  }
   305  
   306  // storageTask represents the sync task for a chunk of the storage snapshot.
   307  type storageTask struct {
    308  	Next common.Hash // Next storage slot to sync in this interval
    309  	Last common.Hash // Last storage slot to sync in this interval
   310  
   311  	// These fields are internals used during runtime
   312  	root common.Hash     // Storage root hash for this instance
   313  	req  *storageRequest // Pending request to fill this task
   314  
   315  	genBatch ethdb.Batch     // Batch used by the node generator
   316  	genTrie  *trie.StackTrie // Node generator from storage slots
   317  
   318  	done bool // Flag whether the task can be removed
   319  }
   320  
   321  // healTask represents the sync task for healing the snap-synced chunk boundaries.
   322  type healTask struct {
   323  	scheduler *trie.Sync // State trie sync scheduler defining the tasks
   324  
   325  	trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval
   326  	codeTasks map[common.Hash]struct{}      // Set of byte code tasks currently queued for retrieval
   327  }
   328  
   329  // SyncProgress is a database entry to allow suspending and resuming a snapshot state
    330  // sync. As opposed to full and fast sync, there is no way to restart a suspended
   331  // snap sync without prior knowledge of the suspension point.
   332  type SyncProgress struct {
   333  	Tasks []*accountTask // The suspended account tasks (contract tasks within)
   334  
   335  	// Status report during syncing phase
   336  	AccountSynced  uint64             // Number of accounts downloaded
   337  	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   338  	BytecodeSynced uint64             // Number of bytecodes downloaded
   339  	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   340  	StorageSynced  uint64             // Number of storage slots downloaded
   341  	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   342  
   343  	// Status report during healing phase
   344  	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
   345  	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   346  	BytecodeHealSynced uint64             // Number of bytecodes downloaded
   347  	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   348  }
   349  
   350  // SyncPending is analogous to SyncProgress, but it's used to report on pending
   351  // ephemeral sync progress that doesn't get persisted into the database.
   352  type SyncPending struct {
   353  	TrienodeHeal uint64 // Number of state trie nodes pending
   354  	BytecodeHeal uint64 // Number of bytecodes pending
   355  }
   356  
   357  // SyncPeer abstracts out the methods required for a peer to be synced against
    358  // with the goal of allowing the construction of mock peers without the
    359  // full-blown networking.
   360  type SyncPeer interface {
   361  	// ID retrieves the peer's unique identifier.
   362  	ID() string
   363  
   364  	// RequestAccountRange fetches a batch of accounts rooted in a specific account
   365  	// trie, starting with the origin.
   366  	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
   367  
   368  	// RequestStorageRanges fetches a batch of storage slots belonging to one or
    369  	// more accounts. If slots from only one account are requested, an origin marker
   370  	// may also be used to retrieve from there.
   371  	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
   372  
   373  	// RequestByteCodes fetches a batch of bytecodes by hash.
   374  	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
   375  
   376  	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
    377  	// a specific state trie.
   378  	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
   379  
   380  	// Log retrieves the peer's own contextual logger.
   381  	Log() log.Logger
   382  }
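
         // As a minimal sketch of the mock peers mentioned above (illustrative only;
         // the mockPeer name and the no-op method bodies are assumptions, not code
         // from this package), the interface can be satisfied by a test double:
         //
         //	type mockPeer struct{ id string }
         //
         //	func (p *mockPeer) ID() string      { return p.id }
         //	func (p *mockPeer) Log() log.Logger { return log.New("peer", p.id) }
         //	func (p *mockPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
         //		return nil // a real mock would deliver a canned account range here
         //	}
         //	func (p *mockPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
         //		return nil
         //	}
         //	func (p *mockPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
         //		return nil
         //	}
         //	func (p *mockPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
         //		return nil
         //	}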
   383  
   384  // Syncer is an Ethereum account and storage trie syncer based on snapshots and
    385  // the snap protocol. Its purpose is to download all the accounts and storage
   386  // slots from remote peers and reassemble chunks of the state trie, on top of
   387  // which a state sync can be run to fix any gaps / overlaps.
   388  //
   389  // Every network request has a variety of failure events:
   390  //   - The peer disconnects after task assignment, failing to send the request
   391  //   - The peer disconnects after sending the request, before delivering on it
   392  //   - The peer remains connected, but does not deliver a response in time
   393  //   - The peer delivers a stale response after a previous timeout
   394  //   - The peer delivers a refusal to serve the requested state
   395  type Syncer struct {
   396  	db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
   397  
   398  	root    common.Hash    // Current state trie root being synced
   399  	tasks   []*accountTask // Current account task set being synced
   400  	snapped bool           // Flag to signal that snap phase is done
   401  	healer  *healTask      // Current state healing task being executed
   402  	update  chan struct{}  // Notification channel for possible sync progression
   403  
   404  	peers    map[string]SyncPeer // Currently active peers to download from
   405  	peerJoin *event.Feed         // Event feed to react to peers joining
   406  	peerDrop *event.Feed         // Event feed to react to peers dropping
   407  	rates    *msgrate.Trackers   // Message throughput rates for peers
   408  
   409  	// Request tracking during syncing phase
   410  	statelessPeers map[string]struct{} // Peers that failed to deliver state data
   411  	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
   412  	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   413  	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests
   414  
   415  	accountReqs  map[uint64]*accountRequest  // Account requests currently running
   416  	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
   417  	storageReqs  map[uint64]*storageRequest  // Storage requests currently running
   418  
   419  	accountSynced  uint64             // Number of accounts downloaded
   420  	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   421  	bytecodeSynced uint64             // Number of bytecodes downloaded
   422  	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   423  	storageSynced  uint64             // Number of storage slots downloaded
   424  	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   425  
   426  	// Request tracking during healing phase
   427  	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
   428  	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   429  
   430  	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
   431  	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running
   432  
   433  	trienodeHealSynced uint64             // Number of state trie nodes downloaded
   434  	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   435  	trienodeHealDups   uint64             // Number of state trie nodes already processed
   436  	trienodeHealNops   uint64             // Number of state trie nodes not requested
   437  	bytecodeHealSynced uint64             // Number of bytecodes downloaded
   438  	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   439  	bytecodeHealDups   uint64             // Number of bytecodes already processed
   440  	bytecodeHealNops   uint64             // Number of bytecodes not requested
   441  
   442  	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
   443  	accountHealed      uint64             // Number of accounts downloaded during the healing stage
   444  	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
   445  	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
   446  	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
   447  
   448  	startTime time.Time // Time instance when snapshot sync started
   449  	logTime   time.Time // Time instance when status was last reported
   450  
   451  	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
   452  	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
   453  }
   454  
   455  // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
   456  // snap protocol.
   457  func NewSyncer(db ethdb.KeyValueStore) *Syncer {
   458  	return &Syncer{
   459  		db: db,
   460  
   461  		peers:    make(map[string]SyncPeer),
   462  		peerJoin: new(event.Feed),
   463  		peerDrop: new(event.Feed),
   464  		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
   465  		update:   make(chan struct{}, 1),
   466  
   467  		accountIdlers:  make(map[string]struct{}),
   468  		storageIdlers:  make(map[string]struct{}),
   469  		bytecodeIdlers: make(map[string]struct{}),
   470  
   471  		accountReqs:  make(map[uint64]*accountRequest),
   472  		storageReqs:  make(map[uint64]*storageRequest),
   473  		bytecodeReqs: make(map[uint64]*bytecodeRequest),
   474  
   475  		trienodeHealIdlers: make(map[string]struct{}),
   476  		bytecodeHealIdlers: make(map[string]struct{}),
   477  
   478  		trienodeHealReqs: make(map[uint64]*trienodeHealRequest),
   479  		bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest),
   480  		stateWriter:      db.NewBatch(),
   481  	}
   482  }
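
         // Typical wiring, as a rough sketch (db, peer and pivotRoot are assumed to
         // be supplied by the caller; this snippet is not part of the original file):
         //
         //	syncer := NewSyncer(db)
         //	if err := syncer.Register(peer); err != nil {
         //		log.Error("Failed to register snap peer", "err", err)
         //	}
         //	cancel := make(chan struct{})
         //	if err := syncer.Sync(pivotRoot, cancel); err != nil {
         //		log.Error("Snap sync cycle failed", "err", err)
         //	}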
   483  
   484  // Register injects a new data source into the syncer's peerset.
   485  func (s *Syncer) Register(peer SyncPeer) error {
   486  	// Make sure the peer is not registered yet
   487  	id := peer.ID()
   488  
   489  	s.lock.Lock()
   490  	if _, ok := s.peers[id]; ok {
   491  		log.Error("Snap peer already registered", "id", id)
   492  
   493  		s.lock.Unlock()
   494  		return errors.New("already registered")
   495  	}
   496  	s.peers[id] = peer
   497  	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
   498  
   499  	// Mark the peer as idle, even if no sync is running
   500  	s.accountIdlers[id] = struct{}{}
   501  	s.storageIdlers[id] = struct{}{}
   502  	s.bytecodeIdlers[id] = struct{}{}
   503  	s.trienodeHealIdlers[id] = struct{}{}
   504  	s.bytecodeHealIdlers[id] = struct{}{}
   505  	s.lock.Unlock()
   506  
   507  	// Notify any active syncs that a new peer can be assigned data
   508  	s.peerJoin.Send(id)
   509  	return nil
   510  }
   511  
    512  // Unregister removes a data source from the syncer's peerset.
   513  func (s *Syncer) Unregister(id string) error {
   514  	// Remove all traces of the peer from the registry
   515  	s.lock.Lock()
   516  	if _, ok := s.peers[id]; !ok {
   517  		log.Error("Snap peer not registered", "id", id)
   518  
   519  		s.lock.Unlock()
   520  		return errors.New("not registered")
   521  	}
   522  	delete(s.peers, id)
   523  	s.rates.Untrack(id)
   524  
   525  	// Remove status markers, even if no sync is running
   526  	delete(s.statelessPeers, id)
   527  
   528  	delete(s.accountIdlers, id)
   529  	delete(s.storageIdlers, id)
   530  	delete(s.bytecodeIdlers, id)
   531  	delete(s.trienodeHealIdlers, id)
   532  	delete(s.bytecodeHealIdlers, id)
   533  	s.lock.Unlock()
   534  
   535  	// Notify any active syncs that pending requests need to be reverted
   536  	s.peerDrop.Send(id)
   537  	return nil
   538  }
   539  
    540  // Sync starts (or resumes a previous) sync cycle to iterate over a state trie
    541  // with the given root and reconstruct the nodes based on the snapshot leaves.
    542  // Previously downloaded segments will not be redownloaded or fixed; rather, any
    543  // errors will be healed after the leaves are fully accumulated.
   544  func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
   545  	// Move the trie root from any previous value, revert stateless markers for
   546  	// any peers and initialize the syncer if it was not yet run
   547  	s.lock.Lock()
   548  	s.root = root
   549  	s.healer = &healTask{
   550  		scheduler: state.NewStateSync(root, s.db, s.onHealState),
   551  		trieTasks: make(map[common.Hash]trie.SyncPath),
   552  		codeTasks: make(map[common.Hash]struct{}),
   553  	}
   554  	s.statelessPeers = make(map[string]struct{})
   555  	s.lock.Unlock()
   556  
   557  	if s.startTime == (time.Time{}) {
   558  		s.startTime = time.Now()
   559  	}
   560  	// Retrieve the previous sync status from LevelDB and abort if already synced
   561  	s.loadSyncStatus()
   562  	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   563  		log.Debug("Snapshot sync already completed")
   564  		return nil
   565  	}
   566  	defer func() { // Persist any progress, independent of failure
   567  		for _, task := range s.tasks {
   568  			s.forwardAccountTask(task)
   569  		}
   570  		s.cleanAccountTasks()
   571  		s.saveSyncStatus()
   572  	}()
   573  
   574  	log.Debug("Starting snapshot sync cycle", "root", root)
   575  
   576  	// Flush out the last committed raw states
   577  	defer func() {
   578  		if s.stateWriter.ValueSize() > 0 {
   579  			s.stateWriter.Write()
   580  			s.stateWriter.Reset()
   581  		}
   582  	}()
   583  	defer s.report(true)
   584  
   585  	// Whether sync completed or not, disregard any future packets
   586  	defer func() {
   587  		log.Debug("Terminating snapshot sync cycle", "root", root)
   588  		s.lock.Lock()
   589  		s.accountReqs = make(map[uint64]*accountRequest)
   590  		s.storageReqs = make(map[uint64]*storageRequest)
   591  		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
   592  		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
   593  		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
   594  		s.lock.Unlock()
   595  	}()
   596  	// Keep scheduling sync tasks
   597  	peerJoin := make(chan string, 16)
   598  	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
   599  	defer peerJoinSub.Unsubscribe()
   600  
   601  	peerDrop := make(chan string, 16)
   602  	peerDropSub := s.peerDrop.Subscribe(peerDrop)
   603  	defer peerDropSub.Unsubscribe()
   604  
   605  	// Create a set of unique channels for this sync cycle. We need these to be
   606  	// ephemeral so a data race doesn't accidentally deliver something stale on
   607  	// a persistent channel across syncs (yup, this happened)
   608  	var (
   609  		accountReqFails      = make(chan *accountRequest)
   610  		storageReqFails      = make(chan *storageRequest)
   611  		bytecodeReqFails     = make(chan *bytecodeRequest)
   612  		accountResps         = make(chan *accountResponse)
   613  		storageResps         = make(chan *storageResponse)
   614  		bytecodeResps        = make(chan *bytecodeResponse)
   615  		trienodeHealReqFails = make(chan *trienodeHealRequest)
   616  		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
   617  		trienodeHealResps    = make(chan *trienodeHealResponse)
   618  		bytecodeHealResps    = make(chan *bytecodeHealResponse)
   619  	)
   620  	for {
   621  		// Remove all completed tasks and terminate sync if everything's done
   622  		s.cleanStorageTasks()
   623  		s.cleanAccountTasks()
   624  		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   625  			return nil
   626  		}
   627  		// Assign all the data retrieval tasks to any free peers
   628  		s.assignAccountTasks(accountResps, accountReqFails, cancel)
   629  		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
   630  		s.assignStorageTasks(storageResps, storageReqFails, cancel)
   631  
   632  		if len(s.tasks) == 0 {
   633  			// Sync phase done, run heal phase
   634  			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
   635  			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
   636  		}
   637  		// Wait for something to happen
   638  		select {
   639  		case <-s.update:
   640  			// Something happened (new peer, delivery, timeout), recheck tasks
   641  		case <-peerJoin:
   642  			// A new peer joined, try to schedule it new tasks
   643  		case id := <-peerDrop:
   644  			s.revertRequests(id)
   645  		case <-cancel:
   646  			return ErrCancelled
   647  
   648  		case req := <-accountReqFails:
   649  			s.revertAccountRequest(req)
   650  		case req := <-bytecodeReqFails:
   651  			s.revertBytecodeRequest(req)
   652  		case req := <-storageReqFails:
   653  			s.revertStorageRequest(req)
   654  		case req := <-trienodeHealReqFails:
   655  			s.revertTrienodeHealRequest(req)
   656  		case req := <-bytecodeHealReqFails:
   657  			s.revertBytecodeHealRequest(req)
   658  
   659  		case res := <-accountResps:
   660  			s.processAccountResponse(res)
   661  		case res := <-bytecodeResps:
   662  			s.processBytecodeResponse(res)
   663  		case res := <-storageResps:
   664  			s.processStorageResponse(res)
   665  		case res := <-trienodeHealResps:
   666  			s.processTrienodeHealResponse(res)
   667  		case res := <-bytecodeHealResps:
   668  			s.processBytecodeHealResponse(res)
   669  		}
   670  		// Report stats if something meaningful happened
   671  		s.report(false)
   672  	}
   673  }
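
         // Cancellation is cooperative: closing the channel handed to Sync makes the
         // runloop return ErrCancelled. A usage sketch (errc and syncer are assumed
         // caller-side values):
         //
         //	cancel := make(chan struct{})
         //	errc := make(chan error, 1)
         //	go func() { errc <- syncer.Sync(root, cancel) }()
         //	// ... later, to abort the cycle:
         //	close(cancel)
         //	err := <-errc // ErrCancelled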
   674  
   675  // loadSyncStatus retrieves a previously aborted sync status from the database,
   676  // or generates a fresh one if none is available.
   677  func (s *Syncer) loadSyncStatus() {
   678  	var progress SyncProgress
   679  
   680  	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
   681  		if err := json.Unmarshal(status, &progress); err != nil {
   682  			log.Error("Failed to decode snap sync status", "err", err)
   683  		} else {
   684  			for _, task := range progress.Tasks {
   685  				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
   686  			}
   687  			s.tasks = progress.Tasks
   688  			for _, task := range s.tasks {
   689  				task.genBatch = ethdb.HookedBatch{
   690  					Batch: s.db.NewBatch(),
   691  					OnPut: func(key []byte, value []byte) {
   692  						s.accountBytes += common.StorageSize(len(key) + len(value))
   693  					},
   694  				}
   695  				task.genTrie = trie.NewStackTrie(task.genBatch)
   696  
   697  				for _, subtasks := range task.SubTasks {
   698  					for _, subtask := range subtasks {
   699  						subtask.genBatch = ethdb.HookedBatch{
   700  							Batch: s.db.NewBatch(),
   701  							OnPut: func(key []byte, value []byte) {
   702  								s.storageBytes += common.StorageSize(len(key) + len(value))
   703  							},
   704  						}
   705  						subtask.genTrie = trie.NewStackTrie(subtask.genBatch)
   706  					}
   707  				}
   708  			}
   709  			s.snapped = len(s.tasks) == 0
   710  
   711  			s.accountSynced = progress.AccountSynced
   712  			s.accountBytes = progress.AccountBytes
   713  			s.bytecodeSynced = progress.BytecodeSynced
   714  			s.bytecodeBytes = progress.BytecodeBytes
   715  			s.storageSynced = progress.StorageSynced
   716  			s.storageBytes = progress.StorageBytes
   717  
   718  			s.trienodeHealSynced = progress.TrienodeHealSynced
   719  			s.trienodeHealBytes = progress.TrienodeHealBytes
   720  			s.bytecodeHealSynced = progress.BytecodeHealSynced
   721  			s.bytecodeHealBytes = progress.BytecodeHealBytes
   722  			return
   723  		}
   724  	}
    725  	// Either we've failed to decode the previous state, or there was none.
   726  	// Start a fresh sync by chunking up the account range and scheduling
   727  	// them for retrieval.
   728  	s.tasks = nil
   729  	s.accountSynced, s.accountBytes = 0, 0
   730  	s.bytecodeSynced, s.bytecodeBytes = 0, 0
   731  	s.storageSynced, s.storageBytes = 0, 0
   732  	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
   733  	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
   734  
   735  	var next common.Hash
   736  	step := new(big.Int).Sub(
   737  		new(big.Int).Div(
   738  			new(big.Int).Exp(common.Big2, common.Big256, nil),
   739  			big.NewInt(int64(accountConcurrency)),
   740  		), common.Big1,
   741  	)
   742  	for i := 0; i < accountConcurrency; i++ {
   743  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
   744  		if i == accountConcurrency-1 {
   745  			// Make sure we don't overflow if the step is not a proper divisor
   746  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   747  		}
   748  		batch := ethdb.HookedBatch{
   749  			Batch: s.db.NewBatch(),
   750  			OnPut: func(key []byte, value []byte) {
   751  				s.accountBytes += common.StorageSize(len(key) + len(value))
   752  			},
   753  		}
   754  		s.tasks = append(s.tasks, &accountTask{
   755  			Next:     next,
   756  			Last:     last,
   757  			SubTasks: make(map[common.Hash][]*storageTask),
   758  			genBatch: batch,
   759  			genTrie:  trie.NewStackTrie(batch),
   760  		})
   761  		log.Debug("Created account sync task", "from", next, "last", last)
   762  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
   763  	}
   764  }
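
         // A worked example of the chunking above, with the default accountConcurrency
         // of 16: step = 2^256/16 - 1 = 2^252 - 1, so the first task covers the range
         // [0x00...00, 0x0ff...ff], the second starts at 0x100...00, and the final
         // task's Last is pinned to 0xff...ff to absorb any rounding remainder.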
   765  
   766  // saveSyncStatus marshals the remaining sync tasks into leveldb.
   767  func (s *Syncer) saveSyncStatus() {
   768  	// Serialize any partial progress to disk before spinning down
   769  	for _, task := range s.tasks {
   770  		if err := task.genBatch.Write(); err != nil {
   771  			log.Error("Failed to persist account slots", "err", err)
   772  		}
   773  		for _, subtasks := range task.SubTasks {
   774  			for _, subtask := range subtasks {
   775  				if err := subtask.genBatch.Write(); err != nil {
   776  					log.Error("Failed to persist storage slots", "err", err)
   777  				}
   778  			}
   779  		}
   780  	}
   781  	// Store the actual progress markers
   782  	progress := &SyncProgress{
   783  		Tasks:              s.tasks,
   784  		AccountSynced:      s.accountSynced,
   785  		AccountBytes:       s.accountBytes,
   786  		BytecodeSynced:     s.bytecodeSynced,
   787  		BytecodeBytes:      s.bytecodeBytes,
   788  		StorageSynced:      s.storageSynced,
   789  		StorageBytes:       s.storageBytes,
   790  		TrienodeHealSynced: s.trienodeHealSynced,
   791  		TrienodeHealBytes:  s.trienodeHealBytes,
   792  		BytecodeHealSynced: s.bytecodeHealSynced,
   793  		BytecodeHealBytes:  s.bytecodeHealBytes,
   794  	}
   795  	status, err := json.Marshal(progress)
   796  	if err != nil {
   797  		panic(err) // This can only fail during implementation
   798  	}
   799  	rawdb.WriteSnapshotSyncStatus(s.db, status)
   800  }
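
         // The persisted value is the plain JSON encoding of SyncProgress; a sketch
         // of its shape (field values illustrative, trailing fields elided):
         //
         //	{"Tasks":[{"Next":"0x00...","Last":"0x0f...","SubTasks":{}}],
         //	 "AccountSynced":1234,"AccountBytes":567890, ...}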
   801  
   802  // Progress returns the snap sync status statistics.
   803  func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
   804  	s.lock.Lock()
   805  	defer s.lock.Unlock()
   806  
   807  	progress := &SyncProgress{
   808  		AccountSynced:      s.accountSynced,
   809  		AccountBytes:       s.accountBytes,
   810  		BytecodeSynced:     s.bytecodeSynced,
   811  		BytecodeBytes:      s.bytecodeBytes,
   812  		StorageSynced:      s.storageSynced,
   813  		StorageBytes:       s.storageBytes,
   814  		TrienodeHealSynced: s.trienodeHealSynced,
   815  		TrienodeHealBytes:  s.trienodeHealBytes,
   816  		BytecodeHealSynced: s.bytecodeHealSynced,
   817  		BytecodeHealBytes:  s.bytecodeHealBytes,
   818  	}
   819  	pending := new(SyncPending)
   820  	if s.healer != nil {
   821  		pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
   822  		pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
   823  	}
   824  	return progress, pending
   825  }
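
         // An illustrative status poll (a sketch; syncer is an assumed *Syncer value
         // created elsewhere):
         //
         //	prog, pending := syncer.Progress()
         //	log.Info("Snap sync progress",
         //		"accounts", prog.AccountSynced, "slots", prog.StorageSynced,
         //		"healTrienodes", pending.TrienodeHeal, "healCodes", pending.BytecodeHeal)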
   826  
   827  // cleanAccountTasks removes account range retrieval tasks that have already been
   828  // completed.
   829  func (s *Syncer) cleanAccountTasks() {
   830  	// If the sync was already done before, don't even bother
   831  	if len(s.tasks) == 0 {
   832  		return
   833  	}
   834  	// Sync wasn't finished previously, check for any task that can be finalized
   835  	for i := 0; i < len(s.tasks); i++ {
   836  		if s.tasks[i].done {
   837  			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
   838  			i--
   839  		}
   840  	}
    841  	// If everything was just finalized, generate the account trie and start healing
   842  	if len(s.tasks) == 0 {
   843  		s.lock.Lock()
   844  		s.snapped = true
   845  		s.lock.Unlock()
   846  
   847  		// Push the final sync report
   848  		s.reportSyncProgress(true)
   849  	}
   850  }
   851  
   852  // cleanStorageTasks iterates over all the account tasks and storage sub-tasks
   853  // within, cleaning any that have been completed.
   854  func (s *Syncer) cleanStorageTasks() {
   855  	for _, task := range s.tasks {
   856  		for account, subtasks := range task.SubTasks {
   857  			// Remove storage range retrieval tasks that completed
   858  			for j := 0; j < len(subtasks); j++ {
   859  				if subtasks[j].done {
   860  					subtasks = append(subtasks[:j], subtasks[j+1:]...)
   861  					j--
   862  				}
   863  			}
   864  			if len(subtasks) > 0 {
   865  				task.SubTasks[account] = subtasks
   866  				continue
   867  			}
   868  			// If all storage chunks are done, mark the account as done too
   869  			for j, hash := range task.res.hashes {
   870  				if hash == account {
   871  					task.needState[j] = false
   872  				}
   873  			}
   874  			delete(task.SubTasks, account)
   875  			task.pend--
   876  
   877  			// If this was the last pending task, forward the account task
   878  			if task.pend == 0 {
   879  				s.forwardAccountTask(task)
   880  			}
   881  		}
   882  	}
   883  }
   884  
   885  // assignAccountTasks attempts to match idle peers to pending account range
   886  // retrievals.
   887  func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
   888  	s.lock.Lock()
   889  	defer s.lock.Unlock()
   890  
   891  	// Sort the peers by download capacity to use faster ones if many available
   892  	idlers := &capacitySort{
   893  		ids:  make([]string, 0, len(s.accountIdlers)),
   894  		caps: make([]int, 0, len(s.accountIdlers)),
   895  	}
   896  	targetTTL := s.rates.TargetTimeout()
   897  	for id := range s.accountIdlers {
   898  		if _, ok := s.statelessPeers[id]; ok {
   899  			continue
   900  		}
   901  		idlers.ids = append(idlers.ids, id)
   902  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
   903  	}
   904  	if len(idlers.ids) == 0 {
   905  		return
   906  	}
   907  	sort.Sort(sort.Reverse(idlers))
   908  
   909  	// Iterate over all the tasks and try to find a pending one
   910  	for _, task := range s.tasks {
   911  		// Skip any tasks already filling
   912  		if task.req != nil || task.res != nil {
   913  			continue
   914  		}
   915  		// Task pending retrieval, try to find an idle peer. If no such peer
   916  		// exists, we probably assigned tasks for all (or they are stateless).
   917  		// Abort the entire assignment mechanism.
   918  		if len(idlers.ids) == 0 {
   919  			return
   920  		}
   921  		var (
   922  			idle = idlers.ids[0]
   923  			peer = s.peers[idle]
   924  			cap  = idlers.caps[0]
   925  		)
   926  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
   927  
   928  		// Matched a pending task to an idle peer, allocate a unique request id
   929  		var reqid uint64
   930  		for {
   931  			reqid = uint64(rand.Int63())
   932  			if reqid == 0 {
   933  				continue
   934  			}
   935  			if _, ok := s.accountReqs[reqid]; ok {
   936  				continue
   937  			}
   938  			break
   939  		}
   940  		// Generate the network query and send it to the peer
   941  		req := &accountRequest{
   942  			peer:    idle,
   943  			id:      reqid,
   944  			time:    time.Now(),
   945  			deliver: success,
   946  			revert:  fail,
   947  			cancel:  cancel,
   948  			stale:   make(chan struct{}),
   949  			origin:  task.Next,
   950  			limit:   task.Last,
   951  			task:    task,
   952  		}
   953  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
   954  			peer.Log().Debug("Account range request timed out", "reqid", reqid)
   955  			s.rates.Update(idle, AccountRangeMsg, 0, 0)
   956  			s.scheduleRevertAccountRequest(req)
   957  		})
   958  		s.accountReqs[reqid] = req
   959  		delete(s.accountIdlers, idle)
   960  
   961  		s.pend.Add(1)
   962  		go func(root common.Hash) {
   963  			defer s.pend.Done()
   964  
   965  			// Attempt to send the remote request and revert if it fails
   966  			if cap > maxRequestSize {
   967  				cap = maxRequestSize
   968  			}
   969  			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
   970  				cap = minRequestSize
   971  			}
   972  			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
   973  				peer.Log().Debug("Failed to request account range", "err", err)
   974  				s.scheduleRevertAccountRequest(req)
   975  			}
   976  		}(s.root)
   977  
   978  		// Inject the request into the task to block further assignments
   979  		task.req = req
   980  	}
   981  }
   982  
   983  // assignBytecodeTasks attempts to match idle peers to pending code retrievals.
   984  func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
   985  	s.lock.Lock()
   986  	defer s.lock.Unlock()
   987  
   988  	// Sort the peers by download capacity to use faster ones if many available
   989  	idlers := &capacitySort{
   990  		ids:  make([]string, 0, len(s.bytecodeIdlers)),
   991  		caps: make([]int, 0, len(s.bytecodeIdlers)),
   992  	}
   993  	targetTTL := s.rates.TargetTimeout()
   994  	for id := range s.bytecodeIdlers {
   995  		if _, ok := s.statelessPeers[id]; ok {
   996  			continue
   997  		}
   998  		idlers.ids = append(idlers.ids, id)
   999  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1000  	}
  1001  	if len(idlers.ids) == 0 {
  1002  		return
  1003  	}
  1004  	sort.Sort(sort.Reverse(idlers))
  1005  
  1006  	// Iterate over all the tasks and try to find a pending one
  1007  	for _, task := range s.tasks {
  1008  		// Skip any tasks not in the bytecode retrieval phase
  1009  		if task.res == nil {
  1010  			continue
  1011  		}
  1012  		// Skip tasks that are already retrieving (or done with) all codes
  1013  		if len(task.codeTasks) == 0 {
  1014  			continue
  1015  		}
  1016  		// Task pending retrieval, try to find an idle peer. If no such peer
  1017  		// exists, we probably assigned tasks for all (or they are stateless).
  1018  		// Abort the entire assignment mechanism.
  1019  		if len(idlers.ids) == 0 {
  1020  			return
  1021  		}
  1022  		var (
  1023  			idle = idlers.ids[0]
  1024  			peer = s.peers[idle]
  1025  			cap  = idlers.caps[0]
  1026  		)
  1027  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1028  
  1029  		// Matched a pending task to an idle peer, allocate a unique request id
  1030  		var reqid uint64
  1031  		for {
  1032  			reqid = uint64(rand.Int63())
  1033  			if reqid == 0 {
  1034  				continue
  1035  			}
  1036  			if _, ok := s.bytecodeReqs[reqid]; ok {
  1037  				continue
  1038  			}
  1039  			break
  1040  		}
  1041  		// Generate the network query and send it to the peer
  1042  		if cap > maxCodeRequestCount {
  1043  			cap = maxCodeRequestCount
  1044  		}
  1045  		hashes := make([]common.Hash, 0, cap)
  1046  		for hash := range task.codeTasks {
  1047  			delete(task.codeTasks, hash)
  1048  			hashes = append(hashes, hash)
  1049  			if len(hashes) >= cap {
  1050  				break
  1051  			}
  1052  		}
  1053  		req := &bytecodeRequest{
  1054  			peer:    idle,
  1055  			id:      reqid,
  1056  			time:    time.Now(),
  1057  			deliver: success,
  1058  			revert:  fail,
  1059  			cancel:  cancel,
  1060  			stale:   make(chan struct{}),
  1061  			hashes:  hashes,
  1062  			task:    task,
  1063  		}
  1064  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1065  			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
  1066  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1067  			s.scheduleRevertBytecodeRequest(req)
  1068  		})
  1069  		s.bytecodeReqs[reqid] = req
  1070  		delete(s.bytecodeIdlers, idle)
  1071  
  1072  		s.pend.Add(1)
  1073  		go func() {
  1074  			defer s.pend.Done()
  1075  
  1076  			// Attempt to send the remote request and revert if it fails
  1077  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1078  				log.Debug("Failed to request bytecodes", "err", err)
  1079  				s.scheduleRevertBytecodeRequest(req)
  1080  			}
  1081  		}()
  1082  	}
  1083  }
  1084  
  1085  // assignStorageTasks attempts to match idle peers to pending storage range
  1086  // retrievals.
  1087  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1088  	s.lock.Lock()
  1089  	defer s.lock.Unlock()
  1090  
  1091  	// Sort the peers by download capacity to use faster ones if many available
  1092  	idlers := &capacitySort{
  1093  		ids:  make([]string, 0, len(s.storageIdlers)),
  1094  		caps: make([]int, 0, len(s.storageIdlers)),
  1095  	}
  1096  	targetTTL := s.rates.TargetTimeout()
  1097  	for id := range s.storageIdlers {
  1098  		if _, ok := s.statelessPeers[id]; ok {
  1099  			continue
  1100  		}
  1101  		idlers.ids = append(idlers.ids, id)
  1102  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1103  	}
  1104  	if len(idlers.ids) == 0 {
  1105  		return
  1106  	}
  1107  	sort.Sort(sort.Reverse(idlers))
  1108  
  1109  	// Iterate over all the tasks and try to find a pending one
  1110  	for _, task := range s.tasks {
  1111  		// Skip any tasks not in the storage retrieval phase
  1112  		if task.res == nil {
  1113  			continue
  1114  		}
  1115  		// Skip tasks that are already retrieving (or done with) all small states
  1116  		if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
  1117  			continue
  1118  		}
  1119  		// Task pending retrieval, try to find an idle peer. If no such peer
  1120  		// exists, we probably assigned tasks for all (or they are stateless).
  1121  		// Abort the entire assignment mechanism.
  1122  		if len(idlers.ids) == 0 {
  1123  			return
  1124  		}
  1125  		var (
  1126  			idle = idlers.ids[0]
  1127  			peer = s.peers[idle]
  1128  			cap  = idlers.caps[0]
  1129  		)
  1130  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1131  
  1132  		// Matched a pending task to an idle peer, allocate a unique request id
  1133  		var reqid uint64
  1134  		for {
  1135  			reqid = uint64(rand.Int63())
  1136  			if reqid == 0 {
  1137  				continue
  1138  			}
  1139  			if _, ok := s.storageReqs[reqid]; ok {
  1140  				continue
  1141  			}
  1142  			break
  1143  		}
  1144  		// Generate the network query and send it to the peer. If there are
  1145  		// large contract tasks pending, complete those before diving into
  1146  		// even more new contracts.
  1147  		if cap > maxRequestSize {
  1148  			cap = maxRequestSize
  1149  		}
  1150  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1151  			cap = minRequestSize
  1152  		}
  1153  		storageSets := cap / 1024
  1154  
  1155  		var (
  1156  			accounts = make([]common.Hash, 0, storageSets)
  1157  			roots    = make([]common.Hash, 0, storageSets)
  1158  			subtask  *storageTask
  1159  		)
  1160  		for account, subtasks := range task.SubTasks {
  1161  			for _, st := range subtasks {
  1162  				// Skip any subtasks already filling
  1163  				if st.req != nil {
  1164  					continue
  1165  				}
  1166  				// Found an incomplete storage chunk, schedule it
  1167  				accounts = append(accounts, account)
  1168  				roots = append(roots, st.root)
  1169  				subtask = st
  1170  				break // Large contract chunks are downloaded individually
  1171  			}
  1172  			if subtask != nil {
  1173  				break // Large contract chunks are downloaded individually
  1174  			}
  1175  		}
  1176  		if subtask == nil {
   1177  			// No large contract requires retrieval, but small ones are available
   1178  			for account, root := range task.stateTasks {
   1179  				delete(task.stateTasks, account)
   1180  
   1181  				accounts = append(accounts, account)
  1182  				roots = append(roots, root)
  1183  
  1184  				if len(accounts) >= storageSets {
  1185  					break
  1186  				}
  1187  			}
  1188  		}
  1189  		// If nothing was found, it means this task is actually already fully
  1190  		// retrieving, but large contracts are hard to detect. Skip to the next.
  1191  		if len(accounts) == 0 {
  1192  			continue
  1193  		}
  1194  		req := &storageRequest{
  1195  			peer:     idle,
  1196  			id:       reqid,
  1197  			time:     time.Now(),
  1198  			deliver:  success,
  1199  			revert:   fail,
  1200  			cancel:   cancel,
  1201  			stale:    make(chan struct{}),
  1202  			accounts: accounts,
  1203  			roots:    roots,
  1204  			mainTask: task,
  1205  			subTask:  subtask,
  1206  		}
  1207  		if subtask != nil {
  1208  			req.origin = subtask.Next
  1209  			req.limit = subtask.Last
  1210  		}
  1211  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1212  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1213  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1214  			s.scheduleRevertStorageRequest(req)
  1215  		})
  1216  		s.storageReqs[reqid] = req
  1217  		delete(s.storageIdlers, idle)
  1218  
  1219  		s.pend.Add(1)
  1220  		go func(root common.Hash) {
  1221  			defer s.pend.Done()
  1222  
  1223  			// Attempt to send the remote request and revert if it fails
  1224  			var origin, limit []byte
  1225  			if subtask != nil {
  1226  				origin, limit = req.origin[:], req.limit[:]
  1227  			}
  1228  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1229  				log.Debug("Failed to request storage", "err", err)
  1230  				s.scheduleRevertStorageRequest(req)
  1231  			}
  1232  		}(s.root)
  1233  
  1234  		// Inject the request into the subtask to block further assignments
  1235  		if subtask != nil {
  1236  			subtask.req = req
  1237  		}
  1238  	}
  1239  }
  1240  
  1241  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1242  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1243  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1244  	s.lock.Lock()
  1245  	defer s.lock.Unlock()
  1246  
  1247  	// Sort the peers by download capacity to use faster ones if many available
  1248  	idlers := &capacitySort{
  1249  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1250  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1251  	}
  1252  	targetTTL := s.rates.TargetTimeout()
  1253  	for id := range s.trienodeHealIdlers {
  1254  		if _, ok := s.statelessPeers[id]; ok {
  1255  			continue
  1256  		}
  1257  		idlers.ids = append(idlers.ids, id)
  1258  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1259  	}
  1260  	if len(idlers.ids) == 0 {
  1261  		return
  1262  	}
  1263  	sort.Sort(sort.Reverse(idlers))
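        	// Illustrative ordering (assumed capacities): ids [a b c] with caps
        	// [1 9 4] sort descending to ids [b c a] / caps [9 4 1], so the
        	// fastest idle peer is always popped from index 0 below.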
  1264  
  1265  	// Iterate over pending tasks and try to find a peer to retrieve with
  1266  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1267  		// If there are not enough trie tasks queued to fully assign, fill the
  1268  		// queue from the state sync scheduler. The trie syncer schedules these
  1269  		// together with bytecodes, so we need to queue them combined.
  1270  		var (
  1271  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1272  			want = maxTrieRequestCount + maxCodeRequestCount
  1273  		)
  1274  		if have < want {
  1275  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1276  			for i, hash := range nodes {
  1277  				s.healer.trieTasks[hash] = paths[i]
  1278  			}
  1279  			for _, hash := range codes {
  1280  				s.healer.codeTasks[hash] = struct{}{}
  1281  			}
  1282  		}
  1283  		// If all the heal tasks are bytecodes or already downloading, bail
  1284  		if len(s.healer.trieTasks) == 0 {
  1285  			return
  1286  		}
  1287  		// Task pending retrieval, try to find an idle peer. If no such peer
  1288  		// exists, we probably assigned tasks for all (or they are stateless).
  1289  		// Abort the entire assignment mechanism.
  1290  		if len(idlers.ids) == 0 {
  1291  			return
  1292  		}
  1293  		var (
  1294  			idle = idlers.ids[0]
  1295  			peer = s.peers[idle]
  1296  			cap  = idlers.caps[0]
  1297  		)
  1298  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1299  
  1300  		// Matched a pending task to an idle peer, allocate a unique request id
  1301  		var reqid uint64
  1302  		for {
  1303  			reqid = uint64(rand.Int63())
  1304  			if reqid == 0 {
  1305  				continue
  1306  			}
  1307  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1308  				continue
  1309  			}
  1310  			break
  1311  		}
  1312  		// Generate the network query and send it to the peer
  1313  		if cap > maxTrieRequestCount {
  1314  			cap = maxTrieRequestCount
  1315  		}
  1316  		var (
  1317  			hashes   = make([]common.Hash, 0, cap)
  1318  			paths    = make([]trie.SyncPath, 0, cap)
  1319  			pathsets = make([]TrieNodePathSet, 0, cap)
  1320  		)
  1321  		for hash, pathset := range s.healer.trieTasks {
  1322  			delete(s.healer.trieTasks, hash)
  1323  
  1324  			hashes = append(hashes, hash)
  1325  			paths = append(paths, pathset)
  1326  			pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash
  1327  
  1328  			if len(hashes) >= cap {
  1329  				break
  1330  			}
  1331  		}
  1332  		req := &trienodeHealRequest{
  1333  			peer:    idle,
  1334  			id:      reqid,
  1335  			time:    time.Now(),
  1336  			deliver: success,
  1337  			revert:  fail,
  1338  			cancel:  cancel,
  1339  			stale:   make(chan struct{}),
  1340  			hashes:  hashes,
  1341  			paths:   paths,
  1342  			task:    s.healer,
  1343  		}
  1344  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1345  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1346  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1347  			s.scheduleRevertTrienodeHealRequest(req)
  1348  		})
  1349  		s.trienodeHealReqs[reqid] = req
  1350  		delete(s.trienodeHealIdlers, idle)
  1351  
  1352  		s.pend.Add(1)
  1353  		go func(root common.Hash) {
  1354  			defer s.pend.Done()
  1355  
  1356  			// Attempt to send the remote request and revert if it fails
  1357  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1358  				log.Debug("Failed to request trienode healers", "err", err)
  1359  				s.scheduleRevertTrienodeHealRequest(req)
  1360  			}
  1361  		}(s.root)
  1362  	}
  1363  }
  1364  
  1365  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1366  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1367  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1368  	s.lock.Lock()
  1369  	defer s.lock.Unlock()
  1370  
  1371  	// Sort the peers by download capacity to use faster ones if many are available
  1372  	idlers := &capacitySort{
  1373  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1374  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1375  	}
  1376  	targetTTL := s.rates.TargetTimeout()
  1377  	for id := range s.bytecodeHealIdlers {
  1378  		if _, ok := s.statelessPeers[id]; ok {
  1379  			continue
  1380  		}
  1381  		idlers.ids = append(idlers.ids, id)
  1382  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1383  	}
  1384  	if len(idlers.ids) == 0 {
  1385  		return
  1386  	}
  1387  	sort.Sort(sort.Reverse(idlers))
  1388  
  1389  	// Iterate over pending tasks and try to find a peer to retrieve with
  1390  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1391  		// If there are not enough bytecode tasks queued to fully assign, fill the
  1392  		// queue from the state sync scheduler. The trie syncer schedules these
  1393  		// together with trie nodes, so we need to queue them combined.
  1394  		var (
  1395  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1396  			want = maxTrieRequestCount + maxCodeRequestCount
  1397  		)
  1398  		if have < want {
  1399  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1400  			for i, hash := range nodes {
  1401  				s.healer.trieTasks[hash] = paths[i]
  1402  			}
  1403  			for _, hash := range codes {
  1404  				s.healer.codeTasks[hash] = struct{}{}
  1405  			}
  1406  		}
  1407  		// If all the heal tasks are trienodes or already downloading, bail
  1408  		if len(s.healer.codeTasks) == 0 {
  1409  			return
  1410  		}
  1411  		// Task pending retrieval, try to find an idle peer. If no such peer
  1412  		// exists, we probably assigned tasks for all (or they are stateless).
  1413  		// Abort the entire assignment mechanism.
  1414  		if len(idlers.ids) == 0 {
  1415  			return
  1416  		}
  1417  		var (
  1418  			idle = idlers.ids[0]
  1419  			peer = s.peers[idle]
  1420  			cap  = idlers.caps[0]
  1421  		)
  1422  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1423  
  1424  		// Matched a pending task to an idle peer, allocate a unique request id
  1425  		var reqid uint64
  1426  		for {
  1427  			reqid = uint64(rand.Int63())
  1428  			if reqid == 0 {
  1429  				continue
  1430  			}
  1431  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1432  				continue
  1433  			}
  1434  			break
  1435  		}
  1436  		// Generate the network query and send it to the peer
  1437  		if cap > maxCodeRequestCount {
  1438  			cap = maxCodeRequestCount
  1439  		}
  1440  		hashes := make([]common.Hash, 0, cap)
  1441  		for hash := range s.healer.codeTasks {
  1442  			delete(s.healer.codeTasks, hash)
  1443  
  1444  			hashes = append(hashes, hash)
  1445  			if len(hashes) >= cap {
  1446  				break
  1447  			}
  1448  		}
  1449  		req := &bytecodeHealRequest{
  1450  			peer:    idle,
  1451  			id:      reqid,
  1452  			time:    time.Now(),
  1453  			deliver: success,
  1454  			revert:  fail,
  1455  			cancel:  cancel,
  1456  			stale:   make(chan struct{}),
  1457  			hashes:  hashes,
  1458  			task:    s.healer,
  1459  		}
  1460  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1461  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1462  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1463  			s.scheduleRevertBytecodeHealRequest(req)
  1464  		})
  1465  		s.bytecodeHealReqs[reqid] = req
  1466  		delete(s.bytecodeHealIdlers, idle)
  1467  
  1468  		s.pend.Add(1)
  1469  		go func() {
  1470  			defer s.pend.Done()
  1471  
  1472  			// Attempt to send the remote request and revert if it fails
  1473  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1474  				log.Debug("Failed to request bytecode healers", "err", err)
  1475  				s.scheduleRevertBytecodeHealRequest(req)
  1476  			}
  1477  		}()
  1478  	}
  1479  }
  1480  
  1481  // revertRequests locates all the currently pending requests from a particular
  1482  // peer and reverts them, rescheduling for others to fulfill.
  1483  func (s *Syncer) revertRequests(peer string) {
  1484  	// Gather the requests first, as the reverts need the lock too
  1485  	s.lock.Lock()
  1486  	var accountReqs []*accountRequest
  1487  	for _, req := range s.accountReqs {
  1488  		if req.peer == peer {
  1489  			accountReqs = append(accountReqs, req)
  1490  		}
  1491  	}
  1492  	var bytecodeReqs []*bytecodeRequest
  1493  	for _, req := range s.bytecodeReqs {
  1494  		if req.peer == peer {
  1495  			bytecodeReqs = append(bytecodeReqs, req)
  1496  		}
  1497  	}
  1498  	var storageReqs []*storageRequest
  1499  	for _, req := range s.storageReqs {
  1500  		if req.peer == peer {
  1501  			storageReqs = append(storageReqs, req)
  1502  		}
  1503  	}
  1504  	var trienodeHealReqs []*trienodeHealRequest
  1505  	for _, req := range s.trienodeHealReqs {
  1506  		if req.peer == peer {
  1507  			trienodeHealReqs = append(trienodeHealReqs, req)
  1508  		}
  1509  	}
  1510  	var bytecodeHealReqs []*bytecodeHealRequest
  1511  	for _, req := range s.bytecodeHealReqs {
  1512  		if req.peer == peer {
  1513  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1514  		}
  1515  	}
  1516  	s.lock.Unlock()
  1517  
  1518  	// Revert all the requests matching the peer
  1519  	for _, req := range accountReqs {
  1520  		s.revertAccountRequest(req)
  1521  	}
  1522  	for _, req := range bytecodeReqs {
  1523  		s.revertBytecodeRequest(req)
  1524  	}
  1525  	for _, req := range storageReqs {
  1526  		s.revertStorageRequest(req)
  1527  	}
  1528  	for _, req := range trienodeHealReqs {
  1529  		s.revertTrienodeHealRequest(req)
  1530  	}
  1531  	for _, req := range bytecodeHealReqs {
  1532  		s.revertBytecodeHealRequest(req)
  1533  	}
  1534  }
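
        // A minimal usage sketch (hypothetical caller): when a peer drops off the
        // network, every request it still has in flight can be rescheduled onto
        // other peers in a single call:
        //
        //	s.revertRequests(peer.ID())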
  1535  
  1536  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1537  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1538  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
  1539  	select {
  1540  	case req.revert <- req:
  1541  		// Sync event loop notified
  1542  	case <-req.cancel:
  1543  		// Sync cycle got cancelled
  1544  	case <-req.stale:
  1545  		// Request already reverted
  1546  	}
  1547  }
  1548  
  1549  // revertAccountRequest cleans up an account range request and returns all failed
  1550  // retrieval tasks to the scheduler for reassignment.
  1551  //
  1552  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1553  // On peer threads, use scheduleRevertAccountRequest.
  1554  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1555  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1556  	select {
  1557  	case <-req.stale:
  1558  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1559  		return
  1560  	default:
  1561  	}
  1562  	close(req.stale)
  1563  
  1564  	// Remove the request from the tracked set
  1565  	s.lock.Lock()
  1566  	delete(s.accountReqs, req.id)
  1567  	s.lock.Unlock()
  1568  
  1569  	// If there's a timeout timer still running, abort it and mark the account
  1570  	// task as not-pending, ready for rescheduling
  1571  	req.timeout.Stop()
  1572  	if req.task.req == req {
  1573  		req.task.req = nil
  1574  	}
  1575  }
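
        // The select-then-close sequence above is a close-once guard: the
        // non-blocking receive detects an already-closed stale channel, after
        // which close(req.stale) marks the request dead for all other observers.
        // This is only race-free because reverts run on the single event loop
        // thread. A standalone sketch of the idiom (illustrative):
        //
        //	select {
        //	case <-done:
        //		return // someone already closed it
        //	default:
        //	}
        //	close(done)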
  1576  
  1577  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1578  // and return all failed retrieval tasks to the scheduler for reassignment.
  1579  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1580  	select {
  1581  	case req.revert <- req:
  1582  		// Sync event loop notified
  1583  	case <-req.cancel:
  1584  		// Sync cycle got cancelled
  1585  	case <-req.stale:
  1586  		// Request already reverted
  1587  	}
  1588  }
  1589  
  1590  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1591  // retrieval tasks to the scheduler for reassignment.
  1592  //
  1593  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1594  // On peer threads, use scheduleRevertBytecodeRequest.
  1595  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1596  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1597  	select {
  1598  	case <-req.stale:
  1599  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1600  		return
  1601  	default:
  1602  	}
  1603  	close(req.stale)
  1604  
  1605  	// Remove the request from the tracked set
  1606  	s.lock.Lock()
  1607  	delete(s.bytecodeReqs, req.id)
  1608  	s.lock.Unlock()
  1609  
  1610  	// If there's a timeout timer still running, abort it and mark the code
  1611  	// retrievals as not-pending, ready for rescheduling
  1612  	req.timeout.Stop()
  1613  	for _, hash := range req.hashes {
  1614  		req.task.codeTasks[hash] = struct{}{}
  1615  	}
  1616  }
  1617  
  1618  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1619  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1620  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1621  	select {
  1622  	case req.revert <- req:
  1623  		// Sync event loop notified
  1624  	case <-req.cancel:
  1625  		// Sync cycle got cancelled
  1626  	case <-req.stale:
  1627  		// Request already reverted
  1628  	}
  1629  }
  1630  
  1631  // revertStorageRequest cleans up a storage range request and returns all failed
  1632  // retrieval tasks to the scheduler for reassignment.
  1633  //
  1634  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1635  // On peer threads, use scheduleRevertStorageRequest.
  1636  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1637  	log.Debug("Reverting storage request", "peer", req.peer)
  1638  	select {
  1639  	case <-req.stale:
  1640  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1641  		return
  1642  	default:
  1643  	}
  1644  	close(req.stale)
  1645  
  1646  	// Remove the request from the tracked set
  1647  	s.lock.Lock()
  1648  	delete(s.storageReqs, req.id)
  1649  	s.lock.Unlock()
  1650  
  1651  	// If there's a timeout timer still running, abort it and mark the storage
  1652  	// task as not-pending, ready for rescheduling
  1653  	req.timeout.Stop()
  1654  	if req.subTask != nil {
  1655  		req.subTask.req = nil
  1656  	} else {
  1657  		for i, account := range req.accounts {
  1658  			req.mainTask.stateTasks[account] = req.roots[i]
  1659  		}
  1660  	}
  1661  }
  1662  
  1663  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1664  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1665  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1666  	select {
  1667  	case req.revert <- req:
  1668  		// Sync event loop notified
  1669  	case <-req.cancel:
  1670  		// Sync cycle got cancelled
  1671  	case <-req.stale:
  1672  		// Request already reverted
  1673  	}
  1674  }
  1675  
  1676  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1677  // failed retrieval tasks to the scheduler for reassignment.
  1678  //
  1679  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1680  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1681  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1682  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1683  	select {
  1684  	case <-req.stale:
  1685  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1686  		return
  1687  	default:
  1688  	}
  1689  	close(req.stale)
  1690  
  1691  	// Remove the request from the tracked set
  1692  	s.lock.Lock()
  1693  	delete(s.trienodeHealReqs, req.id)
  1694  	s.lock.Unlock()
  1695  
  1696  	// If there's a timeout timer still running, abort it and mark the trie node
  1697  	// retrievals as not-pending, ready for rescheduling
  1698  	req.timeout.Stop()
  1699  	for i, hash := range req.hashes {
  1700  		req.task.trieTasks[hash] = req.paths[i]
  1701  	}
  1702  }
  1703  
  1704  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1705  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1706  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1707  	select {
  1708  	case req.revert <- req:
  1709  		// Sync event loop notified
  1710  	case <-req.cancel:
  1711  		// Sync cycle got cancelled
  1712  	case <-req.stale:
  1713  		// Request already reverted
  1714  	}
  1715  }
  1716  
  1717  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1718  // failed retrieval tasks to the scheduler for reassignment.
  1719  //
  1720  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1721  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1722  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1723  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1724  	select {
  1725  	case <-req.stale:
  1726  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1727  		return
  1728  	default:
  1729  	}
  1730  	close(req.stale)
  1731  
  1732  	// Remove the request from the tracked set
  1733  	s.lock.Lock()
  1734  	delete(s.bytecodeHealReqs, req.id)
  1735  	s.lock.Unlock()
  1736  
  1737  	// If there's a timeout timer still running, abort it and mark the code
  1738  	// retrievals as not-pending, ready for rescheduling
  1739  	req.timeout.Stop()
  1740  	for _, hash := range req.hashes {
  1741  		req.task.codeTasks[hash] = struct{}{}
  1742  	}
  1743  }
  1744  
  1745  // processAccountResponse integrates an already validated account range response
  1746  // into the account tasks.
  1747  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1748  	// Switch the task from pending to filling
  1749  	res.task.req = nil
  1750  	res.task.res = res
  1751  
  1752  	// Ensure that the response doesn't overflow into the subsequent task
  1753  	last := res.task.Last.Big()
  1754  	for i, hash := range res.hashes {
  1755  		// Mark the range complete if the last is already included.
  1756  		// Keep iterating to delete any extra states if they exist.
  1757  		cmp := hash.Big().Cmp(last)
  1758  		if cmp == 0 {
  1759  			res.cont = false
  1760  			continue
  1761  		}
  1762  		if cmp > 0 {
  1763  			// Chunk overflown, cut off excess
  1764  			res.hashes = res.hashes[:i]
  1765  			res.accounts = res.accounts[:i]
  1766  			res.cont = false // Mark range completed
  1767  			break
  1768  		}
  1769  	}
  1770  	// Iterate over all the accounts and assemble which ones need further sub-
  1771  	// filling before the entire account range can be persisted.
  1772  	res.task.needCode = make([]bool, len(res.accounts))
  1773  	res.task.needState = make([]bool, len(res.accounts))
  1774  	res.task.needHeal = make([]bool, len(res.accounts))
  1775  
  1776  	res.task.codeTasks = make(map[common.Hash]struct{})
  1777  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1778  
  1779  	resumed := make(map[common.Hash]struct{})
  1780  
  1781  	res.task.pend = 0
  1782  	for i, account := range res.accounts {
  1783  		// Check if the account is a contract with an unknown code
  1784  		if !bytes.Equal(account.CodeHash, emptyCode[:]) {
  1785  			if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
  1786  				res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
  1787  				res.task.needCode[i] = true
  1788  				res.task.pend++
  1789  			}
  1790  		}
  1791  		// Check if the account is a contract with an unknown storage trie
  1792  		if account.Root != emptyRoot {
  1793  			if ok, err := s.db.Has(account.Root[:]); err != nil || !ok {
  1794  				// If there was a previous large state retrieval in progress,
  1795  				// don't restart it from scratch. This happens if a sync cycle
  1796  				// is interrupted and resumed later. However, *do* update the
  1797  				// previous root hash.
  1798  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1799  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1800  					for _, subtask := range subtasks {
  1801  						subtask.root = account.Root
  1802  					}
  1803  					res.task.needHeal[i] = true
  1804  					resumed[res.hashes[i]] = struct{}{}
  1805  				} else {
  1806  					res.task.stateTasks[res.hashes[i]] = account.Root
  1807  				}
  1808  				res.task.needState[i] = true
  1809  				res.task.pend++
  1810  			}
  1811  		}
  1812  	}
  1813  	// Delete any subtasks that have been aborted but not resumed. This may undo
  1814  	// some progress if a new peer gives us fewer accounts than an old one, but for
  1815  	// now we have to live with that.
  1816  	for hash := range res.task.SubTasks {
  1817  		if _, ok := resumed[hash]; !ok {
  1818  			log.Debug("Aborting suspended storage retrieval", "account", hash)
  1819  			delete(res.task.SubTasks, hash)
  1820  		}
  1821  	}
  1822  	// If the account range contained no contracts, or all have been fully filled
  1823  	// beforehand, short circuit storage filling and forward to the next task
  1824  	if res.task.pend == 0 {
  1825  		s.forwardAccountTask(res.task)
  1826  		return
  1827  	}
  1828  	// Some accounts are incomplete, leave as is for the storage and contract
  1829  	// task assigners to pick up and fill.
  1830  }
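
        // Bookkeeping sketch (illustrative values): a delivered range [plainA,
        // contractB] where B's code and storage are both unknown yields
        // needCode = [false true], needState = [false true] and pend = 2; the
        // later bytecode and storage deliveries decrement pend, and the task is
        // forwarded once it drops to zero.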
  1831  
  1832  // processBytecodeResponse integrates an already validated bytecode response
  1833  // into the account tasks.
  1834  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1835  	batch := s.db.NewBatch()
  1836  
  1837  	var (
  1838  		codes uint64
  1839  	)
  1840  	for i, hash := range res.hashes {
  1841  		code := res.codes[i]
  1842  
  1843  		// If the bytecode was not delivered, reschedule it
  1844  		if code == nil {
  1845  			res.task.codeTasks[hash] = struct{}{}
  1846  			continue
  1847  		}
  1848  		// Code was delivered, mark it not needed any more
  1849  		for j, account := range res.task.res.accounts {
  1850  			if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
  1851  				res.task.needCode[j] = false
  1852  				res.task.pend--
  1853  			}
  1854  		}
  1855  		// Push the bytecode into a database batch
  1856  		codes++
  1857  		rawdb.WriteCode(batch, hash, code)
  1858  	}
  1859  	bytes := common.StorageSize(batch.ValueSize())
  1860  	if err := batch.Write(); err != nil {
  1861  		log.Crit("Failed to persist bytecodes", "err", err)
  1862  	}
  1863  	s.bytecodeSynced += codes
  1864  	s.bytecodeBytes += bytes
  1865  
  1866  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes)
  1867  
  1868  	// If this delivery completed the last pending task, forward the account task
  1869  	// to the next chunk
  1870  	if res.task.pend == 0 {
  1871  		s.forwardAccountTask(res.task)
  1872  		return
  1873  	}
  1874  	// Some accounts are still incomplete, leave as is for the storage and contract
  1875  	// task assigners to pick up and fill.
  1876  }
  1877  
  1878  // processStorageResponse integrates an already validated storage response
  1879  // into the account tasks.
  1880  func (s *Syncer) processStorageResponse(res *storageResponse) {
  1881  	// Switch the subtask from pending to idle
  1882  	if res.subTask != nil {
  1883  		res.subTask.req = nil
  1884  	}
  1885  	batch := ethdb.HookedBatch{
  1886  		Batch: s.db.NewBatch(),
  1887  		OnPut: func(key []byte, value []byte) {
  1888  			s.storageBytes += common.StorageSize(len(key) + len(value))
  1889  		},
  1890  	}
  1891  	var (
  1892  		slots           int
  1893  		oldStorageBytes = s.storageBytes
  1894  	)
  1895  	// Iterate over all the accounts and reconstruct their storage tries from the
  1896  	// delivered slots
  1897  	for i, account := range res.accounts {
  1898  		// If the account was not delivered, reschedule it
  1899  		if i >= len(res.hashes) {
  1900  			res.mainTask.stateTasks[account] = res.roots[i]
  1901  			continue
  1902  		}
  1903  		// State was delivered; if complete, mark it as not needed any more,
  1904  		// otherwise mark the account as needing healing
  1905  		for j, hash := range res.mainTask.res.hashes {
  1906  			if account != hash {
  1907  				continue
  1908  			}
  1909  			acc := res.mainTask.res.accounts[j]
  1910  
  1911  			// If the packet contains multiple contract storage slots, all
  1912  			// but the last are surely complete. The last contract may be
  1913  			// chunked, so check its continuation flag.
  1914  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  1915  				res.mainTask.needState[j] = false
  1916  				res.mainTask.pend--
  1917  			}
  1918  			// If the last contract was chunked, mark it as needing healing
  1919  			// to avoid writing it out to disk prematurely.
  1920  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  1921  				res.mainTask.needHeal[j] = true
  1922  			}
  1923  			// If the last contract was chunked, we need to switch to large
  1924  			// contract handling mode
  1925  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  1926  				// If we haven't yet started a large-contract retrieval, create
  1927  				// the subtasks for it within the main account task
  1928  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  1929  					var (
  1930  						keys    = res.hashes[i]
  1931  						chunks  = uint64(storageConcurrency)
  1932  						lastKey common.Hash
  1933  					)
  1934  					if len(keys) > 0 {
  1935  						lastKey = keys[len(keys)-1]
  1936  					}
  1937  					// If the number of slots remaining is low, decrease the
  1938  					// number of chunks. Somewhere on the order of 10-15K slots
  1939  					// fit into a packet of 500KB. A key/slot pair is maximum 64
  1940  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  1941  					//
  1942  					// Chunk so that at least 2 packets are needed to fill a task.
  1943  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  1944  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  1945  							chunks = n + 1
  1946  						}
  1947  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  1948  					} else {
  1949  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  1950  					}
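        					// Worked example (illustrative numbers): one packet fits
        					// pessimistically maxRequestSize/64 = 8192 key/slot pairs,
        					// so an estimate of ~100K remaining slots gives
        					// n = 100000/(2*8192) = 6 and, assuming storageConcurrency
        					// is larger, chunks = n+1 = 7, each needing at least two
        					// packets to fill.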
  1951  					r := newHashRange(lastKey, chunks)
  1952  
  1953  					// Our first task is the one that was just filled by this response.
  1954  					batch := ethdb.HookedBatch{
  1955  						Batch: s.db.NewBatch(),
  1956  						OnPut: func(key []byte, value []byte) {
  1957  							s.storageBytes += common.StorageSize(len(key) + len(value))
  1958  						},
  1959  					}
  1960  					tasks = append(tasks, &storageTask{
  1961  						Next:     common.Hash{},
  1962  						Last:     r.End(),
  1963  						root:     acc.Root,
  1964  						genBatch: batch,
  1965  						genTrie:  trie.NewStackTrie(batch),
  1966  					})
  1967  					for r.Next() {
  1968  						batch := ethdb.HookedBatch{
  1969  							Batch: s.db.NewBatch(),
  1970  							OnPut: func(key []byte, value []byte) {
  1971  								s.storageBytes += common.StorageSize(len(key) + len(value))
  1972  							},
  1973  						}
  1974  						tasks = append(tasks, &storageTask{
  1975  							Next:     r.Start(),
  1976  							Last:     r.End(),
  1977  							root:     acc.Root,
  1978  							genBatch: batch,
  1979  							genTrie:  trie.NewStackTrie(batch),
  1980  						})
  1981  					}
  1982  					for _, task := range tasks {
  1983  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  1984  					}
  1985  					res.mainTask.SubTasks[account] = tasks
  1986  
  1987  					// Since we've just created the sub-tasks, this response
  1988  					// is surely for the first one (zero origin)
  1989  					res.subTask = tasks[0]
  1990  				}
  1991  			}
  1992  			// If we're in large contract delivery mode, forward the subtask
  1993  			if res.subTask != nil {
  1994  				// Ensure the response doesn't overflow into the subsequent task
  1995  				last := res.subTask.Last.Big()
  1996  				// Find the first overflowing key. While at it, mark res as complete
  1997  				// if we find the range to include or pass the 'last'
  1998  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  1999  					cmp := res.hashes[i][k].Big().Cmp(last)
  2000  					if cmp >= 0 {
  2001  						res.cont = false
  2002  					}
  2003  					return cmp > 0
  2004  				})
  2005  				if index >= 0 {
  2006  					// cut off excess
  2007  					res.hashes[i] = res.hashes[i][:index]
  2008  					res.slots[i] = res.slots[i][:index]
  2009  				}
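        				// Worked example (illustrative keys): with subTask.Last = 0x5f
        				// and delivered keys [0x40 0x5f 0x73], sort.Search returns 2
        				// (the first key strictly beyond Last) and res.cont is cleared
        				// because 0x5f was reached, so the trailing 0x73 slot is cut off.
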
  2010  				// Forward the relevant storage chunk (even if created just now)
  2011  				if res.cont {
  2012  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  2013  				} else {
  2014  					res.subTask.done = true
  2015  				}
  2016  			}
  2017  		}
  2018  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  2019  		// push them to disk. If the contract is chunked, the trie nodes will be
  2020  		// reconstructed later.
  2021  		slots += len(res.hashes[i])
  2022  
  2023  		if i < len(res.hashes)-1 || res.subTask == nil {
  2024  			tr := trie.NewStackTrie(batch)
  2025  			for j := 0; j < len(res.hashes[i]); j++ {
  2026  				tr.Update(res.hashes[i][j][:], res.slots[i][j])
  2027  			}
  2028  			tr.Commit()
  2029  		}
  2030  		// Persist the received storage segments. This flat state may be
  2031  		// outdated during the sync, but it can be fixed up later during
  2032  		// snapshot generation.
  2033  		for j := 0; j < len(res.hashes[i]); j++ {
  2034  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2035  
  2036  			// If we're storing large contracts, generate the trie nodes
  2037  			// on the fly to not trash the gluing points
  2038  			if i == len(res.hashes)-1 && res.subTask != nil {
  2039  				res.subTask.genTrie.Update(res.hashes[i][j][:], res.slots[i][j])
  2040  			}
  2041  		}
  2042  	}
  2043  	// Large contracts could have generated new trie nodes, flush them to disk
  2044  	if res.subTask != nil {
  2045  		if res.subTask.done {
  2046  			if root, err := res.subTask.genTrie.Commit(); err != nil {
  2047  				log.Error("Failed to commit stack slots", "err", err)
  2048  			} else if root == res.subTask.root {
  2049  				// If the chunk's root matches, the overflown delivery was in fact complete; clear the heal request
  2050  				for i, account := range res.mainTask.res.hashes {
  2051  					if account == res.accounts[len(res.accounts)-1] {
  2052  						res.mainTask.needHeal[i] = false
  2053  					}
  2054  				}
  2055  			}
  2056  		}
  2057  		if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
  2058  			if err := res.subTask.genBatch.Write(); err != nil {
  2059  				log.Error("Failed to persist stack slots", "err", err)
  2060  			}
  2061  			res.subTask.genBatch.Reset()
  2062  		}
  2063  	}
  2064  	// Flush anything written just now and update the stats
  2065  	if err := batch.Write(); err != nil {
  2066  		log.Crit("Failed to persist storage slots", "err", err)
  2067  	}
  2068  	s.storageSynced += uint64(slots)
  2069  
  2070  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2071  
  2072  	// If this delivery completed the last pending task, forward the account task
  2073  	// to the next chunk
  2074  	if res.mainTask.pend == 0 {
  2075  		s.forwardAccountTask(res.mainTask)
  2076  		return
  2077  	}
  2078  	// Some accounts are still incomplete, leave as is for the storage and contract
  2079  	// task assigners to pick up and fill.
  2080  }
  2081  
  2082  // processTrienodeHealResponse integrates an already validated trienode response
  2083  // into the healer tasks.
  2084  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2085  	for i, hash := range res.hashes {
  2086  		node := res.nodes[i]
  2087  
  2088  		// If the trie node was not delivered, reschedule it
  2089  		if node == nil {
  2090  			res.task.trieTasks[hash] = res.paths[i]
  2091  			continue
  2092  		}
  2093  		// Push the trie node into the state syncer
  2094  		s.trienodeHealSynced++
  2095  		s.trienodeHealBytes += common.StorageSize(len(node))
  2096  
  2097  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2098  		switch err {
  2099  		case nil:
  2100  		case trie.ErrAlreadyProcessed:
  2101  			s.trienodeHealDups++
  2102  		case trie.ErrNotRequested:
  2103  			s.trienodeHealNops++
  2104  		default:
  2105  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2106  		}
  2107  	}
  2108  	batch := s.db.NewBatch()
  2109  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2110  		log.Error("Failed to commit healing data", "err", err)
  2111  	}
  2112  	if err := batch.Write(); err != nil {
  2113  		log.Crit("Failed to persist healing data", "err", err)
  2114  	}
  2115  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2116  }
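
        // Persistence flow sketch (behaviour as used above): Process checks each
        // delivered node against its requested hash and feeds it to the healing
        // scheduler; Commit then moves the scheduler's completed nodes into the
        // batch, and batch.Write flushes them to disk in one step.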
  2117  
  2118  // processBytecodeHealResponse integrates an already validated bytecode response
  2119  // into the healer tasks.
  2120  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2121  	for i, hash := range res.hashes {
  2122  		node := res.codes[i]
  2123  
  2124  		// If the bytecode was not delivered, reschedule it
  2125  		if node == nil {
  2126  			res.task.codeTasks[hash] = struct{}{}
  2127  			continue
  2128  		}
  2129  		// Push the bytecode into the state syncer
  2130  		s.bytecodeHealSynced++
  2131  		s.bytecodeHealBytes += common.StorageSize(len(node))
  2132  
  2133  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2134  		switch err {
  2135  		case nil:
  2136  		case trie.ErrAlreadyProcessed:
  2137  			s.bytecodeHealDups++
  2138  		case trie.ErrNotRequested:
  2139  			s.bytecodeHealNops++
  2140  		default:
  2141  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2142  		}
  2143  	}
  2144  	batch := s.db.NewBatch()
  2145  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2146  		log.Error("Failed to commit healing data", "err", err)
  2147  	}
  2148  	if err := batch.Write(); err != nil {
  2149  		log.Crit("Failed to persist healing data", "err", err)
  2150  	}
  2151  	log.Debug("Persisted set of healing data", "type", "bytecode", "bytes", common.StorageSize(batch.ValueSize()))
  2152  }
  2153  
  2154  // forwardAccountTask takes a filled account task and persists anything available
  2155  // into the database, after which it forwards the next account marker so that the
  2156  // task's next chunk may be filled.
  2157  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2158  	// Remove any pending delivery
  2159  	res := task.res
  2160  	if res == nil {
  2161  		return // nothing to forward
  2162  	}
  2163  	task.res = nil
  2164  
  2165  	// Persist the received account segments. This flat state may be
  2166  	// outdated during the sync, but it can be fixed up later during
  2167  	// snapshot generation.
  2168  	oldAccountBytes := s.accountBytes
  2169  
  2170  	batch := ethdb.HookedBatch{
  2171  		Batch: s.db.NewBatch(),
  2172  		OnPut: func(key []byte, value []byte) {
  2173  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2174  		},
  2175  	}
  2176  	for i, hash := range res.hashes {
  2177  		if task.needCode[i] || task.needState[i] {
  2178  			break
  2179  		}
  2180  		slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].CodeHash)
  2181  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2182  
  2183  		// If the task is complete, drop it into the stack trie to generate
  2184  		// account trie nodes for it
  2185  		if !task.needHeal[i] {
  2186  			full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2187  			if err != nil {
  2188  				panic(err) // Really shouldn't ever happen
  2189  			}
  2190  			task.genTrie.Update(hash[:], full)
  2191  		}
  2192  	}
  2193  	// Flush anything written just now and update the stats
  2194  	if err := batch.Write(); err != nil {
  2195  		log.Crit("Failed to persist accounts", "err", err)
  2196  	}
  2197  	s.accountSynced += uint64(len(res.accounts))
  2198  
  2199  	// Task filling persisted, push the chunk marker forward to the first
  2200  	// account still missing data.
  2201  	for i, hash := range res.hashes {
  2202  		if task.needCode[i] || task.needState[i] {
  2203  			return
  2204  		}
  2205  		task.Next = incHash(hash)
  2206  	}
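        	// Marker example (illustrative): with hashes [h1 h2 h3] and h3 still
        	// needing state, Next advances to incHash(h2) (assumed here to be the
        	// hash's 256-bit successor) and the function returns at i == 2, so h3
        	// is re-fetched in the next retrieval round.
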
  2207  	// All accounts marked as complete, track if the entire task is done
  2208  	task.done = !res.cont
  2209  
  2210  	// Stack trie could have generated trie nodes, push them to disk (we need to
  2211  	// flush after finalizing task.done). It's fine even if we crash and lose this
  2212  	// write, as it will only cause more data to be downloaded during heal.
  2213  	if task.done {
  2214  		if _, err := task.genTrie.Commit(); err != nil {
  2215  			log.Error("Failed to commit stack account", "err", err)
  2216  		}
  2217  	}
  2218  	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
  2219  		if err := task.genBatch.Write(); err != nil {
  2220  			log.Error("Failed to persist stack account", "err", err)
  2221  		}
  2222  		task.genBatch.Reset()
  2223  	}
  2224  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2225  }
  2226  
  2227  // OnAccounts is a callback method to invoke when a range of accounts are
  2228  // received from a remote peer.
  2229  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2230  	size := common.StorageSize(len(hashes) * common.HashLength)
  2231  	for _, account := range accounts {
  2232  		size += common.StorageSize(len(account))
  2233  	}
  2234  	for _, node := range proof {
  2235  		size += common.StorageSize(len(node))
  2236  	}
  2237  	logger := peer.Log().New("reqid", id)
  2238  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2239  
  2240  	// Whether or not the response is valid, we can mark the peer as idle and
  2241  	// notify the scheduler to assign a new task. If the response is invalid,
  2242  	// we'll drop the peer in a bit.
  2243  	s.lock.Lock()
  2244  	if _, ok := s.peers[peer.ID()]; ok {
  2245  		s.accountIdlers[peer.ID()] = struct{}{}
  2246  	}
  2247  	select {
  2248  	case s.update <- struct{}{}:
  2249  	default:
  2250  	}
  2251  	// Ensure the response is for a valid request
  2252  	req, ok := s.accountReqs[id]
  2253  	if !ok {
  2254  		// Request stale, perhaps the peer timed out but came through in the end
  2255  		logger.Warn("Unexpected account range packet")
  2256  		s.lock.Unlock()
  2257  		return nil
  2258  	}
  2259  	delete(s.accountReqs, id)
  2260  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2261  
  2262  	// Clean up the request timeout timer, we'll see how to proceed further based
  2263  	// on the actual delivered content
  2264  	if !req.timeout.Stop() {
  2265  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2266  		s.lock.Unlock()
  2267  		return nil
  2268  	}
  2269  	// Response is valid, but check if peer is signalling that it does not have
  2270  	// the requested data. For account range queries that means the state being
  2271  	// retrieved was either already pruned remotely, or the peer is not yet
  2272  	// synced to our head.
  2273  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2274  		logger.Debug("Peer rejected account range request", "root", s.root)
  2275  		s.statelessPeers[peer.ID()] = struct{}{}
  2276  		s.lock.Unlock()
  2277  
  2278  		// Signal this request as failed, and ready for rescheduling
  2279  		s.scheduleRevertAccountRequest(req)
  2280  		return nil
  2281  	}
  2282  	root := s.root
  2283  	s.lock.Unlock()
  2284  
  2285  	// Reconstruct a partial trie from the response and verify it
  2286  	keys := make([][]byte, len(hashes))
  2287  	for i, key := range hashes {
  2288  		keys[i] = common.CopyBytes(key[:])
  2289  	}
  2290  	nodes := make(light.NodeList, len(proof))
  2291  	for i, node := range proof {
  2292  		nodes[i] = node
  2293  	}
  2294  	proofdb := nodes.NodeSet()
  2295  
  2296  	var end []byte
  2297  	if len(keys) > 0 {
  2298  		end = keys[len(keys)-1]
  2299  	}
  2300  	cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
  2301  	if err != nil {
  2302  		logger.Warn("Account range failed proof", "err", err)
  2303  		// Signal this request as failed, and ready for rescheduling
  2304  		s.scheduleRevertAccountRequest(req)
  2305  		return err
  2306  	}
  2307  	accs := make([]*types.StateAccount, len(accounts))
  2308  	for i, account := range accounts {
  2309  		acc := new(types.StateAccount)
  2310  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2311  			panic(err) // We created these blobs, we must be able to decode them
  2312  		}
  2313  		accs[i] = acc
  2314  	}
  2315  	response := &accountResponse{
  2316  		task:     req.task,
  2317  		hashes:   hashes,
  2318  		accounts: accs,
  2319  		cont:     cont,
  2320  	}
  2321  	select {
  2322  	case req.deliver <- response:
  2323  	case <-req.cancel:
  2324  	case <-req.stale:
  2325  	}
  2326  	return nil
  2327  }
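
        // Proof semantics sketch (as relied upon above): VerifyRangeProof checks
        // that the sorted keys and accounts, together with the boundary proof,
        // hash up to the requested state root; its first return value (cont)
        // reports whether more accounts exist beyond the last returned key, which
        // later drives task continuation.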
  2328  
  2329  // OnByteCodes is a callback method to invoke when a batch of contract
  2330  // bytecodes is received from a remote peer.
  2331  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2332  	s.lock.RLock()
  2333  	syncing := !s.snapped
  2334  	s.lock.RUnlock()
  2335  
  2336  	if syncing {
  2337  		return s.onByteCodes(peer, id, bytecodes)
  2338  	}
  2339  	return s.onHealByteCodes(peer, id, bytecodes)
  2340  }
  2341  
  2342  // onByteCodes is a callback method to invoke when a batch of contract
  2343  // bytecodes is received from a remote peer in the syncing phase.
  2344  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2345  	var size common.StorageSize
  2346  	for _, code := range bytecodes {
  2347  		size += common.StorageSize(len(code))
  2348  	}
  2349  	logger := peer.Log().New("reqid", id)
  2350  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2351  
  2352  	// Whether or not the response is valid, we can mark the peer as idle and
  2353  	// notify the scheduler to assign a new task. If the response is invalid,
  2354  	// we'll drop the peer in a bit.
  2355  	s.lock.Lock()
  2356  	if _, ok := s.peers[peer.ID()]; ok {
  2357  		s.bytecodeIdlers[peer.ID()] = struct{}{}
  2358  	}
  2359  	select {
  2360  	case s.update <- struct{}{}:
  2361  	default:
  2362  	}
  2363  	// Ensure the response is for a valid request
  2364  	req, ok := s.bytecodeReqs[id]
  2365  	if !ok {
  2366  		// Request stale, perhaps the peer timed out but came through in the end
  2367  		logger.Warn("Unexpected bytecode packet")
  2368  		s.lock.Unlock()
  2369  		return nil
  2370  	}
  2371  	delete(s.bytecodeReqs, id)
  2372  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2373  
  2374  	// Clean up the request timeout timer, we'll see how to proceed further based
  2375  	// on the actual delivered content
  2376  	if !req.timeout.Stop() {
  2377  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2378  		s.lock.Unlock()
  2379  		return nil
  2380  	}
  2381  
  2382  	// Response is valid, but check if peer is signalling that it does not have
  2383  	// the requested data. For bytecode range queries that means the peer is not
  2384  	// yet synced.
  2385  	if len(bytecodes) == 0 {
  2386  		logger.Debug("Peer rejected bytecode request")
  2387  		s.statelessPeers[peer.ID()] = struct{}{}
  2388  		s.lock.Unlock()
  2389  
  2390  		// Signal this request as failed, and ready for rescheduling
  2391  		s.scheduleRevertBytecodeRequest(req)
  2392  		return nil
  2393  	}
  2394  	s.lock.Unlock()
  2395  
  2396  	// Cross reference the requested bytecodes with the response to find gaps
  2397  	// that the serving node is missing
  2398  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2399  	hash := make([]byte, 32)
  2400  
  2401  	codes := make([][]byte, len(req.hashes))
  2402  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2403  		// Find the next hash that we've been served, leaving misses with nils
  2404  		hasher.Reset()
  2405  		hasher.Write(bytecodes[i])
  2406  		hasher.Read(hash)
  2407  
  2408  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2409  			j++
  2410  		}
  2411  		if j < len(req.hashes) {
  2412  			codes[j] = bytecodes[i]
  2413  			j++
  2414  			continue
  2415  		}
  2416  		// We've either run out of hashes, or got unrequested data
  2417  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2418  		// Signal this request as failed, and ready for rescheduling
  2419  		s.scheduleRevertBytecodeRequest(req)
  2420  		return errors.New("unexpected bytecode")
  2421  	}
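        	// Worked example of the ordered matching above (illustrative hashes):
        	// with req.hashes = [A B C D] and deliveries hashing to [B D], the
        	// two-pointer scan leaves codes = [nil codeB nil codeD]; a delivery
        	// hashing to anything outside the remaining request window aborts
        	// with "unexpected bytecode".
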
  2422  	// Response validated, send it to the scheduler for filling
  2423  	response := &bytecodeResponse{
  2424  		task:   req.task,
  2425  		hashes: req.hashes,
  2426  		codes:  codes,
  2427  	}
  2428  	select {
  2429  	case req.deliver <- response:
  2430  	case <-req.cancel:
  2431  	case <-req.stale:
  2432  	}
  2433  	return nil
  2434  }
  2435  
  2436  // OnStorage is a callback method to invoke when ranges of storage slots
  2437  // are received from a remote peer.
  2438  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2439  	// Gather some trace stats to aid in debugging issues
  2440  	var (
  2441  		hashCount int
  2442  		slotCount int
  2443  		size      common.StorageSize
  2444  	)
  2445  	for _, hashset := range hashes {
  2446  		size += common.StorageSize(common.HashLength * len(hashset))
  2447  		hashCount += len(hashset)
  2448  	}
  2449  	for _, slotset := range slots {
  2450  		for _, slot := range slotset {
  2451  			size += common.StorageSize(len(slot))
  2452  		}
  2453  		slotCount += len(slotset)
  2454  	}
  2455  	for _, node := range proof {
  2456  		size += common.StorageSize(len(node))
  2457  	}
  2458  	logger := peer.Log().New("reqid", id)
  2459  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2460  
  2461  	// Whether or not the response is valid, we can mark the peer as idle and
  2462  	// notify the scheduler to assign a new task. If the response is invalid,
  2463  	// we'll drop the peer in a bit.
  2464  	s.lock.Lock()
  2465  	if _, ok := s.peers[peer.ID()]; ok {
  2466  		s.storageIdlers[peer.ID()] = struct{}{}
  2467  	}
  2468  	select {
  2469  	case s.update <- struct{}{}:
  2470  	default:
  2471  	}
  2472  	// Ensure the response is for a valid request
  2473  	req, ok := s.storageReqs[id]
  2474  	if !ok {
  2475  		// Request stale, perhaps the peer timed out but came through in the end
  2476  		logger.Warn("Unexpected storage ranges packet")
  2477  		s.lock.Unlock()
  2478  		return nil
  2479  	}
  2480  	delete(s.storageReqs, id)
  2481  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2482  
  2483  	// Clean up the request timeout timer, we'll see how to proceed further based
  2484  	// on the actual delivered content
  2485  	if !req.timeout.Stop() {
  2486  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2487  		s.lock.Unlock()
  2488  		return nil
  2489  	}
  2490  
  2491  	// Reject the response if the hash sets and slot sets don't match, or if the
  2492  	// peer sent more data than requested.
  2493  	if len(hashes) != len(slots) {
  2494  		s.lock.Unlock()
  2495  		s.scheduleRevertStorageRequest(req) // reschedule request
  2496  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2497  		return errors.New("hash and slot set size mismatch")
  2498  	}
  2499  	if len(hashes) > len(req.accounts) {
  2500  		s.lock.Unlock()
  2501  		s.scheduleRevertStorageRequest(req) // reschedule request
  2502  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2503  		return errors.New("hash set larger than requested")
  2504  	}
  2505  	// Response is valid, but check if peer is signalling that it does not have
  2506  	// the requested data. For storage range queries that means the state being
  2507  	// retrieved was either already pruned remotely, or the peer is not yet
  2508  	// synced to our head.
  2509  	if len(hashes) == 0 {
  2510  		logger.Debug("Peer rejected storage request")
  2511  		s.statelessPeers[peer.ID()] = struct{}{}
  2512  		s.lock.Unlock()
  2513  		s.scheduleRevertStorageRequest(req) // reschedule request
  2514  		return nil
  2515  	}
  2516  	s.lock.Unlock()
  2517  
  2518  	// Reconstruct the partial tries from the response and verify them
  2519  	var cont bool
  2520  
  2521  	for i := 0; i < len(hashes); i++ {
  2522  		// Convert the keys and proofs into an internal format
  2523  		keys := make([][]byte, len(hashes[i]))
  2524  		for j, key := range hashes[i] {
  2525  			keys[j] = common.CopyBytes(key[:])
  2526  		}
  2527  		nodes := make(light.NodeList, 0, len(proof))
  2528  		if i == len(hashes)-1 {
  2529  			for _, node := range proof {
  2530  				nodes = append(nodes, node)
  2531  			}
  2532  		}
  2533  		var err error
  2534  		if len(nodes) == 0 {
  2535  			// No proof has been attached, the response must cover the entire key
  2536  			// space and hash to the origin root.
  2537  			_, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
  2538  			if err != nil {
  2539  				s.scheduleRevertStorageRequest(req) // reschedule request
  2540  				logger.Warn("Storage slots failed proof", "err", err)
  2541  				return err
  2542  			}
  2543  		} else {
  2544  			// A proof was attached, the response is only partial, check that the
  2545  			// returned data is indeed part of the storage trie
  2546  			proofdb := nodes.NodeSet()
  2547  
  2548  			var end []byte
  2549  			if len(keys) > 0 {
  2550  				end = keys[len(keys)-1]
  2551  			}
  2552  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
  2553  			if err != nil {
  2554  				s.scheduleRevertStorageRequest(req) // reschedule request
  2555  				logger.Warn("Storage range failed proof", "err", err)
  2556  				return err
  2557  			}
  2558  		}
  2559  	}
  2560  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2561  	response := &storageResponse{
  2562  		mainTask: req.mainTask,
  2563  		subTask:  req.subTask,
  2564  		accounts: req.accounts,
  2565  		roots:    req.roots,
  2566  		hashes:   hashes,
  2567  		slots:    slots,
  2568  		cont:     cont,
  2569  	}
  2570  	select {
  2571  	case req.deliver <- response:
  2572  	case <-req.cancel:
  2573  	case <-req.stale:
  2574  	}
  2575  	return nil
  2576  }
  2577  
  2578  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2579  // are received from a remote peer.
  2580  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2581  	var size common.StorageSize
  2582  	for _, node := range trienodes {
  2583  		size += common.StorageSize(len(node))
  2584  	}
  2585  	logger := peer.Log().New("reqid", id)
  2586  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2587  
  2588  	// Whether or not the response is valid, we can mark the peer as idle and
  2589  	// notify the scheduler to assign a new task. If the response is invalid,
  2590  	// we'll drop the peer in a bit.
  2591  	s.lock.Lock()
  2592  	if _, ok := s.peers[peer.ID()]; ok {
  2593  		s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2594  	}
  2595  	select {
  2596  	case s.update <- struct{}{}:
  2597  	default:
  2598  	}
  2599  	// Ensure the response is for a valid request
  2600  	req, ok := s.trienodeHealReqs[id]
  2601  	if !ok {
  2602  		// Request stale, perhaps the peer timed out but came through in the end
  2603  		logger.Warn("Unexpected trienode heal packet")
  2604  		s.lock.Unlock()
  2605  		return nil
  2606  	}
  2607  	delete(s.trienodeHealReqs, id)
  2608  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2609  
  2610  	// Clean up the request timeout timer, we'll see how to proceed further based
  2611  	// on the actual delivered content
  2612  	if !req.timeout.Stop() {
  2613  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2614  		s.lock.Unlock()
  2615  		return nil
  2616  	}
  2617  
  2618  	// Response is valid, but check if peer is signalling that it does not have
  2619  	// the requested data. For trienode heal queries that means the peer is not
  2620  	// yet synced.
  2621  	if len(trienodes) == 0 {
  2622  		logger.Debug("Peer rejected trienode heal request")
  2623  		s.statelessPeers[peer.ID()] = struct{}{}
  2624  		s.lock.Unlock()
  2625  
  2626  		// Signal this request as failed, and ready for rescheduling
  2627  		s.scheduleRevertTrienodeHealRequest(req)
  2628  		return nil
  2629  	}
  2630  	s.lock.Unlock()
  2631  
  2632  	// Cross reference the requested trienodes with the response to find gaps
  2633  	// that the serving node is missing
  2634  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2635  	hash := make([]byte, 32)
  2636  
  2637  	nodes := make([][]byte, len(req.hashes))
  2638  	for i, j := 0, 0; i < len(trienodes); i++ {
  2639  		// Find the next hash that we've been served, leaving misses with nils
  2640  		hasher.Reset()
  2641  		hasher.Write(trienodes[i])
  2642  		hasher.Read(hash)
  2643  
  2644  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2645  			j++
  2646  		}
  2647  		if j < len(req.hashes) {
  2648  			nodes[j] = trienodes[i]
  2649  			j++
  2650  			continue
  2651  		}
  2652  		// We've either run out of hashes, or got unrequested data
  2653  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2654  		// Signal this request as failed, and ready for rescheduling
  2655  		s.scheduleRevertTrienodeHealRequest(req)
  2656  		return errors.New("unexpected healing trienode")
  2657  	}
  2658  	// Response validated, send it to the scheduler for filling
  2659  	response := &trienodeHealResponse{
  2660  		task:   req.task,
  2661  		hashes: req.hashes,
  2662  		paths:  req.paths,
  2663  		nodes:  nodes,
  2664  	}
  2665  	select {
  2666  	case req.deliver <- response:
  2667  	case <-req.cancel:
  2668  	case <-req.stale:
  2669  	}
  2670  	return nil
  2671  }
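
        // The cross-referencing loop above assumes the peer serves a subsequence
        // of the requested items in request order: each delivered blob is hashed
        // and matched against a forward-only cursor over the requested hashes,
        // leaving nil gaps for items the peer did not have. A minimal standalone
        // sketch of the technique (matchOrdered is hypothetical, for illustration
        // only):
        func matchOrdered(reqHashes []common.Hash, blobs [][]byte) ([][]byte, error) {
        	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
        	hash := make([]byte, 32)

        	matched := make([][]byte, len(reqHashes))
        	for i, j := 0, 0; i < len(blobs); i++ {
        		hasher.Reset()
        		hasher.Write(blobs[i])
        		hasher.Read(hash)

        		// Skip requested items the peer chose not to serve
        		for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
        			j++
        		}
        		if j >= len(reqHashes) {
        			return nil, errors.New("unrequested or out-of-order blob")
        		}
        		matched[j] = blobs[i]
        		j++
        	}
        	return matched, nil
        }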
  2672  
  2673  // onHealByteCodes is a callback method to invoke when a batch of contract
  2674  // bytecodes is received from a remote peer in the healing phase.
  2675  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2676  	var size common.StorageSize
  2677  	for _, code := range bytecodes {
  2678  		size += common.StorageSize(len(code))
  2679  	}
  2680  	logger := peer.Log().New("reqid", id)
  2681  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2682  
  2683  	// Whether or not the response is valid, we can mark the peer as idle and
  2684  	// notify the scheduler to assign a new task. If the response is invalid,
  2685  	// we'll drop the peer in a bit.
  2686  	s.lock.Lock()
  2687  	if _, ok := s.peers[peer.ID()]; ok {
  2688  		s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2689  	}
  2690  	select {
  2691  	case s.update <- struct{}{}:
  2692  	default:
  2693  	}
  2694  	// Ensure the response is for a valid request
  2695  	req, ok := s.bytecodeHealReqs[id]
  2696  	if !ok {
  2697  		// Request stale, perhaps the peer timed out but came through in the end
  2698  		logger.Warn("Unexpected bytecode heal packet")
  2699  		s.lock.Unlock()
  2700  		return nil
  2701  	}
  2702  	delete(s.bytecodeHealReqs, id)
  2703  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2704  
  2705  	// Clean up the request timeout timer; we'll see how to proceed further based
  2706  	// on the actual delivered content
  2707  	if !req.timeout.Stop() {
  2708  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2709  		s.lock.Unlock()
  2710  		return nil
  2711  	}
  2712  
  2713  	// Response is valid, but check if peer is signalling that it does not have
  2714  	// the requested data. For bytecode heal requests that means the peer is not
  2715  	// yet synced.
  2716  	if len(bytecodes) == 0 {
  2717  		logger.Debug("Peer rejected bytecode heal request")
  2718  		s.statelessPeers[peer.ID()] = struct{}{}
  2719  		s.lock.Unlock()
  2720  
  2721  		// Signal this request as failed, and ready for rescheduling
  2722  		s.scheduleRevertBytecodeHealRequest(req)
  2723  		return nil
  2724  	}
  2725  	s.lock.Unlock()
  2726  
  2727  	// Cross reference the requested bytecodes with the response to find gaps
  2728  	// that the serving node is missing
  2729  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2730  	hash := make([]byte, 32)
  2731  
  2732  	codes := make([][]byte, len(req.hashes))
  2733  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2734  		// Find the next hash that we've been served, leaving misses with nils
  2735  		hasher.Reset()
  2736  		hasher.Write(bytecodes[i])
  2737  		hasher.Read(hash)
  2738  
  2739  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2740  			j++
  2741  		}
  2742  		if j < len(req.hashes) {
  2743  			codes[j] = bytecodes[i]
  2744  			j++
  2745  			continue
  2746  		}
  2747  		// We've either run out of hashes, or been served unrequested data
  2748  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  2749  		// Signal this request as failed, and ready for rescheduling
  2750  		s.scheduleRevertBytecodeHealRequest(req)
  2751  		return errors.New("unexpected healing bytecode")
  2752  	}
  2753  	// Response validated, send it to the scheduler for filling
  2754  	response := &bytecodeHealResponse{
  2755  		task:   req.task,
  2756  		hashes: req.hashes,
  2757  		codes:  codes,
  2758  	}
  2759  	select {
  2760  	case req.deliver <- response:
  2761  	case <-req.cancel:
  2762  	case <-req.stale:
  2763  	}
  2764  	return nil
  2765  }
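
        // Both heal handlers above use time.Timer.Stop to resolve the race between
        // a late response and the request timeout: if Stop returns false, the timer
        // already fired and the timeout path owns the request (revert+reschedule),
        // so the response path backs off. A minimal sketch of the idiom
        // (handleIfLive and processLive are hypothetical, for illustration only):
        func handleIfLive(timeout *time.Timer, processLive func()) {
        	if !timeout.Stop() {
        		// Timeout already triggered; drop the late response to avoid
        		// double-processing a reverted request.
        		return
        	}
        	processLive()
        }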
  2766  
  2767  // onHealState is a callback method to invoke when a flat state (account
  2768  // or storage slot) is downloaded during the healing stage. The flat states
  2769  // can be persisted blindly and fixed up later in the generation stage.
  2770  // Note, it is not safe for concurrent use; the caller must serialize calls.
  2771  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
  2772  	if len(paths) == 1 {
  2773  		var account types.StateAccount
  2774  		if err := rlp.DecodeBytes(value, &account); err != nil {
  2775  			return nil
  2776  		}
  2777  		blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash)
  2778  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  2779  		s.accountHealed += 1
  2780  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  2781  	}
  2782  	if len(paths) == 2 {
  2783  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  2784  		s.storageHealed += 1
  2785  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  2786  	}
  2787  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  2788  		s.stateWriter.Write() // It's fine to ignore the error here
  2789  		s.stateWriter.Reset()
  2790  	}
  2791  	return nil
  2792  }
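
        // onHealState accumulates snapshot writes in a batch and flushes once the
        // pending payload exceeds ethdb.IdealBatchSize. A minimal sketch of the
        // same flush idiom (flushIfFull is hypothetical, for illustration only):
        func flushIfFull(batch ethdb.Batch) {
        	if batch.ValueSize() > ethdb.IdealBatchSize {
        		// Mirroring the healer above, the write error is deliberately
        		// ignored: the snapshot is fixed up later during generation.
        		_ = batch.Write()
        		batch.Reset()
        	}
        }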
  2793  
  2794  // hashSpace is the total size of the 256-bit hash space for accounts.
  2795  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  2796  
  2797  // report calculates various status reports and provides them to the user.
  2798  func (s *Syncer) report(force bool) {
  2799  	if len(s.tasks) > 0 {
  2800  		s.reportSyncProgress(force)
  2801  		return
  2802  	}
  2803  	s.reportHealProgress(force)
  2804  }
  2805  
  2806  // reportSyncProgress calculates various status reports and provides them to the user.
  2807  func (s *Syncer) reportSyncProgress(force bool) {
  2808  	// Don't report all the events, just occasionally
  2809  	if !force && time.Since(s.logTime) < 8*time.Second {
  2810  		return
  2811  	}
  2812  	// Don't report anything until we have made meaningful progress
  2813  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  2814  	if synced == 0 {
  2815  		return
  2816  	}
  2817  	accountGaps := new(big.Int)
  2818  	for _, task := range s.tasks {
  2819  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  2820  	}
  2821  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  2822  	if accountFills.BitLen() == 0 {
  2823  		return
  2824  	}
  2825  	s.logTime = time.Now()
  2826  	estBytes := float64(new(big.Int).Div(
  2827  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  2828  		accountFills,
  2829  	).Uint64())
  2830  	// Don't report anything until we have made meaningful progress
  2831  	if estBytes < 1.0 {
  2832  		return
  2833  	}
  2834  	elapsed := time.Since(s.startTime)
  2835  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  2836  
  2837  	// Create a mega progress report
  2838  	var (
  2839  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  2840  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  2841  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  2842  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  2843  	)
  2844  	log.Info("State sync in progress", "synced", progress, "state", synced,
  2845  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  2846  }
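
        // The extrapolation above is linear in the covered account range: with a
        // fraction f = accountFills/hashSpace of the hash space already filled,
        // the estimated total is estBytes = synced/f, and the ETA scales the
        // elapsed time by the data still missing. Worked example (illustrative
        // numbers only): with half the hash space filled and 10GiB synced in 1h,
        // estBytes is 20GiB and the reported ETA is 1h/10GiB*20GiB - 1h = 1h.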
  2847  
  2848  // reportHealProgress calculates various status reports and provides them to the user.
  2849  func (s *Syncer) reportHealProgress(force bool) {
  2850  	// Don't report all the events, just occasionally
  2851  	if !force && time.Since(s.logTime) < 8*time.Second {
  2852  		return
  2853  	}
  2854  	s.logTime = time.Now()
  2855  
  2856  	// Create a mega progress report
  2857  	var (
  2858  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  2859  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  2860  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  2861  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  2862  	)
  2863  	log.Info("State heal in progress", "accounts", accounts, "slots", storage,
  2864  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  2865  }
  2866  
  2867  // estimateRemainingSlots tries to determine roughly how many slots are left in
  2868  // a contract storage, based on the number of keys and the last hash. This method
  2869  // assumes that the hashes are lexicographically ordered and evenly distributed.
  2870  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  2871  	if last == (common.Hash{}) {
  2872  		return 0, errors.New("last hash empty")
  2873  	}
  2874  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  2875  	space.Div(space, last.Big())
  2876  	if !space.IsUint64() {
  2877  		// Gigantic address space, probably due to too few or maliciously crafted slots
  2878  		return 0, errors.New("too few slots for estimation")
  2879  	}
  2880  	return space.Uint64() - uint64(hashes), nil
  2881  }
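
        // Quick sanity check of the estimator above, assuming uniformly distributed
        // slot hashes (values are illustrative):
        //
        //	half := common.BigToHash(new(big.Int).Rsh(math.MaxBig256, 1)) // last hash ~ 2^255
        //	left, _ := estimateRemainingSlots(64, half)                    // total ~ 128, so left ~ 64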
  2882  
  2883  // capacitySort implements sort.Interface, allowing sorting by peer message
  2884  // throughput. Note, callers should use sort.Reverse to get the desired effect
  2885  // of highest capacity being at the front.
  2886  type capacitySort struct {
  2887  	ids  []string
  2888  	caps []int
  2889  }
  2890  
  2891  func (s *capacitySort) Len() int {
  2892  	return len(s.ids)
  2893  }
  2894  
  2895  func (s *capacitySort) Less(i, j int) bool {
  2896  	return s.caps[i] < s.caps[j]
  2897  }
  2898  
  2899  func (s *capacitySort) Swap(i, j int) {
  2900  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  2901  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  2902  }
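
        // Example use matching the doc comment above: wrap capacitySort in
        // sort.Reverse to order the highest-capacity peers first (a minimal
        // sketch; sortByCapacityDesc and its arguments are hypothetical):
        func sortByCapacityDesc(ids []string, caps []int) {
        	sort.Sort(sort.Reverse(&capacitySort{ids: ids, caps: caps}))
        }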