github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/eth/protocols/snap/sync.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"math/rand"
    26  	"sort"
    27  	"sync"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/math"
    32  	"github.com/ethereum/go-ethereum/core/rawdb"
    33  	"github.com/ethereum/go-ethereum/core/state"
    34  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/ethdb"
    38  	"github.com/ethereum/go-ethereum/event"
    39  	"github.com/ethereum/go-ethereum/light"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/p2p/msgrate"
    42  	"github.com/ethereum/go-ethereum/rlp"
    43  	"github.com/ethereum/go-ethereum/trie"
    44  	"golang.org/x/crypto/sha3"
    45  )
    46  
    47  var (
    48  	// emptyRoot is the known root hash of an empty trie.
    49  	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
    50  
    51  	// emptyCode is the known hash of the empty EVM bytecode.
    52  	emptyCode = crypto.Keccak256Hash(nil)
    53  )
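
        // Neither value is magic: emptyRoot is the keccak256 of the RLP encoding of
        // an empty string (the single byte 0x80) and emptyCode is the keccak256 of
        // zero bytes. A hedged derivation sketch (documentation only):
        //
        //	crypto.Keccak256Hash([]byte{0x80}) // == emptyRoot
        //	crypto.Keccak256Hash(nil)          // == emptyCode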
    54  
    55  const (
    56  	// minRequestSize is the minimum number of bytes to request from a remote peer.
    57  	// This number is used as the low cap for account and storage range requests.
    58  	// Bytecode and trienode are limited inherently by item count (1).
    59  	minRequestSize = 64 * 1024
    60  
    61  	// maxRequestSize is the maximum number of bytes to request from a remote peer.
    62  	// This number is used as the high cap for account and storage range requests.
    63  	// Bytecode and trienode are limited more explicitly by the caps below.
    64  	maxRequestSize = 512 * 1024
    65  
    66  	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
    67  	// single query. If this number is too low, we're not filling responses fully
    68  	// and waste round trip times. If it's too high, we're capping responses and
    69  	// waste bandwidth.
    70  	//
    71  	// Deployed bytecodes are currently capped at 24KB, so the minimum request
    72  	// count should be maxRequestSize / 24K. Assuming that most contracts do not
    73  	// come close to that, requesting 4x should be a good approximation.
    74  	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
    75  
    76  	// maxTrieRequestCount is the maximum number of trie node blobs to request in
    77  	// a single query. If this number is too low, we're not filling responses fully
    78  	// and waste round trip times. If it's too high, we're capping responses and
    79  	// waste bandwidth.
    80  	maxTrieRequestCount = maxRequestSize / 512
    81  )
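
        // For concreteness, the caps above evaluate (with Go's integer division) to:
        //
        //	maxCodeRequestCount = 512*1024/(24*1024) * 4 = 21*4 = 84 bytecodes per request
        //	maxTrieRequestCount = 512*1024/512            = 1024 trie nodes per request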
    82  
    83  var (
    84  	// accountConcurrency is the number of chunks to split the account trie into
    85  	// to allow concurrent retrievals.
    86  	accountConcurrency = 16
    87  
    88  	// storageConcurrency is the number of chunks to split a large contract
    89  	// storage trie into to allow concurrent retrievals.
    90  	storageConcurrency = 16
    91  )
    92  
    93  // ErrCancelled is returned from snap syncing if the operation was prematurely
    94  // terminated.
    95  var ErrCancelled = errors.New("sync cancelled")
    96  
    97  // accountRequest tracks a pending account range request to ensure responses are
    98  // to actual requests and to validate any security constraints.
    99  //
   100  // Concurrency note: account requests and responses are handled concurrently from
   101  // the main runloop to allow Merkle proof verifications on the peer's thread and
   102  // to drop on invalid response. The request struct must contain all the data to
   103  // construct the response without accessing runloop internals (i.e. task). That
   104  // is only included to allow the runloop to match a response to the task being
   105  // synced without having yet another set of maps.
   106  type accountRequest struct {
   107  	peer string    // Peer to which this request is assigned
   108  	id   uint64    // Request ID of this request
   109  	time time.Time // Timestamp when the request was sent
   110  
   111  	deliver chan *accountResponse // Channel to deliver successful response on
   112  	revert  chan *accountRequest  // Channel to deliver request failure on
   113  	cancel  chan struct{}         // Channel to track sync cancellation
   114  	timeout *time.Timer           // Timer to track delivery timeout
   115  	stale   chan struct{}         // Channel to signal the request was dropped
   116  
   117  	origin common.Hash // First account requested to allow continuation checks
   118  	limit  common.Hash // Last account requested to allow non-overlapping chunking
   119  
   120  	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
   121  }
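
        // Request lifecycle, summarized from the plumbing below: a verified response
        // is sent on deliver and a failure on revert; the timeout timer schedules a
        // revert on expiry, and stale is closed once the request is finalized so any
        // late responses can be recognized and dropped.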
   122  
   123  // accountResponse is an already Merkle-verified remote response to an account
   124  // range request. It contains the subtrie for the requested account range and
   125  // the database that's going to be filled with the internal nodes on commit.
   126  type accountResponse struct {
   127  	task *accountTask // Task which this request is filling
   128  
   129  	hashes   []common.Hash         // Account hashes in the returned range
   130  	accounts []*types.StateAccount // Expanded accounts in the returned range
   131  
   132  	cont bool // Whether the account range has a continuation
   133  }
   134  
   135  // bytecodeRequest tracks a pending bytecode request to ensure responses are to
   136  // actual requests and to validate any security constraints.
   137  //
   138  // Concurrency note: bytecode requests and responses are handled concurrently from
   139  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   140  // to drop on invalid response. The request struct must contain all the data to
   141  // construct the response without accessing runloop internals (i.e. task). That
   142  // is only included to allow the runloop to match a response to the task being
   143  // synced without having yet another set of maps.
   144  type bytecodeRequest struct {
   145  	peer string    // Peer to which this request is assigned
   146  	id   uint64    // Request ID of this request
   147  	time time.Time // Timestamp when the request was sent
   148  
   149  	deliver chan *bytecodeResponse // Channel to deliver successful response on
   150  	revert  chan *bytecodeRequest  // Channel to deliver request failure on
   151  	cancel  chan struct{}          // Channel to track sync cancellation
   152  	timeout *time.Timer            // Timer to track delivery timeout
   153  	stale   chan struct{}          // Channel to signal the request was dropped
   154  
   155  	hashes []common.Hash // Bytecode hashes to validate responses
   156  	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
   157  }
   158  
   159  // bytecodeResponse is an already verified remote response to a bytecode request.
   160  type bytecodeResponse struct {
   161  	task *accountTask // Task which this request is filling
   162  
   163  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   164  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   165  }
   166  
   167  // storageRequest tracks a pending storage ranges request to ensure responses are
   168  // to actual requests and to validate any security constraints.
   169  //
   170  // Concurrency note: storage requests and responses are handled concurrently from
   171  // the main runloop to allow Merkle proof verifications on the peer's thread and
   172  // to drop on invalid response. The request struct must contain all the data to
   173  // construct the response without accessing runloop internals (i.e. tasks). That
   174  // is only included to allow the runloop to match a response to the task being
   175  // synced without having yet another set of maps.
   176  type storageRequest struct {
   177  	peer string    // Peer to which this request is assigned
   178  	id   uint64    // Request ID of this request
   179  	time time.Time // Timestamp when the request was sent
   180  
   181  	deliver chan *storageResponse // Channel to deliver successful response on
   182  	revert  chan *storageRequest  // Channel to deliver request failure on
   183  	cancel  chan struct{}         // Channel to track sync cancellation
   184  	timeout *time.Timer           // Timer to track delivery timeout
   185  	stale   chan struct{}         // Channel to signal the request was dropped
   186  
   187  	accounts []common.Hash // Account hashes to validate responses
   188  	roots    []common.Hash // Storage roots to validate responses
   189  
   190  	origin common.Hash // First storage slot requested to allow continuation checks
   191  	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
   192  
   193  	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
   194  	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
   195  }
   196  
   197  // storageResponse is an already Merkle-verified remote response to a storage
   198  // range request. It contains the subtries for the requested storage ranges and
   199  // the databases that are going to be filled with the internal nodes on commit.
   200  type storageResponse struct {
   201  	mainTask *accountTask // Task which this response belongs to
   202  	subTask  *storageTask // Task which this response is filling
   203  
   204  	accounts []common.Hash // Account hashes requested, may be only partially filled
   205  	roots    []common.Hash // Storage roots requested, may be only partially filled
   206  
   207  	hashes [][]common.Hash // Storage slot hashes in the returned range
   208  	slots  [][][]byte      // Storage slot values in the returned range
   209  
   210  	cont bool // Whether the last storage range has a continuation
   211  }
   212  
   213  // trienodeHealRequest tracks a pending state trie request to ensure responses
   214  // are to actual requests and to validate any security constraints.
   215  //
   216  // Concurrency note: trie node requests and responses are handled concurrently from
   217  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   218  // to drop on invalid response. The request struct must contain all the data to
   219  // construct the response without accessing runloop internals (i.e. task). That
   220  // is only included to allow the runloop to match a response to the task being
   221  // synced without having yet another set of maps.
   222  type trienodeHealRequest struct {
   223  	peer string    // Peer to which this request is assigned
   224  	id   uint64    // Request ID of this request
   225  	time time.Time // Timestamp when the request was sent
   226  
   227  	deliver chan *trienodeHealResponse // Channel to deliver successful response on
   228  	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
   229  	cancel  chan struct{}              // Channel to track sync cancellation
   230  	timeout *time.Timer                // Timer to track delivery timeout
   231  	stale   chan struct{}              // Channel to signal the request was dropped
   232  
   233  	hashes []common.Hash   // Trie node hashes to validate responses
   234  	paths  []trie.SyncPath // Trie node paths requested for rescheduling
   235  
   236  	task *healTask // Task which this request is filling (only access fields through the runloop!!)
   237  }
   238  
   239  // trienodeHealResponse is an already verified remote response to a trie node request.
   240  type trienodeHealResponse struct {
   241  	task *healTask // Task which this request is filling
   242  
   243  	hashes []common.Hash   // Hashes of the trie nodes to avoid double hashing
   244  	paths  []trie.SyncPath // Trie node paths requested for rescheduling missing ones
   245  	nodes  [][]byte        // Actual trie nodes to store into the database (nil = missing)
   246  }
   247  
   248  // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
   249  // actual requests and to validate any security constraints.
   250  //
   251  // Concurrency note: bytecode requests and responses are handled concurrently from
   252  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   253  // to drop on invalid response. The request struct must contain all the data to
   254  // construct the response without accessing runloop internals (i.e. task). That
   255  // is only included to allow the runloop to match a response to the task being
   256  // synced without having yet another set of maps.
   257  type bytecodeHealRequest struct {
   258  	peer string    // Peer to which this request is assigned
   259  	id   uint64    // Request ID of this request
   260  	time time.Time // Timestamp when the request was sent
   261  
   262  	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
   263  	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
   264  	cancel  chan struct{}              // Channel to track sync cancellation
   265  	timeout *time.Timer                // Timer to track delivery timeout
   266  	stale   chan struct{}              // Channel to signal the request was dropped
   267  
   268  	hashes []common.Hash // Bytecode hashes to validate responses
   269  	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
   270  }
   271  
   272  // bytecodeHealResponse is an already verified remote response to a bytecode request.
   273  type bytecodeHealResponse struct {
   274  	task *healTask // Task which this request is filling
   275  
   276  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   277  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   278  }
   279  
   280  // accountTask represents the sync task for a chunk of the account snapshot.
   281  type accountTask struct {
   282  	// These fields get serialized to leveldb on shutdown
   283  	Next     common.Hash                    // Next account to sync in this interval
   284  	Last     common.Hash                    // Last account to sync in this interval
   285  	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
   286  
   287  	// These fields are internals used during runtime
   288  	req  *accountRequest  // Pending request to fill this task
   289  	res  *accountResponse // Validated response filling this task
   290  	pend int              // Number of pending subtasks for this round
   291  
   292  	needCode  []bool // Flags whether the filling accounts need code retrieval
   293  	needState []bool // Flags whether the filling accounts need storage retrieval
   294  	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
   295  
   296  	codeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval
   297  	stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
   298  
   299  	genBatch ethdb.Batch     // Batch used by the node generator
   300  	genTrie  *trie.StackTrie // Node generator from storage slots
   301  
   302  	done bool // Flag whether the task can be removed
   303  }
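
        // Note that only the exported fields (Next, Last, SubTasks) survive the JSON
        // round-trip through saveSyncStatus/loadSyncStatus; the unexported runtime
        // fields (req, res, genBatch, genTrie, ...) are rebuilt from scratch when a
        // suspended sync is resumed.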
   304  
   305  // storageTask represents the sync task for a chunk of the storage snapshot.
   306  type storageTask struct {
   307  	Next common.Hash // Next storage slot to sync in this interval
   308  	Last common.Hash // Last storage slot to sync in this interval
   309  
   310  	// These fields are internals used during runtime
   311  	root common.Hash     // Storage root hash for this instance
   312  	req  *storageRequest // Pending request to fill this task
   313  
   314  	genBatch ethdb.Batch     // Batch used by the node generator
   315  	genTrie  *trie.StackTrie // Node generator from storage slots
   316  
   317  	done bool // Flag whether the task can be removed
   318  }
   319  
   320  // healTask represents the sync task for healing the snap-synced chunk boundaries.
   321  type healTask struct {
   322  	scheduler *trie.Sync // State trie sync scheduler defining the tasks
   323  
   324  	trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval
   325  	codeTasks map[common.Hash]struct{}      // Set of byte code tasks currently queued for retrieval
   326  }
   327  
   328  // SyncProgress is a database entry to allow suspending and resuming a snapshot state
   329  // sync. As opposed to full and fast sync, there is no way to restart a suspended
   330  // snap sync without prior knowledge of the suspension point.
   331  type SyncProgress struct {
   332  	Tasks []*accountTask // The suspended account tasks (contract tasks within)
   333  
   334  	// Status report during syncing phase
   335  	AccountSynced  uint64             // Number of accounts downloaded
   336  	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   337  	BytecodeSynced uint64             // Number of bytecodes downloaded
   338  	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   339  	StorageSynced  uint64             // Number of storage slots downloaded
   340  	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   341  
   342  	// Status report during healing phase
   343  	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
   344  	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   345  	BytecodeHealSynced uint64             // Number of bytecodes downloaded
   346  	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   347  }
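
        // The struct is persisted verbatim as JSON by saveSyncStatus and decoded
        // again by loadSyncStatus below; a minimal round-trip sketch using only
        // encoding/json:
        //
        //	blob, _ := json.Marshal(&SyncProgress{AccountSynced: 1})
        //	var prog SyncProgress
        //	_ = json.Unmarshal(blob, &prog) // prog.AccountSynced == 1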
   348  
   349  // SyncPending is analogous to SyncProgress, but it's used to report on pending
   350  // ephemeral sync progress that doesn't get persisted into the database.
   351  type SyncPending struct {
   352  	TrienodeHeal uint64 // Number of state trie nodes pending
   353  	BytecodeHeal uint64 // Number of bytecodes pending
   354  }
   355  
   356  // SyncPeer abstracts out the methods required for a peer to be synced against
   357  // with the goal of allowing the construction of mock peers without the
   358  // full-blown networking.
   359  type SyncPeer interface {
   360  	// ID retrieves the peer's unique identifier.
   361  	ID() string
   362  
   363  	// RequestAccountRange fetches a batch of accounts rooted in a specific account
   364  	// trie, starting with the origin.
   365  	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
   366  
   367  	// RequestStorageRanges fetches a batch of storage slots belonging to one or
   368  	// more accounts. If slots from only one account are requested, an origin marker
   369  	// may also be used to retrieve from there.
   370  	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
   371  
   372  	// RequestByteCodes fetches a batch of bytecodes by hash.
   373  	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
   374  
   375  	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
   376  	// a specific state trie.
   377  	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
   378  
   379  	// Log retrieves the peer's own contextual logger.
   380  	Log() log.Logger
   381  }
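
        // A minimal mock needs only the five methods above; an illustrative sketch
        // (the name nullPeer is hypothetical, not part of this package):
        //
        //	type nullPeer struct{ id string }
        //
        //	func (p nullPeer) ID() string      { return p.id }
        //	func (p nullPeer) Log() log.Logger { return log.New("peer", p.id) }
        //	func (p nullPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
        //		return nil // accept the request, never deliver
        //	}
        //	// ...RequestStorageRanges, RequestByteCodes and RequestTrieNodes analogously.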
   382  
   383  // Syncer is an Ethereum account and storage trie syncer based on snapshots and
   384  // the snap protocol. Its purpose is to download all the accounts and storage
   385  // slots from remote peers and reassemble chunks of the state trie, on top of
   386  // which a state sync can be run to fix any gaps / overlaps.
   387  //
   388  // Every network request has a variety of failure events:
   389  //   - The peer disconnects after task assignment, failing to send the request
   390  //   - The peer disconnects after sending the request, before delivering on it
   391  //   - The peer remains connected, but does not deliver a response in time
   392  //   - The peer delivers a stale response after a previous timeout
   393  //   - The peer delivers a refusal to serve the requested state
   394  type Syncer struct {
   395  	db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
   396  
   397  	root    common.Hash    // Current state trie root being synced
   398  	tasks   []*accountTask // Current account task set being synced
   399  	snapped bool           // Flag to signal that snap phase is done
   400  	healer  *healTask      // Current state healing task being executed
   401  	update  chan struct{}  // Notification channel for possible sync progression
   402  
   403  	peers    map[string]SyncPeer // Currently active peers to download from
   404  	peerJoin *event.Feed         // Event feed to react to peers joining
   405  	peerDrop *event.Feed         // Event feed to react to peers dropping
   406  	rates    *msgrate.Trackers   // Message throughput rates for peers
   407  
   408  	// Request tracking during syncing phase
   409  	statelessPeers map[string]struct{} // Peers that failed to deliver state data
   410  	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
   411  	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   412  	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests
   413  
   414  	accountReqs  map[uint64]*accountRequest  // Account requests currently running
   415  	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
   416  	storageReqs  map[uint64]*storageRequest  // Storage requests currently running
   417  
   418  	accountSynced  uint64             // Number of accounts downloaded
   419  	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   420  	bytecodeSynced uint64             // Number of bytecodes downloaded
   421  	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   422  	storageSynced  uint64             // Number of storage slots downloaded
   423  	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   424  
   425  	// Request tracking during healing phase
   426  	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
   427  	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   428  
   429  	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
   430  	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running
   431  
   432  	trienodeHealSynced uint64             // Number of state trie nodes downloaded
   433  	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   434  	trienodeHealDups   uint64             // Number of state trie nodes already processed
   435  	trienodeHealNops   uint64             // Number of state trie nodes not requested
   436  	bytecodeHealSynced uint64             // Number of bytecodes downloaded
   437  	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   438  	bytecodeHealDups   uint64             // Number of bytecodes already processed
   439  	bytecodeHealNops   uint64             // Number of bytecodes not requested
   440  
   441  	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
   442  	accountHealed      uint64             // Number of accounts downloaded during the healing stage
   443  	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
   444  	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
   445  	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
   446  
   447  	startTime time.Time // Time instance when snapshot sync started
   448  	logTime   time.Time // Time instance when status was last reported
   449  
   450  	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
   451  	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
   452  }
   453  
   454  // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
   455  // snap protocol.
   456  func NewSyncer(db ethdb.KeyValueStore) *Syncer {
   457  	return &Syncer{
   458  		db: db,
   459  
   460  		peers:    make(map[string]SyncPeer),
   461  		peerJoin: new(event.Feed),
   462  		peerDrop: new(event.Feed),
   463  		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
   464  		update:   make(chan struct{}, 1),
   465  
   466  		accountIdlers:  make(map[string]struct{}),
   467  		storageIdlers:  make(map[string]struct{}),
   468  		bytecodeIdlers: make(map[string]struct{}),
   469  
   470  		accountReqs:  make(map[uint64]*accountRequest),
   471  		storageReqs:  make(map[uint64]*storageRequest),
   472  		bytecodeReqs: make(map[uint64]*bytecodeRequest),
   473  
   474  		trienodeHealIdlers: make(map[string]struct{}),
   475  		bytecodeHealIdlers: make(map[string]struct{}),
   476  
   477  		trienodeHealReqs: make(map[uint64]*trienodeHealRequest),
   478  		bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest),
   479  		stateWriter:      db.NewBatch(),
   480  	}
   481  }
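
        // Typical wiring, as an illustrative sketch (in geth proper the downloader
        // drives this): construct the syncer over the node's key-value store,
        // register every connected snap peer, then run Sync on the pivot state root:
        //
        //	s := NewSyncer(db)
        //	_ = s.Register(peer)             // once per connected snap peer
        //	err := s.Sync(pivotRoot, cancel) // blocks until done or cancelled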
   482  
   483  // Register injects a new data source into the syncer's peerset.
   484  func (s *Syncer) Register(peer SyncPeer) error {
   485  	// Make sure the peer is not registered yet
   486  	id := peer.ID()
   487  
   488  	s.lock.Lock()
   489  	if _, ok := s.peers[id]; ok {
   490  		log.Error("Snap peer already registered", "id", id)
   491  
   492  		s.lock.Unlock()
   493  		return errors.New("already registered")
   494  	}
   495  	s.peers[id] = peer
   496  	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
   497  
   498  	// Mark the peer as idle, even if no sync is running
   499  	s.accountIdlers[id] = struct{}{}
   500  	s.storageIdlers[id] = struct{}{}
   501  	s.bytecodeIdlers[id] = struct{}{}
   502  	s.trienodeHealIdlers[id] = struct{}{}
   503  	s.bytecodeHealIdlers[id] = struct{}{}
   504  	s.lock.Unlock()
   505  
   506  	// Notify any active syncs that a new peer can be assigned data
   507  	s.peerJoin.Send(id)
   508  	return nil
   509  }
   510  
   511  // Unregister severs a peer from the syncer's peerset.
   512  func (s *Syncer) Unregister(id string) error {
   513  	// Remove all traces of the peer from the registry
   514  	s.lock.Lock()
   515  	if _, ok := s.peers[id]; !ok {
   516  		log.Error("Snap peer not registered", "id", id)
   517  
   518  		s.lock.Unlock()
   519  		return errors.New("not registered")
   520  	}
   521  	delete(s.peers, id)
   522  	s.rates.Untrack(id)
   523  
   524  	// Remove status markers, even if no sync is running
   525  	delete(s.statelessPeers, id)
   526  
   527  	delete(s.accountIdlers, id)
   528  	delete(s.storageIdlers, id)
   529  	delete(s.bytecodeIdlers, id)
   530  	delete(s.trienodeHealIdlers, id)
   531  	delete(s.bytecodeHealIdlers, id)
   532  	s.lock.Unlock()
   533  
   534  	// Notify any active syncs that pending requests need to be reverted
   535  	s.peerDrop.Send(id)
   536  	return nil
   537  }
   538  
   539  // Sync starts (or resumes a previous) sync cycle to iterate over a state trie
   540  // with the given root and reconstruct the nodes based on the snapshot leaves.
   541  // Previously downloaded segments will not be redownloaded or fixed, rather any
   542  // errors will be healed after the leaves are fully accumulated.
   543  func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
   544  	// Move the trie root from any previous value, revert stateless markers for
   545  	// any peers and initialize the syncer if it was not yet run
   546  	s.lock.Lock()
   547  	s.root = root
   548  	s.healer = &healTask{
   549  		scheduler: state.NewStateSync(root, s.db, s.onHealState),
   550  		trieTasks: make(map[common.Hash]trie.SyncPath),
   551  		codeTasks: make(map[common.Hash]struct{}),
   552  	}
   553  	s.statelessPeers = make(map[string]struct{})
   554  	s.lock.Unlock()
   555  
   556  	if s.startTime == (time.Time{}) {
   557  		s.startTime = time.Now()
   558  	}
   559  	// Retrieve the previous sync status from LevelDB and abort if already synced
   560  	s.loadSyncStatus()
   561  	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   562  		log.Debug("Snapshot sync already completed")
   563  		return nil
   564  	}
   565  	defer func() { // Persist any progress, independent of failure
   566  		for _, task := range s.tasks {
   567  			s.forwardAccountTask(task)
   568  		}
   569  		s.cleanAccountTasks()
   570  		s.saveSyncStatus()
   571  	}()
   572  
   573  	log.Debug("Starting snapshot sync cycle", "root", root)
   574  
   575  	// Flush out the last committed raw states
   576  	defer func() {
   577  		if s.stateWriter.ValueSize() > 0 {
   578  			s.stateWriter.Write()
   579  			s.stateWriter.Reset()
   580  		}
   581  	}()
   582  	defer s.report(true)
   583  
   584  	// Whether sync completed or not, disregard any future packets
   585  	defer func() {
   586  		log.Debug("Terminating snapshot sync cycle", "root", root)
   587  		s.lock.Lock()
   588  		s.accountReqs = make(map[uint64]*accountRequest)
   589  		s.storageReqs = make(map[uint64]*storageRequest)
   590  		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
   591  		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
   592  		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
   593  		s.lock.Unlock()
   594  	}()
   595  	// Keep scheduling sync tasks
   596  	peerJoin := make(chan string, 16)
   597  	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
   598  	defer peerJoinSub.Unsubscribe()
   599  
   600  	peerDrop := make(chan string, 16)
   601  	peerDropSub := s.peerDrop.Subscribe(peerDrop)
   602  	defer peerDropSub.Unsubscribe()
   603  
   604  	// Create a set of unique channels for this sync cycle. We need these to be
   605  	// ephemeral so a data race doesn't accidentally deliver something stale on
   606  	// a persistent channel across syncs (yup, this happened)
   607  	var (
   608  		accountReqFails      = make(chan *accountRequest)
   609  		storageReqFails      = make(chan *storageRequest)
   610  		bytecodeReqFails     = make(chan *bytecodeRequest)
   611  		accountResps         = make(chan *accountResponse)
   612  		storageResps         = make(chan *storageResponse)
   613  		bytecodeResps        = make(chan *bytecodeResponse)
   614  		trienodeHealReqFails = make(chan *trienodeHealRequest)
   615  		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
   616  		trienodeHealResps    = make(chan *trienodeHealResponse)
   617  		bytecodeHealResps    = make(chan *bytecodeHealResponse)
   618  	)
   619  	for {
   620  		// Remove all completed tasks and terminate sync if everything's done
   621  		s.cleanStorageTasks()
   622  		s.cleanAccountTasks()
   623  		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   624  			return nil
   625  		}
   626  		// Assign all the data retrieval tasks to any free peers
   627  		s.assignAccountTasks(accountResps, accountReqFails, cancel)
   628  		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
   629  		s.assignStorageTasks(storageResps, storageReqFails, cancel)
   630  
   631  		if len(s.tasks) == 0 {
   632  			// Sync phase done, run heal phase
   633  			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
   634  			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
   635  		}
   636  		// Wait for something to happen
   637  		select {
   638  		case <-s.update:
   639  			// Something happened (new peer, delivery, timeout), recheck tasks
   640  		case <-peerJoin:
   641  			// A new peer joined, try to schedule it new tasks
   642  		case id := <-peerDrop:
   643  			s.revertRequests(id)
   644  		case <-cancel:
   645  			return ErrCancelled
   646  
   647  		case req := <-accountReqFails:
   648  			s.revertAccountRequest(req)
   649  		case req := <-bytecodeReqFails:
   650  			s.revertBytecodeRequest(req)
   651  		case req := <-storageReqFails:
   652  			s.revertStorageRequest(req)
   653  		case req := <-trienodeHealReqFails:
   654  			s.revertTrienodeHealRequest(req)
   655  		case req := <-bytecodeHealReqFails:
   656  			s.revertBytecodeHealRequest(req)
   657  
   658  		case res := <-accountResps:
   659  			s.processAccountResponse(res)
   660  		case res := <-bytecodeResps:
   661  			s.processBytecodeResponse(res)
   662  		case res := <-storageResps:
   663  			s.processStorageResponse(res)
   664  		case res := <-trienodeHealResps:
   665  			s.processTrienodeHealResponse(res)
   666  		case res := <-bytecodeHealResps:
   667  			s.processBytecodeHealResponse(res)
   668  		}
   669  		// Report stats if something meaningful happened
   670  		s.report(false)
   671  	}
   672  }
   673  
   674  // loadSyncStatus retrieves a previously aborted sync status from the database,
   675  // or generates a fresh one if none is available.
   676  func (s *Syncer) loadSyncStatus() {
   677  	var progress SyncProgress
   678  
   679  	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
   680  		if err := json.Unmarshal(status, &progress); err != nil {
   681  			log.Error("Failed to decode snap sync status", "err", err)
   682  		} else {
   683  			for _, task := range progress.Tasks {
   684  				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
   685  			}
   686  			s.tasks = progress.Tasks
   687  			for _, task := range s.tasks {
   688  				task.genBatch = ethdb.HookedBatch{
   689  					Batch: s.db.NewBatch(),
   690  					OnPut: func(key []byte, value []byte) {
   691  						s.accountBytes += common.StorageSize(len(key) + len(value))
   692  					},
   693  				}
   694  				task.genTrie = trie.NewStackTrie(task.genBatch)
   695  
   696  				for _, subtasks := range task.SubTasks {
   697  					for _, subtask := range subtasks {
   698  						subtask.genBatch = ethdb.HookedBatch{
   699  							Batch: s.db.NewBatch(),
   700  							OnPut: func(key []byte, value []byte) {
   701  								s.storageBytes += common.StorageSize(len(key) + len(value))
   702  							},
   703  						}
   704  						subtask.genTrie = trie.NewStackTrie(subtask.genBatch)
   705  					}
   706  				}
   707  			}
   708  			s.snapped = len(s.tasks) == 0
   709  
   710  			s.accountSynced = progress.AccountSynced
   711  			s.accountBytes = progress.AccountBytes
   712  			s.bytecodeSynced = progress.BytecodeSynced
   713  			s.bytecodeBytes = progress.BytecodeBytes
   714  			s.storageSynced = progress.StorageSynced
   715  			s.storageBytes = progress.StorageBytes
   716  
   717  			s.trienodeHealSynced = progress.TrienodeHealSynced
   718  			s.trienodeHealBytes = progress.TrienodeHealBytes
   719  			s.bytecodeHealSynced = progress.BytecodeHealSynced
   720  			s.bytecodeHealBytes = progress.BytecodeHealBytes
   721  			return
   722  		}
   723  	}
   724  	// Either we've failed to decode the previous state, or there was none.
   725  	// Start a fresh sync by chunking up the account range and scheduling
   726  	// them for retrieval.
   727  	s.tasks = nil
   728  	s.accountSynced, s.accountBytes = 0, 0
   729  	s.bytecodeSynced, s.bytecodeBytes = 0, 0
   730  	s.storageSynced, s.storageBytes = 0, 0
   731  	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
   732  	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
   733  
   734  	var next common.Hash
   735  	step := new(big.Int).Sub(
   736  		new(big.Int).Div(
   737  			new(big.Int).Exp(common.Big2, common.Big256, nil),
   738  			big.NewInt(int64(accountConcurrency)),
   739  		), common.Big1,
   740  	)
   741  	for i := 0; i < accountConcurrency; i++ {
   742  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
   743  		if i == accountConcurrency-1 {
   744  			// Make sure we don't overflow if the step is not a proper divisor
   745  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   746  		}
   747  		batch := ethdb.HookedBatch{
   748  			Batch: s.db.NewBatch(),
   749  			OnPut: func(key []byte, value []byte) {
   750  				s.accountBytes += common.StorageSize(len(key) + len(value))
   751  			},
   752  		}
   753  		s.tasks = append(s.tasks, &accountTask{
   754  			Next:     next,
   755  			Last:     last,
   756  			SubTasks: make(map[common.Hash][]*storageTask),
   757  			genBatch: batch,
   758  			genTrie:  trie.NewStackTrie(batch),
   759  		})
   760  		log.Debug("Created account sync task", "from", next, "last", last)
   761  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
   762  	}
   763  }
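
        // A quick worked example of the chunking above with the default
        // accountConcurrency of 16: step = 2^256/16 - 1, so the first task spans
        // 0x0000...00 through 0x0fff...ff, the second starts at 0x1000...00, and
        // the last task's end is clamped to 0xffff...ff to absorb integer rounding.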
   764  
   765  // saveSyncStatus marshals the remaining sync tasks into leveldb.
   766  func (s *Syncer) saveSyncStatus() {
   767  	// Serialize any partial progress to disk before spinning down
   768  	for _, task := range s.tasks {
   769  		if err := task.genBatch.Write(); err != nil {
   770  			log.Error("Failed to persist account slots", "err", err)
   771  		}
   772  		for _, subtasks := range task.SubTasks {
   773  			for _, subtask := range subtasks {
   774  				if err := subtask.genBatch.Write(); err != nil {
   775  					log.Error("Failed to persist storage slots", "err", err)
   776  				}
   777  			}
   778  		}
   779  	}
   780  	// Store the actual progress markers
   781  	progress := &SyncProgress{
   782  		Tasks:              s.tasks,
   783  		AccountSynced:      s.accountSynced,
   784  		AccountBytes:       s.accountBytes,
   785  		BytecodeSynced:     s.bytecodeSynced,
   786  		BytecodeBytes:      s.bytecodeBytes,
   787  		StorageSynced:      s.storageSynced,
   788  		StorageBytes:       s.storageBytes,
   789  		TrienodeHealSynced: s.trienodeHealSynced,
   790  		TrienodeHealBytes:  s.trienodeHealBytes,
   791  		BytecodeHealSynced: s.bytecodeHealSynced,
   792  		BytecodeHealBytes:  s.bytecodeHealBytes,
   793  	}
   794  	status, err := json.Marshal(progress)
   795  	if err != nil {
   796  		panic(err) // This can only fail during implementation
   797  	}
   798  	rawdb.WriteSnapshotSyncStatus(s.db, status)
   799  }
   800  
   801  // Progress returns the snap sync status statistics.
   802  func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
   803  	s.lock.Lock()
   804  	defer s.lock.Unlock()
   805  
   806  	progress := &SyncProgress{
   807  		AccountSynced:      s.accountSynced,
   808  		AccountBytes:       s.accountBytes,
   809  		BytecodeSynced:     s.bytecodeSynced,
   810  		BytecodeBytes:      s.bytecodeBytes,
   811  		StorageSynced:      s.storageSynced,
   812  		StorageBytes:       s.storageBytes,
   813  		TrienodeHealSynced: s.trienodeHealSynced,
   814  		TrienodeHealBytes:  s.trienodeHealBytes,
   815  		BytecodeHealSynced: s.bytecodeHealSynced,
   816  		BytecodeHealBytes:  s.bytecodeHealBytes,
   817  	}
   818  	pending := new(SyncPending)
   819  	if s.healer != nil {
   820  		pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
   821  		pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
   822  	}
   823  	return progress, pending
   824  }
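
        // A caller polling progress for reporting purposes (illustrative):
        //
        //	prog, pend := s.Progress()
        //	log.Info("Snap sync progress", "accounts", prog.AccountSynced,
        //		"healing", pend.TrienodeHeal+pend.BytecodeHeal)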
   825  
   826  // cleanAccountTasks removes account range retrieval tasks that have already been
   827  // completed.
   828  func (s *Syncer) cleanAccountTasks() {
   829  	// If the sync was already done before, don't even bother
   830  	if len(s.tasks) == 0 {
   831  		return
   832  	}
   833  	// Sync wasn't finished previously, check for any task that can be finalized
   834  	for i := 0; i < len(s.tasks); i++ {
   835  		if s.tasks[i].done {
   836  			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
   837  			i--
   838  		}
   839  	}
   840  	// If everything was just finalized, generate the account trie and start healing
   841  	if len(s.tasks) == 0 {
   842  		s.lock.Lock()
   843  		s.snapped = true
   844  		s.lock.Unlock()
   845  
   846  		// Push the final sync report
   847  		s.reportSyncProgress(true)
   848  	}
   849  }
   850  
   851  // cleanStorageTasks iterates over all the account tasks and storage sub-tasks
   852  // within, cleaning any that have been completed.
   853  func (s *Syncer) cleanStorageTasks() {
   854  	for _, task := range s.tasks {
   855  		for account, subtasks := range task.SubTasks {
   856  			// Remove storage range retrieval tasks that completed
   857  			for j := 0; j < len(subtasks); j++ {
   858  				if subtasks[j].done {
   859  					subtasks = append(subtasks[:j], subtasks[j+1:]...)
   860  					j--
   861  				}
   862  			}
   863  			if len(subtasks) > 0 {
   864  				task.SubTasks[account] = subtasks
   865  				continue
   866  			}
   867  			// If all storage chunks are done, mark the account as done too
   868  			for j, hash := range task.res.hashes {
   869  				if hash == account {
   870  					task.needState[j] = false
   871  				}
   872  			}
   873  			delete(task.SubTasks, account)
   874  			task.pend--
   875  
   876  			// If this was the last pending task, forward the account task
   877  			if task.pend == 0 {
   878  				s.forwardAccountTask(task)
   879  			}
   880  		}
   881  	}
   882  }
   883  
   884  // assignAccountTasks attempts to match idle peers to pending account range
   885  // retrievals.
   886  func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
   887  	s.lock.Lock()
   888  	defer s.lock.Unlock()
   889  
   890  	// Sort the peers by download capacity to use faster ones if many are available
   891  	idlers := &capacitySort{
   892  		ids:  make([]string, 0, len(s.accountIdlers)),
   893  		caps: make([]int, 0, len(s.accountIdlers)),
   894  	}
   895  	targetTTL := s.rates.TargetTimeout()
   896  	for id := range s.accountIdlers {
   897  		if _, ok := s.statelessPeers[id]; ok {
   898  			continue
   899  		}
   900  		idlers.ids = append(idlers.ids, id)
   901  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
   902  	}
   903  	if len(idlers.ids) == 0 {
   904  		return
   905  	}
   906  	sort.Sort(sort.Reverse(idlers))
   907  
   908  	// Iterate over all the tasks and try to find a pending one
   909  	for _, task := range s.tasks {
   910  		// Skip any tasks already filling
   911  		if task.req != nil || task.res != nil {
   912  			continue
   913  		}
   914  		// Task pending retrieval, try to find an idle peer. If no such peer
   915  		// exists, we probably assigned tasks for all (or they are stateless).
   916  		// Abort the entire assignment mechanism.
   917  		if len(idlers.ids) == 0 {
   918  			return
   919  		}
   920  		var (
   921  			idle = idlers.ids[0]
   922  			peer = s.peers[idle]
   923  			cap  = idlers.caps[0]
   924  		)
   925  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
   926  
   927  		// Matched a pending task to an idle peer, allocate a unique request id
   928  		var reqid uint64
   929  		for {
   930  			reqid = uint64(rand.Int63())
   931  			if reqid == 0 {
   932  				continue
   933  			}
   934  			if _, ok := s.accountReqs[reqid]; ok {
   935  				continue
   936  			}
   937  			break
   938  		}
   939  		// Generate the network query and send it to the peer
   940  		req := &accountRequest{
   941  			peer:    idle,
   942  			id:      reqid,
   943  			time:    time.Now(),
   944  			deliver: success,
   945  			revert:  fail,
   946  			cancel:  cancel,
   947  			stale:   make(chan struct{}),
   948  			origin:  task.Next,
   949  			limit:   task.Last,
   950  			task:    task,
   951  		}
   952  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
   953  			peer.Log().Debug("Account range request timed out", "reqid", reqid)
   954  			s.rates.Update(idle, AccountRangeMsg, 0, 0)
   955  			s.scheduleRevertAccountRequest(req)
   956  		})
   957  		s.accountReqs[reqid] = req
   958  		delete(s.accountIdlers, idle)
   959  
   960  		s.pend.Add(1)
   961  		go func(root common.Hash) {
   962  			defer s.pend.Done()
   963  
   964  			// Attempt to send the remote request and revert if it fails
   965  			if cap > maxRequestSize {
   966  				cap = maxRequestSize
   967  			}
   968  			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
   969  				cap = minRequestSize
   970  			}
   971  			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
   972  				peer.Log().Debug("Failed to request account range", "err", err)
   973  				s.scheduleRevertAccountRequest(req)
   974  			}
   975  		}(s.root)
   976  
   977  		// Inject the request into the task to block further assignments
   978  		task.req = req
   979  	}
   980  }
   981  
   982  // assignBytecodeTasks attempts to match idle peers to pending code retrievals.
   983  func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
   984  	s.lock.Lock()
   985  	defer s.lock.Unlock()
   986  
   987  	// Sort the peers by download capacity to use faster ones if many are available
   988  	idlers := &capacitySort{
   989  		ids:  make([]string, 0, len(s.bytecodeIdlers)),
   990  		caps: make([]int, 0, len(s.bytecodeIdlers)),
   991  	}
   992  	targetTTL := s.rates.TargetTimeout()
   993  	for id := range s.bytecodeIdlers {
   994  		if _, ok := s.statelessPeers[id]; ok {
   995  			continue
   996  		}
   997  		idlers.ids = append(idlers.ids, id)
   998  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
   999  	}
  1000  	if len(idlers.ids) == 0 {
  1001  		return
  1002  	}
  1003  	sort.Sort(sort.Reverse(idlers))
  1004  
  1005  	// Iterate over all the tasks and try to find a pending one
  1006  	for _, task := range s.tasks {
  1007  		// Skip any tasks not in the bytecode retrieval phase
  1008  		if task.res == nil {
  1009  			continue
  1010  		}
  1011  		// Skip tasks that are already retrieving (or done with) all codes
  1012  		if len(task.codeTasks) == 0 {
  1013  			continue
  1014  		}
  1015  		// Task pending retrieval, try to find an idle peer. If no such peer
  1016  		// exists, we probably assigned tasks for all (or they are stateless).
  1017  		// Abort the entire assignment mechanism.
  1018  		if len(idlers.ids) == 0 {
  1019  			return
  1020  		}
  1021  		var (
  1022  			idle = idlers.ids[0]
  1023  			peer = s.peers[idle]
  1024  			cap  = idlers.caps[0]
  1025  		)
  1026  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1027  
  1028  		// Matched a pending task to an idle peer, allocate a unique request id
  1029  		var reqid uint64
  1030  		for {
  1031  			reqid = uint64(rand.Int63())
  1032  			if reqid == 0 {
  1033  				continue
  1034  			}
  1035  			if _, ok := s.bytecodeReqs[reqid]; ok {
  1036  				continue
  1037  			}
  1038  			break
  1039  		}
  1040  		// Generate the network query and send it to the peer
  1041  		if cap > maxCodeRequestCount {
  1042  			cap = maxCodeRequestCount
  1043  		}
  1044  		hashes := make([]common.Hash, 0, cap)
  1045  		for hash := range task.codeTasks {
  1046  			delete(task.codeTasks, hash)
  1047  			hashes = append(hashes, hash)
  1048  			if len(hashes) >= cap {
  1049  				break
  1050  			}
  1051  		}
  1052  		req := &bytecodeRequest{
  1053  			peer:    idle,
  1054  			id:      reqid,
  1055  			time:    time.Now(),
  1056  			deliver: success,
  1057  			revert:  fail,
  1058  			cancel:  cancel,
  1059  			stale:   make(chan struct{}),
  1060  			hashes:  hashes,
  1061  			task:    task,
  1062  		}
  1063  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1064  			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
  1065  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1066  			s.scheduleRevertBytecodeRequest(req)
  1067  		})
  1068  		s.bytecodeReqs[reqid] = req
  1069  		delete(s.bytecodeIdlers, idle)
  1070  
  1071  		s.pend.Add(1)
  1072  		go func() {
  1073  			defer s.pend.Done()
  1074  
  1075  			// Attempt to send the remote request and revert if it fails
  1076  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1077  				log.Debug("Failed to request bytecodes", "err", err)
  1078  				s.scheduleRevertBytecodeRequest(req)
  1079  			}
  1080  		}()
  1081  	}
  1082  }
  1083  
  1084  // assignStorageTasks attempts to match idle peers to pending storage range
  1085  // retrievals.
  1086  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1087  	s.lock.Lock()
  1088  	defer s.lock.Unlock()
  1089  
  1090  	// Sort the peers by download capacity to use faster ones if many are available
  1091  	idlers := &capacitySort{
  1092  		ids:  make([]string, 0, len(s.storageIdlers)),
  1093  		caps: make([]int, 0, len(s.storageIdlers)),
  1094  	}
  1095  	targetTTL := s.rates.TargetTimeout()
  1096  	for id := range s.storageIdlers {
  1097  		if _, ok := s.statelessPeers[id]; ok {
  1098  			continue
  1099  		}
  1100  		idlers.ids = append(idlers.ids, id)
  1101  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1102  	}
  1103  	if len(idlers.ids) == 0 {
  1104  		return
  1105  	}
  1106  	sort.Sort(sort.Reverse(idlers))
  1107  
  1108  	// Iterate over all the tasks and try to find a pending one
  1109  	for _, task := range s.tasks {
  1110  		// Skip any tasks not in the storage retrieval phase
  1111  		if task.res == nil {
  1112  			continue
  1113  		}
  1114  		// Skip tasks that are already retrieving (or done with) all small states
  1115  		if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
  1116  			continue
  1117  		}
  1118  		// Task pending retrieval, try to find an idle peer. If no such peer
  1119  		// exists, we probably assigned tasks for all (or they are stateless).
  1120  		// Abort the entire assignment mechanism.
  1121  		if len(idlers.ids) == 0 {
  1122  			return
  1123  		}
  1124  		var (
  1125  			idle = idlers.ids[0]
  1126  			peer = s.peers[idle]
  1127  			cap  = idlers.caps[0]
  1128  		)
  1129  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1130  
  1131  		// Matched a pending task to an idle peer, allocate a unique request id
  1132  		var reqid uint64
  1133  		for {
  1134  			reqid = uint64(rand.Int63())
  1135  			if reqid == 0 {
  1136  				continue
  1137  			}
  1138  			if _, ok := s.storageReqs[reqid]; ok {
  1139  				continue
  1140  			}
  1141  			break
  1142  		}
  1143  		// Generate the network query and send it to the peer. If there are
  1144  		// large contract tasks pending, complete those before diving into
  1145  		// even more new contracts.
  1146  		if cap > maxRequestSize {
  1147  			cap = maxRequestSize
  1148  		}
  1149  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1150  			cap = minRequestSize
  1151  		}
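        		// The divisor below assumes roughly 1KB of storage data per account when
        		// deciding how many accounts to pack into one request; a heuristic, not
        		// a protocol rule.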
  1152  		storageSets := cap / 1024
  1153  
  1154  		var (
  1155  			accounts = make([]common.Hash, 0, storageSets)
  1156  			roots    = make([]common.Hash, 0, storageSets)
  1157  			subtask  *storageTask
  1158  		)
  1159  		for account, subtasks := range task.SubTasks {
  1160  			for _, st := range subtasks {
  1161  				// Skip any subtasks already filling
  1162  				if st.req != nil {
  1163  					continue
  1164  				}
  1165  				// Found an incomplete storage chunk, schedule it
  1166  				accounts = append(accounts, account)
  1167  				roots = append(roots, st.root)
  1168  				subtask = st
  1169  				break // Large contract chunks are downloaded individually
  1170  			}
  1171  			if subtask != nil {
  1172  				break // Large contract chunks are downloaded individually
  1173  			}
  1174  		}
  1175  		if subtask == nil {
  1176  			// No large contracts require retrieval, but small ones are available
  1177  			for account, root := range task.stateTasks {
  1178  				delete(task.stateTasks, account)
  1179  
  1180  				accounts = append(accounts, account)
  1181  				roots = append(roots, root)
  1182  
  1183  				if len(accounts) >= storageSets {
  1184  					break
  1185  				}
  1186  			}
  1187  		}
  1188  		// If nothing was found, it means this task is actually already fully
  1189  		// retrieving, but large contracts are hard to detect. Skip to the next.
  1190  		if len(accounts) == 0 {
  1191  			continue
  1192  		}
  1193  		req := &storageRequest{
  1194  			peer:     idle,
  1195  			id:       reqid,
  1196  			time:     time.Now(),
  1197  			deliver:  success,
  1198  			revert:   fail,
  1199  			cancel:   cancel,
  1200  			stale:    make(chan struct{}),
  1201  			accounts: accounts,
  1202  			roots:    roots,
  1203  			mainTask: task,
  1204  			subTask:  subtask,
  1205  		}
  1206  		if subtask != nil {
  1207  			req.origin = subtask.Next
  1208  			req.limit = subtask.Last
  1209  		}
  1210  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1211  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1212  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1213  			s.scheduleRevertStorageRequest(req)
  1214  		})
  1215  		s.storageReqs[reqid] = req
  1216  		delete(s.storageIdlers, idle)
  1217  
  1218  		s.pend.Add(1)
  1219  		go func(root common.Hash) {
  1220  			defer s.pend.Done()
  1221  
  1222  			// Attempt to send the remote request and revert if it fails
  1223  			var origin, limit []byte
  1224  			if subtask != nil {
  1225  				origin, limit = req.origin[:], req.limit[:]
  1226  			}
  1227  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1228  				log.Debug("Failed to request storage", "err", err)
  1229  				s.scheduleRevertStorageRequest(req)
  1230  			}
  1231  		}(s.root)
  1232  
  1233  		// Inject the request into the subtask to block further assignments
  1234  		if subtask != nil {
  1235  			subtask.req = req
  1236  		}
  1237  	}
  1238  }
  1239  
  1240  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1241  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1242  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1243  	s.lock.Lock()
  1244  	defer s.lock.Unlock()
  1245  
  1246  	// Sort the peers by download capacity to use faster ones if many are available
  1247  	idlers := &capacitySort{
  1248  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1249  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1250  	}
  1251  	targetTTL := s.rates.TargetTimeout()
  1252  	for id := range s.trienodeHealIdlers {
  1253  		if _, ok := s.statelessPeers[id]; ok {
  1254  			continue
  1255  		}
  1256  		idlers.ids = append(idlers.ids, id)
  1257  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1258  	}
  1259  	if len(idlers.ids) == 0 {
  1260  		return
  1261  	}
  1262  	sort.Sort(sort.Reverse(idlers))
  1263  
  1264  	// Iterate over pending tasks and try to find a peer to retrieve with
  1265  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1266  		// If there are not enough trie tasks queued to fully assign, fill the
  1267  		// queue from the state sync scheduler. The trie sync schedules these
  1268  		// together with bytecodes, so we need to queue them combined.
  1269  		var (
  1270  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1271  			want = maxTrieRequestCount + maxCodeRequestCount
  1272  		)
  1273  		if have < want {
  1274  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1275  			for i, hash := range nodes {
  1276  				s.healer.trieTasks[hash] = paths[i]
  1277  			}
  1278  			for _, hash := range codes {
  1279  				s.healer.codeTasks[hash] = struct{}{}
  1280  			}
  1281  		}
  1282  		// If all the heal tasks are bytecodes or already downloading, bail
  1283  		if len(s.healer.trieTasks) == 0 {
  1284  			return
  1285  		}
  1286  		// Task pending retrieval, try to find an idle peer. If no such peer
  1287  		// exists, we probably already assigned tasks to all of them (or they
  1288  		// are stateless). Abort the entire assignment mechanism.
  1289  		if len(idlers.ids) == 0 {
  1290  			return
  1291  		}
  1292  		var (
  1293  			idle = idlers.ids[0]
  1294  			peer = s.peers[idle]
  1295  			cap  = idlers.caps[0]
  1296  		)
  1297  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1298  
  1299  		// Matched a pending task to an idle peer, allocate a unique request id
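		// (Ids are drawn at random and redrawn on the rare collision with an
		// in-flight request; zero is skipped as it is treated as invalid.)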
  1300  		var reqid uint64
  1301  		for {
  1302  			reqid = uint64(rand.Int63())
  1303  			if reqid == 0 {
  1304  				continue
  1305  			}
  1306  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1307  				continue
  1308  			}
  1309  			break
  1310  		}
  1311  		// Generate the network query and send it to the peer
  1312  		if cap > maxTrieRequestCount {
  1313  			cap = maxTrieRequestCount
  1314  		}
  1315  		var (
  1316  			hashes   = make([]common.Hash, 0, cap)
  1317  			paths    = make([]trie.SyncPath, 0, cap)
  1318  			pathsets = make([]TrieNodePathSet, 0, cap)
  1319  		)
  1320  		for hash, pathset := range s.healer.trieTasks {
  1321  			delete(s.healer.trieTasks, hash)
  1322  
  1323  			hashes = append(hashes, hash)
  1324  			paths = append(paths, pathset)
  1325  			pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash
  1326  
  1327  			if len(hashes) >= cap {
  1328  				break
  1329  			}
  1330  		}
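		// (Each path is wrapped into its own single-element TrieNodePathSet
		// above; the TODO presumably refers to merging the paths of nodes
		// that belong to the same account into one shared set.)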
  1331  		req := &trienodeHealRequest{
  1332  			peer:    idle,
  1333  			id:      reqid,
  1334  			time:    time.Now(),
  1335  			deliver: success,
  1336  			revert:  fail,
  1337  			cancel:  cancel,
  1338  			stale:   make(chan struct{}),
  1339  			hashes:  hashes,
  1340  			paths:   paths,
  1341  			task:    s.healer,
  1342  		}
  1343  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1344  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1345  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1346  			s.scheduleRevertTrienodeHealRequest(req)
  1347  		})
  1348  		s.trienodeHealReqs[reqid] = req
  1349  		delete(s.trienodeHealIdlers, idle)
  1350  
  1351  		s.pend.Add(1)
  1352  		go func(root common.Hash) {
  1353  			defer s.pend.Done()
  1354  
  1355  			// Attempt to send the remote request and revert if it fails
  1356  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1357  				log.Debug("Failed to request trienode healers", "err", err)
  1358  				s.scheduleRevertTrienodeHealRequest(req)
  1359  			}
  1360  		}(s.root)
  1361  	}
  1362  }
  1363  
  1364  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1365  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1366  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1367  	s.lock.Lock()
  1368  	defer s.lock.Unlock()
  1369  
  1370  	// Sort the peers by download capacity to use faster ones if many are available
  1371  	idlers := &capacitySort{
  1372  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1373  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1374  	}
  1375  	targetTTL := s.rates.TargetTimeout()
  1376  	for id := range s.bytecodeHealIdlers {
  1377  		if _, ok := s.statelessPeers[id]; ok {
  1378  			continue
  1379  		}
  1380  		idlers.ids = append(idlers.ids, id)
  1381  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1382  	}
  1383  	if len(idlers.ids) == 0 {
  1384  		return
  1385  	}
  1386  	sort.Sort(sort.Reverse(idlers))
  1387  
  1388  	// Iterate over pending tasks and try to find a peer to retrieve with
  1389  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1390  		// If there are not enough bytecode tasks queued to fully assign, fill
  1391  		// the queue from the state sync scheduler. The trie sync schedules these
  1392  		// together with trie nodes, so we need to queue them combined.
  1393  		var (
  1394  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1395  			want = maxTrieRequestCount + maxCodeRequestCount
  1396  		)
  1397  		if have < want {
  1398  			nodes, paths, codes := s.healer.scheduler.Missing(want - have)
  1399  			for i, hash := range nodes {
  1400  				s.healer.trieTasks[hash] = paths[i]
  1401  			}
  1402  			for _, hash := range codes {
  1403  				s.healer.codeTasks[hash] = struct{}{}
  1404  			}
  1405  		}
  1406  		// If all the heal tasks are trienodes or already downloading, bail
  1407  		if len(s.healer.codeTasks) == 0 {
  1408  			return
  1409  		}
  1410  		// Task pending retrieval, try to find an idle peer. If no such peer
  1411  		// exists, we probably already assigned tasks to all of them (or they
  1412  		// are stateless). Abort the entire assignment mechanism.
  1413  		if len(idlers.ids) == 0 {
  1414  			return
  1415  		}
  1416  		var (
  1417  			idle = idlers.ids[0]
  1418  			peer = s.peers[idle]
  1419  			cap  = idlers.caps[0]
  1420  		)
  1421  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1422  
  1423  		// Matched a pending task to an idle peer, allocate a unique request id
  1424  		var reqid uint64
  1425  		for {
  1426  			reqid = uint64(rand.Int63())
  1427  			if reqid == 0 {
  1428  				continue
  1429  			}
  1430  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1431  				continue
  1432  			}
  1433  			break
  1434  		}
  1435  		// Generate the network query and send it to the peer
  1436  		if cap > maxCodeRequestCount {
  1437  			cap = maxCodeRequestCount
  1438  		}
  1439  		hashes := make([]common.Hash, 0, cap)
  1440  		for hash := range s.healer.codeTasks {
  1441  			delete(s.healer.codeTasks, hash)
  1442  
  1443  			hashes = append(hashes, hash)
  1444  			if len(hashes) >= cap {
  1445  				break
  1446  			}
  1447  		}
  1448  		req := &bytecodeHealRequest{
  1449  			peer:    idle,
  1450  			id:      reqid,
  1451  			time:    time.Now(),
  1452  			deliver: success,
  1453  			revert:  fail,
  1454  			cancel:  cancel,
  1455  			stale:   make(chan struct{}),
  1456  			hashes:  hashes,
  1457  			task:    s.healer,
  1458  		}
  1459  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1460  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1461  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1462  			s.scheduleRevertBytecodeHealRequest(req)
  1463  		})
  1464  		s.bytecodeHealReqs[reqid] = req
  1465  		delete(s.bytecodeHealIdlers, idle)
  1466  
  1467  		s.pend.Add(1)
  1468  		go func() {
  1469  			defer s.pend.Done()
  1470  
  1471  			// Attempt to send the remote request and revert if it fails
  1472  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1473  				log.Debug("Failed to request bytecode healers", "err", err)
  1474  				s.scheduleRevertBytecodeHealRequest(req)
  1475  			}
  1476  		}()
  1477  	}
  1478  }
  1479  
  1480  // revertRequests locates all the currently pending requests from a particular
  1481  // peer and reverts them, rescheduling for others to fulfill.
  1482  func (s *Syncer) revertRequests(peer string) {
  1483  	// Gather the requests first, as the reverts need the lock too
  1484  	s.lock.Lock()
  1485  	var accountReqs []*accountRequest
  1486  	for _, req := range s.accountReqs {
  1487  		if req.peer == peer {
  1488  			accountReqs = append(accountReqs, req)
  1489  		}
  1490  	}
  1491  	var bytecodeReqs []*bytecodeRequest
  1492  	for _, req := range s.bytecodeReqs {
  1493  		if req.peer == peer {
  1494  			bytecodeReqs = append(bytecodeReqs, req)
  1495  		}
  1496  	}
  1497  	var storageReqs []*storageRequest
  1498  	for _, req := range s.storageReqs {
  1499  		if req.peer == peer {
  1500  			storageReqs = append(storageReqs, req)
  1501  		}
  1502  	}
  1503  	var trienodeHealReqs []*trienodeHealRequest
  1504  	for _, req := range s.trienodeHealReqs {
  1505  		if req.peer == peer {
  1506  			trienodeHealReqs = append(trienodeHealReqs, req)
  1507  		}
  1508  	}
  1509  	var bytecodeHealReqs []*bytecodeHealRequest
  1510  	for _, req := range s.bytecodeHealReqs {
  1511  		if req.peer == peer {
  1512  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1513  		}
  1514  	}
  1515  	s.lock.Unlock()
  1516  
  1517  	// Revert all the requests matching the peer
  1518  	for _, req := range accountReqs {
  1519  		s.revertAccountRequest(req)
  1520  	}
  1521  	for _, req := range bytecodeReqs {
  1522  		s.revertBytecodeRequest(req)
  1523  	}
  1524  	for _, req := range storageReqs {
  1525  		s.revertStorageRequest(req)
  1526  	}
  1527  	for _, req := range trienodeHealReqs {
  1528  		s.revertTrienodeHealRequest(req)
  1529  	}
  1530  	for _, req := range bytecodeHealReqs {
  1531  		s.revertBytecodeHealRequest(req)
  1532  	}
  1533  }
  1534  
  1535  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1536  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1537  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
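	// The three-way select below (mirrored by all the scheduleRevert* helpers)
	// makes the notification race-free: it either reaches the event loop,
	// observes the sync cycle being cancelled, or detects that the request
	// was already reverted.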
  1538  	select {
  1539  	case req.revert <- req:
  1540  		// Sync event loop notified
  1541  	case <-req.cancel:
  1542  		// Sync cycle got cancelled
  1543  	case <-req.stale:
  1544  		// Request already reverted
  1545  	}
  1546  }
  1547  
  1548  // revertAccountRequest cleans up an account range request and returns all failed
  1549  // retrieval tasks to the scheduler for reassignment.
  1550  //
  1551  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1552  // On peer threads, use scheduleRevertAccountRequest.
  1553  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1554  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1555  	select {
  1556  	case <-req.stale:
  1557  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1558  		return
  1559  	default:
  1560  	}
  1561  	close(req.stale)
  1562  
  1563  	// Remove the request from the tracked set
  1564  	s.lock.Lock()
  1565  	delete(s.accountReqs, req.id)
  1566  	s.lock.Unlock()
  1567  
  1568  	// If there's a timeout timer still running, abort it and mark the account
  1569  	// task as not-pending, ready for rescheduling
  1570  	req.timeout.Stop()
  1571  	if req.task.req == req {
  1572  		req.task.req = nil
  1573  	}
  1574  }
  1575  
  1576  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1577  // and return all failed retrieval tasks to the scheduler for reassignment.
  1578  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1579  	select {
  1580  	case req.revert <- req:
  1581  		// Sync event loop notified
  1582  	case <-req.cancel:
  1583  		// Sync cycle got cancelled
  1584  	case <-req.stale:
  1585  		// Request already reverted
  1586  	}
  1587  }
  1588  
  1589  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1590  // retrieval tasks to the scheduler for reassignment.
  1591  //
  1592  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1593  // On peer threads, use scheduleRevertBytecodeRequest.
  1594  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1595  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1596  	select {
  1597  	case <-req.stale:
  1598  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1599  		return
  1600  	default:
  1601  	}
  1602  	close(req.stale)
  1603  
  1604  	// Remove the request from the tracked set
  1605  	s.lock.Lock()
  1606  	delete(s.bytecodeReqs, req.id)
  1607  	s.lock.Unlock()
  1608  
  1609  	// If there's a timeout timer still running, abort it and mark the code
  1610  	// retrievals as not-pending, ready for rescheduling
  1611  	req.timeout.Stop()
  1612  	for _, hash := range req.hashes {
  1613  		req.task.codeTasks[hash] = struct{}{}
  1614  	}
  1615  }
  1616  
  1617  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1618  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1619  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1620  	select {
  1621  	case req.revert <- req:
  1622  		// Sync event loop notified
  1623  	case <-req.cancel:
  1624  		// Sync cycle got cancelled
  1625  	case <-req.stale:
  1626  		// Request already reverted
  1627  	}
  1628  }
  1629  
  1630  // revertStorageRequest cleans up a storage range request and returns all failed
  1631  // retrieval tasks to the scheduler for reassignment.
  1632  //
  1633  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1634  // On peer threads, use scheduleRevertStorageRequest.
  1635  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1636  	log.Debug("Reverting storage request", "peer", req.peer)
  1637  	select {
  1638  	case <-req.stale:
  1639  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1640  		return
  1641  	default:
  1642  	}
  1643  	close(req.stale)
  1644  
  1645  	// Remove the request from the tracked set
  1646  	s.lock.Lock()
  1647  	delete(s.storageReqs, req.id)
  1648  	s.lock.Unlock()
  1649  
  1650  	// If there's a timeout timer still running, abort it and mark the storage
  1651  	// task as not-pending, ready for rescheduling
  1652  	req.timeout.Stop()
  1653  	if req.subTask != nil {
  1654  		req.subTask.req = nil
  1655  	} else {
  1656  		for i, account := range req.accounts {
  1657  			req.mainTask.stateTasks[account] = req.roots[i]
  1658  		}
  1659  	}
  1660  }
  1661  
  1662  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1663  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1664  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1665  	select {
  1666  	case req.revert <- req:
  1667  		// Sync event loop notified
  1668  	case <-req.cancel:
  1669  		// Sync cycle got cancelled
  1670  	case <-req.stale:
  1671  		// Request already reverted
  1672  	}
  1673  }
  1674  
  1675  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1676  // failed retrieval tasks to the scheduler for reassignment.
  1677  //
  1678  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1679  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1680  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1681  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1682  	select {
  1683  	case <-req.stale:
  1684  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1685  		return
  1686  	default:
  1687  	}
  1688  	close(req.stale)
  1689  
  1690  	// Remove the request from the tracked set
  1691  	s.lock.Lock()
  1692  	delete(s.trienodeHealReqs, req.id)
  1693  	s.lock.Unlock()
  1694  
  1695  	// If there's a timeout timer still running, abort it and mark the trie node
  1696  	// retrievals as not-pending, ready for rescheduling
  1697  	req.timeout.Stop()
  1698  	for i, hash := range req.hashes {
  1699  		req.task.trieTasks[hash] = req.paths[i]
  1700  	}
  1701  }
  1702  
  1703  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1704  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1705  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1706  	select {
  1707  	case req.revert <- req:
  1708  		// Sync event loop notified
  1709  	case <-req.cancel:
  1710  		// Sync cycle got cancelled
  1711  	case <-req.stale:
  1712  		// Request already reverted
  1713  	}
  1714  }
  1715  
  1716  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1717  // failed retrieval tasks to the scheduler for reassignment.
  1718  //
  1719  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1720  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1721  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1722  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1723  	select {
  1724  	case <-req.stale:
  1725  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1726  		return
  1727  	default:
  1728  	}
  1729  	close(req.stale)
  1730  
  1731  	// Remove the request from the tracked set
  1732  	s.lock.Lock()
  1733  	delete(s.bytecodeHealReqs, req.id)
  1734  	s.lock.Unlock()
  1735  
  1736  	// If there's a timeout timer still running, abort it and mark the code
  1737  	// retrievals as not-pending, ready for rescheduling
  1738  	req.timeout.Stop()
  1739  	for _, hash := range req.hashes {
  1740  		req.task.codeTasks[hash] = struct{}{}
  1741  	}
  1742  }
  1743  
  1744  // processAccountResponse integrates an already validated account range response
  1745  // into the account tasks.
  1746  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1747  	// Switch the task from pending to filling
  1748  	res.task.req = nil
  1749  	res.task.res = res
  1750  
  1751  	// Ensure that the response doesn't overflow into the subsequent task
  1752  	last := res.task.Last.Big()
  1753  	for i, hash := range res.hashes {
  1754  		// Mark the range complete if the last hash is already included.
  1755  		// Keep iterating to delete any extra states beyond it.
  1756  		cmp := hash.Big().Cmp(last)
  1757  		if cmp == 0 {
  1758  			res.cont = false
  1759  			continue
  1760  		}
  1761  		if cmp > 0 {
  1762  			// Chunk overflown, cut off excess
  1763  			res.hashes = res.hashes[:i]
  1764  			res.accounts = res.accounts[:i]
  1765  			res.cont = false // Mark range completed
  1766  			break
  1767  		}
  1768  	}
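	// (Illustration: if the task ends at Last and the peer answers with
	// hashes h1 < h2 < Last < h3, then h3 is trimmed off and cont is cleared,
	// since h3 already belongs to the subsequent task.)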
  1769  	// Iterate over all the accounts and assemble which ones need further sub-
  1770  	// filling before the entire account range can be persisted.
  1771  	res.task.needCode = make([]bool, len(res.accounts))
  1772  	res.task.needState = make([]bool, len(res.accounts))
  1773  	res.task.needHeal = make([]bool, len(res.accounts))
  1774  
  1775  	res.task.codeTasks = make(map[common.Hash]struct{})
  1776  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1777  
  1778  	resumed := make(map[common.Hash]struct{})
  1779  
  1780  	res.task.pend = 0
  1781  	for i, account := range res.accounts {
  1782  		// Check if the account is a contract with an unknown code
  1783  		if !bytes.Equal(account.CodeHash, emptyCode[:]) {
  1784  			if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
  1785  				res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
  1786  				res.task.needCode[i] = true
  1787  				res.task.pend++
  1788  			}
  1789  		}
  1790  		// Check if the account is a contract with an unknown storage trie
  1791  		if account.Root != emptyRoot {
  1792  			if ok, err := s.db.Has(account.Root[:]); err != nil || !ok {
  1793  				// If there was a previous large state retrieval in progress,
  1794  				// don't restart it from scratch. This happens if a sync cycle
  1795  				// is interrupted and resumed later. However, *do* update the
  1796  				// previous root hash.
  1797  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1798  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1799  					for _, subtask := range subtasks {
  1800  						subtask.root = account.Root
  1801  					}
  1802  					res.task.needHeal[i] = true
  1803  					resumed[res.hashes[i]] = struct{}{}
  1804  				} else {
  1805  					res.task.stateTasks[res.hashes[i]] = account.Root
  1806  				}
  1807  				res.task.needState[i] = true
  1808  				res.task.pend++
  1809  			}
  1810  		}
  1811  	}
  1812  	// Delete any subtasks that have been aborted but not resumed. This may undo
  1813  	// some progress if a new peer gives us fewer accounts than an old one, but for
  1814  	// now we have to live with that.
  1815  	for hash := range res.task.SubTasks {
  1816  		if _, ok := resumed[hash]; !ok {
  1817  			log.Debug("Aborting suspended storage retrieval", "account", hash)
  1818  			delete(res.task.SubTasks, hash)
  1819  		}
  1820  	}
  1821  	// If the account range contained no contracts, or all have been fully filled
  1822  	// beforehand, short circuit storage filling and forward to the next task
  1823  	if res.task.pend == 0 {
  1824  		s.forwardAccountTask(res.task)
  1825  		return
  1826  	}
  1827  	// Some accounts are incomplete, leave as is for the storage and contract
  1828  	// task assigners to pick up and fill.
  1829  }
  1830  
  1831  // processBytecodeResponse integrates an already validated bytecode response
  1832  // into the account tasks.
  1833  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1834  	batch := s.db.NewBatch()
  1835  
  1836  	var (
  1837  		codes uint64
  1838  	)
  1839  	for i, hash := range res.hashes {
  1840  		code := res.codes[i]
  1841  
  1842  		// If the bytecode was not delivered, reschedule it
  1843  		if code == nil {
  1844  			res.task.codeTasks[hash] = struct{}{}
  1845  			continue
  1846  		}
  1847  		// Code was delivered, mark it not needed any more
  1848  		for j, account := range res.task.res.accounts {
  1849  			if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
  1850  				res.task.needCode[j] = false
  1851  				res.task.pend--
  1852  			}
  1853  		}
  1854  		// Push the bytecode into a database batch
  1855  		codes++
  1856  		rawdb.WriteCode(batch, hash, code)
  1857  	}
  1858  	bytes := common.StorageSize(batch.ValueSize())
  1859  	if err := batch.Write(); err != nil {
  1860  		log.Crit("Failed to persist bytecodes", "err", err)
  1861  	}
  1862  	s.bytecodeSynced += codes
  1863  	s.bytecodeBytes += bytes
  1864  
  1865  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes)
  1866  
  1867  	// If this delivery completed the last pending task, forward the account task
  1868  	// to the next chunk
  1869  	if res.task.pend == 0 {
  1870  		s.forwardAccountTask(res.task)
  1871  		return
  1872  	}
  1873  	// Some accounts are still incomplete, leave as is for the storage and contract
  1874  	// task assigners to pick up and fill.
  1875  }
  1876  
  1877  // processStorageResponse integrates an already validated storage response
  1878  // into the account tasks.
  1879  func (s *Syncer) processStorageResponse(res *storageResponse) {
  1880  	// Switch the subtask from pending to idle
  1881  	if res.subTask != nil {
  1882  		res.subTask.req = nil
  1883  	}
  1884  	batch := ethdb.HookedBatch{
  1885  		Batch: s.db.NewBatch(),
  1886  		OnPut: func(key []byte, value []byte) {
  1887  			s.storageBytes += common.StorageSize(len(key) + len(value))
  1888  		},
  1889  	}
  1890  	var (
  1891  		slots           int
  1892  		oldStorageBytes = s.storageBytes
  1893  	)
  1894  	// Iterate over all the accounts and reconstruct their storage tries from the
  1895  	// delivered slots
  1896  	for i, account := range res.accounts {
  1897  		// If the account was not delivered, reschedule it
  1898  		if i >= len(res.hashes) {
  1899  			res.mainTask.stateTasks[account] = res.roots[i]
  1900  			continue
  1901  		}
  1902  		// State was delivered; if complete, mark it as not needed any more,
  1903  		// otherwise mark the account as needing healing
  1904  		for j, hash := range res.mainTask.res.hashes {
  1905  			if account != hash {
  1906  				continue
  1907  			}
  1908  			acc := res.mainTask.res.accounts[j]
  1909  
  1910  			// If the packet contains multiple contract storage slots, all
  1911  			// but the last are surely complete. The last contract may be
  1912  			// chunked, so check its continuation flag.
  1913  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  1914  				res.mainTask.needState[j] = false
  1915  				res.mainTask.pend--
  1916  			}
  1917  			// If the last contract was chunked, mark it as needing healing
  1918  			// to avoid writing it out to disk prematurely.
  1919  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  1920  				res.mainTask.needHeal[j] = true
  1921  			}
  1922  			// If the last contract was chunked, we need to switch to large
  1923  			// contract handling mode
  1924  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  1925  				// If we haven't yet started a large-contract retrieval, create
  1926  				// the subtasks for it within the main account task
  1927  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  1928  					var (
  1929  						keys    = res.hashes[i]
  1930  						chunks  = uint64(storageConcurrency)
  1931  						lastKey common.Hash
  1932  					)
  1933  					if len(keys) > 0 {
  1934  						lastKey = keys[len(keys)-1]
  1935  					}
  1936  					// If the number of slots remaining is low, decrease the
  1937  					// number of chunks. Somewhere on the order of 10-15K slots
  1938  					// fit into a packet of 500KB. A key/slot pair is maximum 64
  1939  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  1940  					//
  1941  					// Chunk so that at least 2 packets are needed to fill a task.
  1942  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  1943  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  1944  							chunks = n + 1
  1945  						}
  1946  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  1947  					} else {
  1948  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  1949  					}
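					// (Worked example: with a 512KB request cap and 64-byte
					// key/slot pairs, one packet holds at most 8K pairs, so a
					// chunk is sized for ~16K slots; an estimate of 50K
					// remaining slots then yields n = 3, i.e. 4 chunks,
					// assuming that undercuts the storageConcurrency default.)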
  1950  					r := newHashRange(lastKey, chunks)
  1951  
  1952  					// Our first task is the one that was just filled by this response.
  1953  					batch := ethdb.HookedBatch{
  1954  						Batch: s.db.NewBatch(),
  1955  						OnPut: func(key []byte, value []byte) {
  1956  							s.storageBytes += common.StorageSize(len(key) + len(value))
  1957  						},
  1958  					}
  1959  					tasks = append(tasks, &storageTask{
  1960  						Next:     common.Hash{},
  1961  						Last:     r.End(),
  1962  						root:     acc.Root,
  1963  						genBatch: batch,
  1964  						genTrie:  trie.NewStackTrie(batch),
  1965  					})
  1966  					for r.Next() {
  1967  						batch := ethdb.HookedBatch{
  1968  							Batch: s.db.NewBatch(),
  1969  							OnPut: func(key []byte, value []byte) {
  1970  								s.storageBytes += common.StorageSize(len(key) + len(value))
  1971  							},
  1972  						}
  1973  						tasks = append(tasks, &storageTask{
  1974  							Next:     r.Start(),
  1975  							Last:     r.End(),
  1976  							root:     acc.Root,
  1977  							genBatch: batch,
  1978  							genTrie:  trie.NewStackTrie(batch),
  1979  						})
  1980  					}
  1981  					for _, task := range tasks {
  1982  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  1983  					}
  1984  					res.mainTask.SubTasks[account] = tasks
  1985  
  1986  					// Since we've just created the sub-tasks, this response
  1987  					// is surely for the first one (zero origin)
  1988  					res.subTask = tasks[0]
  1989  				}
  1990  			}
  1991  			// If we're in large contract delivery mode, forward the subtask
  1992  			if res.subTask != nil {
  1993  				// Ensure the response doesn't overflow into the subsequent task
  1994  				last := res.subTask.Last.Big()
  1995  				// Find the first overflowing key. While at it, mark res as complete
  1996  				// if the returned range includes or passes the 'last' key
  1997  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  1998  					cmp := res.hashes[i][k].Big().Cmp(last)
  1999  					if cmp >= 0 {
  2000  						res.cont = false
  2001  					}
  2002  					return cmp > 0
  2003  				})
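				// (sort.Search always returns an index in [0, len], so the
				// guard below is purely defensive; index == len(res.hashes[i])
				// means nothing overflowed and the slices are kept whole.)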
  2004  				if index >= 0 {
  2005  					// cut off excess
  2006  					res.hashes[i] = res.hashes[i][:index]
  2007  					res.slots[i] = res.slots[i][:index]
  2008  				}
  2009  				// Forward the relevant storage chunk (even if created just now)
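				// (incHash returns the hash incremented by one, so Next ends
				// up pointing at the first slot hash not yet persisted.)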
  2010  				if res.cont {
  2011  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  2012  				} else {
  2013  					res.subTask.done = true
  2014  				}
  2015  			}
  2016  		}
  2017  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  2018  		// push them to disk. If the contract is chunked, the trie nodes will be
  2019  		// reconstructed later.
  2020  		slots += len(res.hashes[i])
  2021  
  2022  		if i < len(res.hashes)-1 || res.subTask == nil {
  2023  			tr := trie.NewStackTrie(batch)
  2024  			for j := 0; j < len(res.hashes[i]); j++ {
  2025  				tr.Update(res.hashes[i][j][:], res.slots[i][j])
  2026  			}
  2027  			tr.Commit()
  2028  		}
  2029  		// Persist the received storage segments. This flat state may be
  2030  		// outdated during the sync, but it can be fixed later during
  2031  		// snapshot generation.
  2032  		for j := 0; j < len(res.hashes[i]); j++ {
  2033  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2034  
  2035  			// If we're storing large contracts, generate the trie nodes
  2036  			// on the fly to not trash the gluing points
  2037  			if i == len(res.hashes)-1 && res.subTask != nil {
  2038  				res.subTask.genTrie.Update(res.hashes[i][j][:], res.slots[i][j])
  2039  			}
  2040  		}
  2041  	}
  2042  	// Large contracts could have generated new trie nodes, flush them to disk
  2043  	if res.subTask != nil {
  2044  		if res.subTask.done {
  2045  			if root, err := res.subTask.genTrie.Commit(); err != nil {
  2046  				log.Error("Failed to commit stack slots", "err", err)
  2047  			} else if root == res.subTask.root {
  2048  				// If the chunk hashes to the expected root, the overflowing delivery was in fact complete, so clear the heal request
  2049  				for i, account := range res.mainTask.res.hashes {
  2050  					if account == res.accounts[len(res.accounts)-1] {
  2051  						res.mainTask.needHeal[i] = false
  2052  					}
  2053  				}
  2054  			}
  2055  		}
  2056  		if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
  2057  			if err := res.subTask.genBatch.Write(); err != nil {
  2058  				log.Error("Failed to persist stack slots", "err", err)
  2059  			}
  2060  			res.subTask.genBatch.Reset()
  2061  		}
  2062  	}
  2063  	// Flush anything written just now and update the stats
  2064  	if err := batch.Write(); err != nil {
  2065  		log.Crit("Failed to persist storage slots", "err", err)
  2066  	}
  2067  	s.storageSynced += uint64(slots)
  2068  
  2069  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2070  
  2071  	// If this delivery completed the last pending task, forward the account task
  2072  	// to the next chunk
  2073  	if res.mainTask.pend == 0 {
  2074  		s.forwardAccountTask(res.mainTask)
  2075  		return
  2076  	}
  2077  	// Some accounts are still incomplete, leave as is for the storage and contract
  2078  	// task assigners to pick up and fill.
  2079  }
  2080  
  2081  // processTrienodeHealResponse integrates an already validated trienode response
  2082  // into the healer tasks.
  2083  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2084  	for i, hash := range res.hashes {
  2085  		node := res.nodes[i]
  2086  
  2087  		// If the trie node was not delivered, reschedule it
  2088  		if node == nil {
  2089  			res.task.trieTasks[hash] = res.paths[i]
  2090  			continue
  2091  		}
  2092  		// Push the trie node into the state syncer
  2093  		s.trienodeHealSynced++
  2094  		s.trienodeHealBytes += common.StorageSize(len(node))
  2095  
  2096  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
  2097  		switch err {
  2098  		case nil:
  2099  		case trie.ErrAlreadyProcessed:
  2100  			s.trienodeHealDups++
  2101  		case trie.ErrNotRequested:
  2102  			s.trienodeHealNops++
  2103  		default:
  2104  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2105  		}
  2106  	}
  2107  	batch := s.db.NewBatch()
  2108  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2109  		log.Error("Failed to commit healing data", "err", err)
  2110  	}
  2111  	if err := batch.Write(); err != nil {
  2112  		log.Crit("Failed to persist healing data", "err", err)
  2113  	}
  2114  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2115  }
  2116  
  2117  // processBytecodeHealResponse integrates an already validated bytecode response
  2118  // into the healer tasks.
  2119  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2120  	for i, hash := range res.hashes {
  2121  		code := res.codes[i]
  2122  
  2123  		// If the bytecode was not delivered, reschedule it
  2124  		if code == nil {
  2125  			res.task.codeTasks[hash] = struct{}{}
  2126  			continue
  2127  		}
  2128  		// Push the bytecode into the state syncer
  2129  		s.bytecodeHealSynced++
  2130  		s.bytecodeHealBytes += common.StorageSize(len(code))
  2131  
  2132  		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: code})
  2133  		switch err {
  2134  		case nil:
  2135  		case trie.ErrAlreadyProcessed:
  2136  			s.bytecodeHealDups++
  2137  		case trie.ErrNotRequested:
  2138  			s.bytecodeHealNops++
  2139  		default:
  2140  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2141  		}
  2142  	}
  2143  	batch := s.db.NewBatch()
  2144  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2145  		log.Error("Failed to commit healing data", "err", err)
  2146  	}
  2147  	if err := batch.Write(); err != nil {
  2148  		log.Crit("Failed to persist healing data", "err", err)
  2149  	}
  2150  	log.Debug("Persisted set of healing data", "type", "bytecode", "bytes", common.StorageSize(batch.ValueSize()))
  2151  }
  2152  
  2153  // forwardAccountTask takes a filled account task and persists anything available
  2154  // into the database, after which it forwards the next account marker so that the
  2155  // task's next chunk may be filled.
  2156  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2157  	// Remove any pending delivery
  2158  	res := task.res
  2159  	if res == nil {
  2160  		return // nothing to forward
  2161  	}
  2162  	task.res = nil
  2163  
  2164  	// Persist the received account segments. This flat state may be
  2165  	// outdated during the sync, but it can be fixed later during
  2166  	// snapshot generation.
  2167  	oldAccountBytes := s.accountBytes
  2168  
  2169  	batch := ethdb.HookedBatch{
  2170  		Batch: s.db.NewBatch(),
  2171  		OnPut: func(key []byte, value []byte) {
  2172  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2173  		},
  2174  	}
  2175  	for i, hash := range res.hashes {
  2176  		if task.needCode[i] || task.needState[i] {
  2177  			break
  2178  		}
  2179  		slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].CodeHash)
  2180  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2181  
  2182  		// If the task is complete, drop it into the stack trie to generate
  2183  		// account trie nodes for it
  2184  		if !task.needHeal[i] {
  2185  			full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2186  			if err != nil {
  2187  				panic(err) // Really shouldn't ever happen
  2188  			}
  2189  			task.genTrie.Update(hash[:], full)
  2190  		}
  2191  	}
  2192  	// Flush anything written just now and update the stats
  2193  	if err := batch.Write(); err != nil {
  2194  		log.Crit("Failed to persist accounts", "err", err)
  2195  	}
  2196  	s.accountSynced += uint64(len(res.accounts))
  2197  
  2198  	// Task filling persisted, push the chunk marker forward to the first
  2199  	// account still missing data.
  2200  	for i, hash := range res.hashes {
  2201  		if task.needCode[i] || task.needState[i] {
  2202  			return
  2203  		}
  2204  		task.Next = incHash(hash)
  2205  	}
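	// (The loop above returns early at the first account still awaiting code
	// or state, so the marker only ever advances over a contiguous, fully
	// persisted prefix of the delivered range.)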
  2206  	// All accounts marked as complete, track if the entire task is done
  2207  	task.done = !res.cont
  2208  
  2209  	// Stack trie could have generated trie nodes, push them to disk (we need to
  2210  	// flush after finalizing task.done). It's fine even if we crash and lose this
  2211  	// write, as it will only cause more data to be downloaded during heal.
  2212  	if task.done {
  2213  		if _, err := task.genTrie.Commit(); err != nil {
  2214  			log.Error("Failed to commit stack account", "err", err)
  2215  		}
  2216  	}
  2217  	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
  2218  		if err := task.genBatch.Write(); err != nil {
  2219  			log.Error("Failed to persist stack account", "err", err)
  2220  		}
  2221  		task.genBatch.Reset()
  2222  	}
  2223  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2224  }
  2225  
  2226  // OnAccounts is a callback method to invoke when a range of accounts are
  2227  // received from a remote peer.
  2228  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2229  	size := common.StorageSize(len(hashes) * common.HashLength)
  2230  	for _, account := range accounts {
  2231  		size += common.StorageSize(len(account))
  2232  	}
  2233  	for _, node := range proof {
  2234  		size += common.StorageSize(len(node))
  2235  	}
  2236  	logger := peer.Log().New("reqid", id)
  2237  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2238  
  2239  	// Whether or not the response is valid, we can mark the peer as idle and
  2240  	// notify the scheduler to assign a new task. If the response is invalid,
  2241  	// we'll drop the peer in a bit.
  2242  	s.lock.Lock()
  2243  	if _, ok := s.peers[peer.ID()]; ok {
  2244  		s.accountIdlers[peer.ID()] = struct{}{}
  2245  	}
  2246  	select {
  2247  	case s.update <- struct{}{}:
  2248  	default:
  2249  	}
  2250  	// Ensure the response is for a valid request
  2251  	req, ok := s.accountReqs[id]
  2252  	if !ok {
  2253  		// Request stale, perhaps the peer timed out but came through in the end
  2254  		logger.Warn("Unexpected account range packet")
  2255  		s.lock.Unlock()
  2256  		return nil
  2257  	}
  2258  	delete(s.accountReqs, id)
  2259  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2260  
  2261  	// Clean up the request timeout timer, we'll see how to proceed further based
  2262  	// on the actual delivered content
  2263  	if !req.timeout.Stop() {
  2264  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2265  		s.lock.Unlock()
  2266  		return nil
  2267  	}
  2268  	// Response is valid, but check if the peer is signalling that it does not have
  2269  	// the requested data. For account range queries that means the state being
  2270  	// retrieved was either already pruned remotely, or the peer is not yet
  2271  	// synced to our head.
  2272  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2273  		logger.Debug("Peer rejected account range request", "root", s.root)
  2274  		s.statelessPeers[peer.ID()] = struct{}{}
  2275  		s.lock.Unlock()
  2276  
  2277  		// Signal this request as failed, and ready for rescheduling
  2278  		s.scheduleRevertAccountRequest(req)
  2279  		return nil
  2280  	}
  2281  	root := s.root
  2282  	s.lock.Unlock()
  2283  
  2284  	// Reconstruct a partial trie from the response and verify it
  2285  	keys := make([][]byte, len(hashes))
  2286  	for i, key := range hashes {
  2287  		keys[i] = common.CopyBytes(key[:])
  2288  	}
  2289  	nodes := make(light.NodeList, len(proof))
  2290  	for i, node := range proof {
  2291  		nodes[i] = node
  2292  	}
  2293  	proofdb := nodes.NodeSet()
  2294  
  2295  	var end []byte
  2296  	if len(keys) > 0 {
  2297  		end = keys[len(keys)-1]
  2298  	}
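	// (VerifyRangeProof checks that the returned accounts form a contiguous,
	// correctly proven slice of the account trie starting at req.origin; the
	// continuation flag it returns reports whether entries exist beyond the
	// last delivered hash.)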
  2299  	cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
  2300  	if err != nil {
  2301  		logger.Warn("Account range failed proof", "err", err)
  2302  		// Signal this request as failed, and ready for rescheduling
  2303  		s.scheduleRevertAccountRequest(req)
  2304  		return err
  2305  	}
  2306  	accs := make([]*types.StateAccount, len(accounts))
  2307  	for i, account := range accounts {
  2308  		acc := new(types.StateAccount)
  2309  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2310  			panic(err) // We created these blobs, we must be able to decode them
  2311  		}
  2312  		accs[i] = acc
  2313  	}
  2314  	response := &accountResponse{
  2315  		task:     req.task,
  2316  		hashes:   hashes,
  2317  		accounts: accs,
  2318  		cont:     cont,
  2319  	}
  2320  	select {
  2321  	case req.deliver <- response:
  2322  	case <-req.cancel:
  2323  	case <-req.stale:
  2324  	}
  2325  	return nil
  2326  }
  2327  
  2328  // OnByteCodes is a callback method to invoke when a batch of contract
  2329  // bytecodes are received from a remote peer.
  2330  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2331  	s.lock.RLock()
  2332  	syncing := !s.snapped
  2333  	s.lock.RUnlock()
  2334  
  2335  	if syncing {
  2336  		return s.onByteCodes(peer, id, bytecodes)
  2337  	}
  2338  	return s.onHealByteCodes(peer, id, bytecodes)
  2339  }
  2340  
  2341  // onByteCodes is a callback method to invoke when a batch of contract
  2342  // bytecodes are received from a remote peer in the syncing phase.
  2343  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2344  	var size common.StorageSize
  2345  	for _, code := range bytecodes {
  2346  		size += common.StorageSize(len(code))
  2347  	}
  2348  	logger := peer.Log().New("reqid", id)
  2349  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2350  
  2351  	// Whether or not the response is valid, we can mark the peer as idle and
  2352  	// notify the scheduler to assign a new task. If the response is invalid,
  2353  	// we'll drop the peer in a bit.
  2354  	s.lock.Lock()
  2355  	if _, ok := s.peers[peer.ID()]; ok {
  2356  		s.bytecodeIdlers[peer.ID()] = struct{}{}
  2357  	}
  2358  	select {
  2359  	case s.update <- struct{}{}:
  2360  	default:
  2361  	}
  2362  	// Ensure the response is for a valid request
  2363  	req, ok := s.bytecodeReqs[id]
  2364  	if !ok {
  2365  		// Request stale, perhaps the peer timed out but came through in the end
  2366  		logger.Warn("Unexpected bytecode packet")
  2367  		s.lock.Unlock()
  2368  		return nil
  2369  	}
  2370  	delete(s.bytecodeReqs, id)
  2371  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2372  
  2373  	// Clean up the request timeout timer, we'll see how to proceed further based
  2374  	// on the actual delivered content
  2375  	if !req.timeout.Stop() {
  2376  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2377  		s.lock.Unlock()
  2378  		return nil
  2379  	}
  2380  
  2381  	// Response is valid, but check if the peer is signalling that it does not have
  2382  	// the requested data. For bytecode queries that means the peer is not
  2383  	// yet synced.
  2384  	if len(bytecodes) == 0 {
  2385  		logger.Debug("Peer rejected bytecode request")
  2386  		s.statelessPeers[peer.ID()] = struct{}{}
  2387  		s.lock.Unlock()
  2388  
  2389  		// Signal this request as failed, and ready for rescheduling
  2390  		s.scheduleRevertBytecodeRequest(req)
  2391  		return nil
  2392  	}
  2393  	s.lock.Unlock()
  2394  
  2395  	// Cross reference the requested bytecodes with the response to find gaps
  2396  	// that the serving node is missing
  2397  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2398  	hash := make([]byte, 32)
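	// (The matching below relies on the peer returning bytecodes in request
	// order, with missing items skipped; the same scheme is used for healing
	// bytecodes and trienodes further down.)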
  2399  
  2400  	codes := make([][]byte, len(req.hashes))
  2401  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2402  		// Find the next hash that we've been served, leaving misses with nils
  2403  		hasher.Reset()
  2404  		hasher.Write(bytecodes[i])
  2405  		hasher.Read(hash)
  2406  
  2407  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2408  			j++
  2409  		}
  2410  		if j < len(req.hashes) {
  2411  			codes[j] = bytecodes[i]
  2412  			j++
  2413  			continue
  2414  		}
  2415  		// We've either run out of hashes, or got unrequested data
  2416  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2417  		// Signal this request as failed, and ready for rescheduling
  2418  		s.scheduleRevertBytecodeRequest(req)
  2419  		return errors.New("unexpected bytecode")
  2420  	}
  2421  	// Response validated, send it to the scheduler for filling
  2422  	response := &bytecodeResponse{
  2423  		task:   req.task,
  2424  		hashes: req.hashes,
  2425  		codes:  codes,
  2426  	}
  2427  	select {
  2428  	case req.deliver <- response:
  2429  	case <-req.cancel:
  2430  	case <-req.stale:
  2431  	}
  2432  	return nil
  2433  }
  2434  
  2435  // OnStorage is a callback method to invoke when ranges of storage slots
  2436  // are received from a remote peer.
  2437  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2438  	// Gather some trace stats to aid in debugging issues
  2439  	var (
  2440  		hashCount int
  2441  		slotCount int
  2442  		size      common.StorageSize
  2443  	)
  2444  	for _, hashset := range hashes {
  2445  		size += common.StorageSize(common.HashLength * len(hashset))
  2446  		hashCount += len(hashset)
  2447  	}
  2448  	for _, slotset := range slots {
  2449  		for _, slot := range slotset {
  2450  			size += common.StorageSize(len(slot))
  2451  		}
  2452  		slotCount += len(slotset)
  2453  	}
  2454  	for _, node := range proof {
  2455  		size += common.StorageSize(len(node))
  2456  	}
  2457  	logger := peer.Log().New("reqid", id)
  2458  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2459  
  2460  	// Whether or not the response is valid, we can mark the peer as idle and
  2461  	// notify the scheduler to assign a new task. If the response is invalid,
  2462  	// we'll drop the peer in a bit.
  2463  	s.lock.Lock()
  2464  	if _, ok := s.peers[peer.ID()]; ok {
  2465  		s.storageIdlers[peer.ID()] = struct{}{}
  2466  	}
  2467  	select {
  2468  	case s.update <- struct{}{}:
  2469  	default:
  2470  	}
  2471  	// Ensure the response is for a valid request
  2472  	req, ok := s.storageReqs[id]
  2473  	if !ok {
  2474  		// Request stale, perhaps the peer timed out but came through in the end
  2475  		logger.Warn("Unexpected storage ranges packet")
  2476  		s.lock.Unlock()
  2477  		return nil
  2478  	}
  2479  	delete(s.storageReqs, id)
  2480  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2481  
  2482  	// Clean up the request timeout timer, we'll see how to proceed further based
  2483  	// on the actual delivered content
  2484  	if !req.timeout.Stop() {
  2485  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2486  		s.lock.Unlock()
  2487  		return nil
  2488  	}
  2489  
  2490  	// Reject the response if the hash sets and slot sets don't match, or if the
  2491  	// peer sent more data than requested.
  2492  	if len(hashes) != len(slots) {
  2493  		s.lock.Unlock()
  2494  		s.scheduleRevertStorageRequest(req) // reschedule request
  2495  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2496  		return errors.New("hash and slot set size mismatch")
  2497  	}
  2498  	if len(hashes) > len(req.accounts) {
  2499  		s.lock.Unlock()
  2500  		s.scheduleRevertStorageRequest(req) // reschedule request
  2501  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2502  		return errors.New("hash set larger than requested")
  2503  	}
  2504  	// Response is valid, but check if the peer is signalling that it does not have
  2505  	// the requested data. For storage range queries that means the state being
  2506  	// retrieved was either already pruned remotely, or the peer is not yet
  2507  	// synced to our head.
  2508  	if len(hashes) == 0 {
  2509  		logger.Debug("Peer rejected storage request")
  2510  		s.statelessPeers[peer.ID()] = struct{}{}
  2511  		s.lock.Unlock()
  2512  		s.scheduleRevertStorageRequest(req) // reschedule request
  2513  		return nil
  2514  	}
  2515  	s.lock.Unlock()
  2516  
  2517  	// Reconstruct the partial tries from the response and verify them
  2518  	var cont bool
  2519  
  2520  	for i := 0; i < len(hashes); i++ {
  2521  		// Convert the keys and proofs into an internal format
  2522  		keys := make([][]byte, len(hashes[i]))
  2523  		for j, key := range hashes[i] {
  2524  			keys[j] = common.CopyBytes(key[:])
  2525  		}
  2526  		nodes := make(light.NodeList, 0, len(proof))
  2527  		if i == len(hashes)-1 {
  2528  			for _, node := range proof {
  2529  				nodes = append(nodes, node)
  2530  			}
  2531  		}
  2532  		var err error
  2533  		if len(nodes) == 0 {
  2534  			// No proof has been attached, the response must cover the entire key
  2535  			// space and hash to the origin root.
  2536  			_, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
  2537  			if err != nil {
  2538  				s.scheduleRevertStorageRequest(req) // reschedule request
  2539  				logger.Warn("Storage slots failed proof", "err", err)
  2540  				return err
  2541  			}
  2542  		} else {
  2543  			// A proof was attached, the response is only partial, check that the
  2544  			// returned data is indeed part of the storage trie
  2545  			proofdb := nodes.NodeSet()
  2546  
  2547  			var end []byte
  2548  			if len(keys) > 0 {
  2549  				end = keys[len(keys)-1]
  2550  			}
  2551  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
  2552  			if err != nil {
  2553  				s.scheduleRevertStorageRequest(req) // reschedule request
  2554  				logger.Warn("Storage range failed proof", "err", err)
  2555  				return err
  2556  			}
  2557  		}
  2558  	}
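	// (Note that cont only reflects the last account in the response: earlier
	// accounts carried no proof and were verified as complete ranges above.)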
  2559  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2560  	response := &storageResponse{
  2561  		mainTask: req.mainTask,
  2562  		subTask:  req.subTask,
  2563  		accounts: req.accounts,
  2564  		roots:    req.roots,
  2565  		hashes:   hashes,
  2566  		slots:    slots,
  2567  		cont:     cont,
  2568  	}
  2569  	select {
  2570  	case req.deliver <- response:
  2571  	case <-req.cancel:
  2572  	case <-req.stale:
  2573  	}
  2574  	return nil
  2575  }
  2576  
  2577  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2578  // are received from a remote peer.
  2579  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2580  	var size common.StorageSize
  2581  	for _, node := range trienodes {
  2582  		size += common.StorageSize(len(node))
  2583  	}
  2584  	logger := peer.Log().New("reqid", id)
  2585  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2586  
  2587  	// Whether or not the response is valid, we can mark the peer as idle and
  2588  	// notify the scheduler to assign a new task. If the response is invalid,
  2589  	// we'll drop the peer in a bit.
  2590  	s.lock.Lock()
  2591  	if _, ok := s.peers[peer.ID()]; ok {
  2592  		s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2593  	}
  2594  	select {
  2595  	case s.update <- struct{}{}:
  2596  	default:
  2597  	}
  2598  	// Ensure the response is for a valid request
  2599  	req, ok := s.trienodeHealReqs[id]
  2600  	if !ok {
  2601  		// Request stale, perhaps the peer timed out but came through in the end
  2602  		logger.Warn("Unexpected trienode heal packet")
  2603  		s.lock.Unlock()
  2604  		return nil
  2605  	}
  2606  	delete(s.trienodeHealReqs, id)
  2607  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2608  
  2609  	// Clean up the request timeout timer, we'll see how to proceed further based
  2610  	// on the actual delivered content
  2611  	if !req.timeout.Stop() {
  2612  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2613  		s.lock.Unlock()
  2614  		return nil
  2615  	}
  2616  
  2617  	// Response is valid, but check if the peer is signalling that it does not have
  2618  	// the requested data. For trie node queries that means the peer is not
  2619  	// yet synced.
  2620  	if len(trienodes) == 0 {
  2621  		logger.Debug("Peer rejected trienode heal request")
  2622  		s.statelessPeers[peer.ID()] = struct{}{}
  2623  		s.lock.Unlock()
  2624  
  2625  		// Signal this request as failed, and ready for rescheduling
  2626  		s.scheduleRevertTrienodeHealRequest(req)
  2627  		return nil
  2628  	}
  2629  	s.lock.Unlock()
  2630  
  2631  	// Cross reference the requested trienodes with the response to find gaps
  2632  	// that the serving node is missing
  2633  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2634  	hash := make([]byte, 32)
  2635  
  2636  	nodes := make([][]byte, len(req.hashes))
  2637  	for i, j := 0, 0; i < len(trienodes); i++ {
  2638  		// Find the next hash that we've been served, leaving misses with nils
  2639  		hasher.Reset()
  2640  		hasher.Write(trienodes[i])
  2641  		hasher.Read(hash)
  2642  
  2643  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2644  			j++
  2645  		}
  2646  		if j < len(req.hashes) {
  2647  			nodes[j] = trienodes[i]
  2648  			j++
  2649  			continue
  2650  		}
  2651  		// We've either run out of hashes, or got unrequested data
  2652  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2653  		// Signal this request as failed, and ready for rescheduling
  2654  		s.scheduleRevertTrienodeHealRequest(req)
  2655  		return errors.New("unexpected healing trienode")
  2656  	}
  2657  	// Response validated, send it to the scheduler for filling
  2658  	response := &trienodeHealResponse{
  2659  		task:   req.task,
  2660  		hashes: req.hashes,
  2661  		paths:  req.paths,
  2662  		nodes:  nodes,
  2663  	}
  2664  	select {
  2665  	case req.deliver <- response:
  2666  	case <-req.cancel:
  2667  	case <-req.stale:
  2668  	}
  2669  	return nil
  2670  }
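
// The matching loop above exploits the protocol guarantee that responses
// preserve request order: each served blob is hashed and a cursor advances
// through the ordered request hashes, leaving nil gaps for items the peer
// omitted. An illustrative standalone sketch of the technique (hypothetical
// helper, not part of the original code):
func exampleMatchOrdered(requested []common.Hash, served [][]byte) ([][]byte, error) {
	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
	hash := make([]byte, 32)

	matched := make([][]byte, len(requested))
	for i, j := 0, 0; i < len(served); i++ {
		hasher.Reset()
		hasher.Write(served[i])
		hasher.Read(hash)

		// Skip over any requested hashes the peer chose not to serve
		for j < len(requested) && !bytes.Equal(hash, requested[j][:]) {
			j++
		}
		if j == len(requested) {
			return nil, errors.New("unrequested item in response")
		}
		matched[j] = served[i]
		j++
	}
	return matched, nil
}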
  2671  
  2672  // onHealByteCodes is a callback method to invoke when a batch of contract
  2673  // bytecodes is received from a remote peer in the healing phase.
  2674  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2675  	var size common.StorageSize
  2676  	for _, code := range bytecodes {
  2677  		size += common.StorageSize(len(code))
  2678  	}
  2679  	logger := peer.Log().New("reqid", id)
  2680  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2681  
  2682  	// Whether or not the response is valid, we can mark the peer as idle and
  2683  	// notify the scheduler to assign a new task. If the response is invalid,
  2684  	// we'll drop the peer in a bit.
  2685  	s.lock.Lock()
  2686  	if _, ok := s.peers[peer.ID()]; ok {
  2687  		s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2688  	}
  2689  	select {
  2690  	case s.update <- struct{}{}:
  2691  	default:
  2692  	}
  2693  	// Ensure the response is for a valid request
  2694  	req, ok := s.bytecodeHealReqs[id]
  2695  	if !ok {
  2696  		// Request stale, perhaps the peer timed out but came through in the end
  2697  		logger.Warn("Unexpected bytecode heal packet")
  2698  		s.lock.Unlock()
  2699  		return nil
  2700  	}
  2701  	delete(s.bytecodeHealReqs, id)
  2702  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2703  
  2704  	// Clean up the request timeout timer; we'll decide how to proceed further
  2705  	// based on the actual delivered content
  2706  	if !req.timeout.Stop() {
  2707  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2708  		s.lock.Unlock()
  2709  		return nil
  2710  	}
  2711  
  2712  	// Response is valid, but check if peer is signalling that it does not have
  2713  	// the requested data. For bytecode queries that means the peer is not yet
  2714  	// synced.
  2715  	if len(bytecodes) == 0 {
  2716  		logger.Debug("Peer rejected bytecode heal request")
  2717  		s.statelessPeers[peer.ID()] = struct{}{}
  2718  		s.lock.Unlock()
  2719  
  2720  		// Signal this request as failed, and ready for rescheduling
  2721  		s.scheduleRevertBytecodeHealRequest(req)
  2722  		return nil
  2723  	}
  2724  	s.lock.Unlock()
  2725  
  2726  	// Cross reference the requested bytecodes with the response to find gaps
  2727  	// that the serving node is missing
  2728  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2729  	hash := make([]byte, 32)
  2730  
  2731  	codes := make([][]byte, len(req.hashes))
  2732  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2733  		// Find the next hash that we've been served, leaving misses with nils
  2734  		hasher.Reset()
  2735  		hasher.Write(bytecodes[i])
  2736  		hasher.Read(hash)
  2737  
  2738  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2739  			j++
  2740  		}
  2741  		if j < len(req.hashes) {
  2742  			codes[j] = bytecodes[i]
  2743  			j++
  2744  			continue
  2745  		}
  2746  		// We've either run out of hashes, or been sent unrequested data
  2747  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  2748  		// Signal this request as failed, and ready for rescheduling
  2749  		s.scheduleRevertBytecodeHealRequest(req)
  2750  		return errors.New("unexpected healing bytecode")
  2751  	}
  2752  	// Response validated, send it to the scheduler for filling
  2753  	response := &bytecodeHealResponse{
  2754  		task:   req.task,
  2755  		hashes: req.hashes,
  2756  		codes:  codes,
  2757  	}
  2758  	select {
  2759  	case req.deliver <- response:
  2760  	case <-req.cancel:
  2761  	case <-req.stale:
  2762  	}
  2763  	return nil
  2764  }
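
// onHealByteCodes applies the same ordered hash-matching technique as
// OnTrieNodes (see the sketch after that method). For a single blob the check
// reduces to comparing the Keccak256 of the payload against the requested
// hash; a hypothetical one-line helper, for illustration only:
func exampleCodeMatchesHash(code []byte, want common.Hash) bool {
	return crypto.Keccak256Hash(code) == want
}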
  2765  
  2766  // onHealState is a callback method to invoke when a flat state (account
  2767  // or storage slot) is downloaded during the healing stage. The flat states
  2768  // can be persisted blindly and fixed up later in the generation stage.
  2769  // Note it's not concurrency safe; callers must serialize access externally.
  2770  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
  2771  	if len(paths) == 1 {
  2772  		var account types.StateAccount
  2773  		if err := rlp.DecodeBytes(value, &account); err != nil {
  2774  			return nil // Skip any account we cannot decode
  2775  		}
  2776  		blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash)
  2777  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  2778  		s.accountHealed += 1
  2779  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  2780  	}
  2781  	if len(paths) == 2 {
  2782  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  2783  		s.storageHealed += 1
  2784  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  2785  	}
  2786  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  2787  		s.stateWriter.Write() // It's fine to ignore the error here
  2788  		s.stateWriter.Reset()
  2789  	}
  2790  	return nil
  2791  }
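
// The threshold flush above amortizes disk writes: healed entries pile up in
// the batch and are only written out once it outgrows ethdb.IdealBatchSize.
// A generic sketch of the same pattern over any ethdb.Batch (illustrative
// only; unlike the blind write above, errors are surfaced here):
func exampleBatchedWrite(batch ethdb.Batch, key, value []byte) error {
	if err := batch.Put(key, value); err != nil {
		return err
	}
	if batch.ValueSize() > ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		batch.Reset()
	}
	return nil
}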
  2792  
  2793  // hashSpace is the total size of the 256 bit hash space for accounts.
  2794  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  2795  
  2796  // report calculates various status reports and provides them to the user.
  2797  func (s *Syncer) report(force bool) {
  2798  	if len(s.tasks) > 0 {
  2799  		s.reportSyncProgress(force)
  2800  		return
  2801  	}
  2802  	s.reportHealProgress(force)
  2803  }
  2804  
  2805  // reportSyncProgress calculates various status reports and provides them to the user.
  2806  func (s *Syncer) reportSyncProgress(force bool) {
  2807  	// Don't report all the events, just occasionally
  2808  	if !force && time.Since(s.logTime) < 8*time.Second {
  2809  		return
  2810  	}
  2811  	// Don't report anything until we have meaningful progress
  2812  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  2813  	if synced == 0 {
  2814  		return
  2815  	}
  2816  	accountGaps := new(big.Int)
  2817  	for _, task := range s.tasks {
  2818  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  2819  	}
  2820  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  2821  	if accountFills.BitLen() == 0 {
  2822  		return
  2823  	}
  2824  	s.logTime = time.Now()
  2825  	estBytes := float64(new(big.Int).Div(
  2826  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  2827  		accountFills,
  2828  	).Uint64())
  2829  	// Don't report anything until we have meaningful progress
  2830  	if estBytes < 1.0 {
  2831  		return
  2832  	}
  2833  	elapsed := time.Since(s.startTime)
  2834  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  2835  
  2836  	// Create a mega progress report
  2837  	var (
  2838  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  2839  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  2840  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  2841  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  2842  	)
  2843  	log.Info("State sync in progress", "synced", progress, "state", synced,
  2844  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  2845  }
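
// The estimate above extrapolates from hash space coverage: account hashes are
// uniformly distributed, so if accountFills covers half of hashSpace, the
// bytes synced so far should be roughly half of the eventual total. A sketch
// of the same arithmetic as a hypothetical helper (numbers in the comment are
// invented): with synced = 10GB and accountFills = hashSpace/2, the estimate
// comes out to 10GB * hashSpace / (hashSpace/2) = 20GB, i.e. 50.00% progress.
func exampleEstimateTotalBytes(synced uint64, accountFills *big.Int) uint64 {
	return new(big.Int).Div(
		new(big.Int).Mul(new(big.Int).SetUint64(synced), hashSpace),
		accountFills,
	).Uint64()
}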
  2846  
  2847  // reportHealProgress calculates various status reports and provides them to the user.
  2848  func (s *Syncer) reportHealProgress(force bool) {
  2849  	// Don't report all the events, just occasionally
  2850  	if !force && time.Since(s.logTime) < 8*time.Second {
  2851  		return
  2852  	}
  2853  	s.logTime = time.Now()
  2854  
  2855  	// Create a mega progress report
  2856  	var (
  2857  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  2858  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  2859  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  2860  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  2861  	)
  2862  	log.Info("State heal in progress", "accounts", accounts, "slots", storage,
  2863  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  2864  }
  2865  
  2866  // estimateRemainingSlots tries to determine roughly how many slots are left in
  2867  // a contract storage, based on the number of keys and the last hash. This method
  2868  // assumes that the hashes are lexicographically ordered and evenly distributed.
  2869  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  2870  	if last == (common.Hash{}) {
  2871  		return 0, errors.New("last hash empty")
  2872  	}
  2873  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  2874  	space.Div(space, last.Big())
  2875  	if !space.IsUint64() {
  2876  		// Gigantic address space probably due to too few or malicious slots
  2877  		return 0, errors.New("too few slots for estimation")
  2878  	}
  2879  	return space.Uint64() - uint64(hashes), nil
  2880  }
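
// A quick sanity check of the estimator above, as a hypothetical helper with
// invented numbers: 100 slots downloaded with the last hash sitting at half
// of the hash space implies roughly 200 slots in total, so 100 should remain.
func exampleRemainingSlots() (uint64, error) {
	half := new(big.Int).Rsh(math.MaxBig256, 1)
	return estimateRemainingSlots(100, common.BigToHash(half)) // 100 remaining, nil error
}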
  2881  
  2882  // capacitySort implements sort.Interface, allowing sorting by peer message
  2883  // throughput. Note, callers should use sort.Reverse to get the desired effect
  2884  // of highest capacity being at the front.
  2885  type capacitySort struct {
  2886  	ids  []string
  2887  	caps []int
  2888  }
  2889  
  2890  func (s *capacitySort) Len() int {
  2891  	return len(s.ids)
  2892  }
  2893  
  2894  func (s *capacitySort) Less(i, j int) bool {
  2895  	return s.caps[i] < s.caps[j]
  2896  }
  2897  
  2898  func (s *capacitySort) Swap(i, j int) {
  2899  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  2900  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  2901  }
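
// An illustrative use of capacitySort (not part of the original code): with
// sort.Reverse, the highest-capacity peer ends up at the front, as the type
// comment prescribes.
func exampleSortByCapacity(ids []string, caps []int) {
	sort.Sort(sort.Reverse(&capacitySort{ids: ids, caps: caps}))
	// ids[0] now names the peer with the largest measured capacity
}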