github.com/ethw3/go-ethereuma@v0.0.0-20221013053120-c14602a4c23c/eth/protocols/snap/sync.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"math/rand"
    26  	"sort"
    27  	"sync"
    28  	"time"
    29  
    30  	"github.com/ethw3/go-ethereuma/common"
    31  	"github.com/ethw3/go-ethereuma/common/math"
    32  	"github.com/ethw3/go-ethereuma/core/rawdb"
    33  	"github.com/ethw3/go-ethereuma/core/state"
    34  	"github.com/ethw3/go-ethereuma/core/state/snapshot"
    35  	"github.com/ethw3/go-ethereuma/core/types"
    36  	"github.com/ethw3/go-ethereuma/crypto"
    37  	"github.com/ethw3/go-ethereuma/ethdb"
    38  	"github.com/ethw3/go-ethereuma/event"
    39  	"github.com/ethw3/go-ethereuma/light"
    40  	"github.com/ethw3/go-ethereuma/log"
    41  	"github.com/ethw3/go-ethereuma/p2p/msgrate"
    42  	"github.com/ethw3/go-ethereuma/rlp"
    43  	"github.com/ethw3/go-ethereuma/trie"
    44  	"golang.org/x/crypto/sha3"
    45  )
    46  
    47  var (
    48  	// emptyRoot is the known root hash of an empty trie.
    49  	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
    50  
    51  	// emptyCode is the known hash of the empty EVM bytecode.
    52  	emptyCode = crypto.Keccak256Hash(nil)
    53  )
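
        // Editor's note (a hedged aside, not part of the original source): emptyRoot
        // is the keccak256 hash of the RLP encoding of an empty string (the single
        // byte 0x80), so it could equivalently be derived at startup rather than
        // hard-coded:
        //
        //	emptyRoot == crypto.Keccak256Hash([]byte{0x80})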
    54  
    55  const (
    56  	// minRequestSize is the minimum number of bytes to request from a remote peer.
    57  	// This number is used as the low cap for account and storage range requests.
    58  	// Bytecode and trienode are limited inherently by item count (1).
    59  	minRequestSize = 64 * 1024
    60  
    61  	// maxRequestSize is the maximum number of bytes to request from a remote peer.
    62  	// This number is used as the high cap for account and storage range requests.
    63  	// Bytecode and trienode are limited more explicitly by the caps below.
    64  	maxRequestSize = 512 * 1024
    65  
    66  	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
    67  	// single query. If this number is too low, we're not filling responses fully
    68  	// and waste round trip times. If it's too high, we're capping responses and
    69  	// waste bandwidth.
    70  	//
    71  	// Deployed bytecodes are currently capped at 24KB, so the minimum request
    72  	// size should be maxRequestSize / 24K. Assuming that most contracts do not
    73  	// come close to that, requesting 4x should be a good approximation.
    74  	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
    75  
    76  	// maxTrieRequestCount is the maximum number of trie node blobs to request in
    77  	// a single query. If this number is too low, we're not filling responses fully
    78  	// and waste round trip times. If it's too high, we're capping responses and
    79  	// waste bandwidth.
    80  	maxTrieRequestCount = maxRequestSize / 512
    81  )
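
        // Editor's note: with the constants above, maxCodeRequestCount works out to
        // 512KiB / 24KiB * 4 = 21 * 4 = 84 bytecodes per query (integer division),
        // and maxTrieRequestCount to 512KiB / 512B = 1024 trie nodes per query.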
    82  
    83  var (
    84  	// accountConcurrency is the number of chunks to split the account trie into
    85  	// to allow concurrent retrievals.
    86  	accountConcurrency = 16
    87  
    88  	// storageConcurrency is the number of chunks to split a large contract
    89  	// storage trie into to allow concurrent retrievals.
    90  	storageConcurrency = 16
    91  )
    92  
    93  // ErrCancelled is returned from snap syncing if the operation was prematurely
    94  // terminated.
    95  var ErrCancelled = errors.New("sync cancelled")
    96  
    97  // accountRequest tracks a pending account range request to ensure responses are
    98  // to actual requests and to validate any security constraints.
    99  //
   100  // Concurrency note: account requests and responses are handled concurrently from
   101  // the main runloop to allow Merkle proof verifications on the peer's thread and
   102  // to drop on invalid response. The request struct must contain all the data to
   103  // construct the response without accessing runloop internals (i.e. task). That
   104  // is only included to allow the runloop to match a response to the task being
   105  // synced without having yet another set of maps.
   106  type accountRequest struct {
   107  	peer string    // Peer to which this request is assigned
   108  	id   uint64    // Request ID of this request
   109  	time time.Time // Timestamp when the request was sent
   110  
   111  	deliver chan *accountResponse // Channel to deliver successful response on
   112  	revert  chan *accountRequest  // Channel to deliver request failure on
   113  	cancel  chan struct{}         // Channel to track sync cancellation
   114  	timeout *time.Timer           // Timer to track delivery timeout
   115  	stale   chan struct{}         // Channel to signal the request was dropped
   116  
   117  	origin common.Hash // First account requested to allow continuation checks
   118  	limit  common.Hash // Last account requested to allow non-overlapping chunking
   119  
   120  	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
   121  }
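
        // Editor's sketch (an illustration, not part of the original source): a
        // response handler running on the peer's thread typically hands a verified
        // `response` to the runloop, or backs off if the sync was cancelled or the
        // request already reverted:
        //
        //	select {
        //	case req.deliver <- response: // Verified response accepted by the runloop
        //	case <-req.cancel:            // Sync cycle terminated, drop the response
        //	case <-req.stale:             // Request already reverted (e.g. timed out)
        //	}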
   122  
   123  // accountResponse is an already Merkle-verified remote response to an account
   124  // range request. It contains the subtrie for the requested account range and
   125  // the database that's going to be filled with the internal nodes on commit.
   126  type accountResponse struct {
   127  	task *accountTask // Task which this request is filling
   128  
   129  	hashes   []common.Hash         // Account hashes in the returned range
   130  	accounts []*types.StateAccount // Expanded accounts in the returned range
   131  
   132  	cont bool // Whether the account range has a continuation
   133  }
   134  
   135  // bytecodeRequest tracks a pending bytecode request to ensure responses are to
   136  // actual requests and to validate any security constraints.
   137  //
   138  // Concurrency note: bytecode requests and responses are handled concurrently from
   139  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   140  // to drop on invalid response. The request struct must contain all the data to
   141  // construct the response without accessing runloop internals (i.e. task). That
   142  // is only included to allow the runloop to match a response to the task being
   143  // synced without having yet another set of maps.
   144  type bytecodeRequest struct {
   145  	peer string    // Peer to which this request is assigned
   146  	id   uint64    // Request ID of this request
   147  	time time.Time // Timestamp when the request was sent
   148  
   149  	deliver chan *bytecodeResponse // Channel to deliver successful response on
   150  	revert  chan *bytecodeRequest  // Channel to deliver request failure on
   151  	cancel  chan struct{}          // Channel to track sync cancellation
   152  	timeout *time.Timer            // Timer to track delivery timeout
   153  	stale   chan struct{}          // Channel to signal the request was dropped
   154  
   155  	hashes []common.Hash // Bytecode hashes to validate responses
   156  	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
   157  }
   158  
   159  // bytecodeResponse is an already verified remote response to a bytecode request.
   160  type bytecodeResponse struct {
   161  	task *accountTask // Task which this request is filling
   162  
   163  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   164  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   165  }
   166  
   167  // storageRequest tracks a pending storage ranges request to ensure responses are
   168  // to actual requests and to validate any security constraints.
   169  //
   170  // Concurrency note: storage requests and responses are handled concurrently from
   171  // the main runloop to allow Merkle proof verifications on the peer's thread and
   172  // to drop on invalid response. The request struct must contain all the data to
   173  // construct the response without accessing runloop internals (i.e. tasks). That
   174  // is only included to allow the runloop to match a response to the task being
   175  // synced without having yet another set of maps.
   176  type storageRequest struct {
   177  	peer string    // Peer to which this request is assigned
   178  	id   uint64    // Request ID of this request
   179  	time time.Time // Timestamp when the request was sent
   180  
   181  	deliver chan *storageResponse // Channel to deliver successful response on
   182  	revert  chan *storageRequest  // Channel to deliver request failure on
   183  	cancel  chan struct{}         // Channel to track sync cancellation
   184  	timeout *time.Timer           // Timer to track delivery timeout
   185  	stale   chan struct{}         // Channel to signal the request was dropped
   186  
   187  	accounts []common.Hash // Account hashes to validate responses
   188  	roots    []common.Hash // Storage roots to validate responses
   189  
   190  	origin common.Hash // First storage slot requested to allow continuation checks
   191  	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
   192  
   193  	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
   194  	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
   195  }
   196  
   197  // storageResponse is an already Merkle-verified remote response to a storage
   198  // range request. It contains the subtries for the requested storage ranges and
   199  // the databases that are going to be filled with the internal nodes on commit.
   200  type storageResponse struct {
   201  	mainTask *accountTask // Task which this response belongs to
   202  	subTask  *storageTask // Task which this response is filling
   203  
   204  	accounts []common.Hash // Account hashes requested, may be only partially filled
   205  	roots    []common.Hash // Storage roots requested, may be only partially filled
   206  
   207  	hashes [][]common.Hash // Storage slot hashes in the returned range
   208  	slots  [][][]byte      // Storage slot values in the returned range
   209  
   210  	cont bool // Whether the last storage range has a continuation
   211  }
   212  
   213  // trienodeHealRequest tracks a pending state trie request to ensure responses
   214  // are to actual requests and to validate any security constraints.
   215  //
   216  // Concurrency note: trie node requests and responses are handled concurrently from
   217  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   218  // to drop on invalid response. The request struct must contain all the data to
   219  // construct the response without accessing runloop internals (i.e. task). That
   220  // is only included to allow the runloop to match a response to the task being
   221  // synced without having yet another set of maps.
   222  type trienodeHealRequest struct {
   223  	peer string    // Peer to which this request is assigned
   224  	id   uint64    // Request ID of this request
   225  	time time.Time // Timestamp when the request was sent
   226  
   227  	deliver chan *trienodeHealResponse // Channel to deliver successful response on
   228  	revert  chan *trienodeHealRequest  // Channel to deliver request failure on
   229  	cancel  chan struct{}              // Channel to track sync cancellation
   230  	timeout *time.Timer                // Timer to track delivery timeout
   231  	stale   chan struct{}              // Channel to signal the request was dropped
   232  
   233  	paths  []string      // Trie node paths for identifying trie node
   234  	hashes []common.Hash // Trie node hashes to validate responses
   235  
   236  	task *healTask // Task which this request is filling (only access fields through the runloop!!)
   237  }
   238  
   239  // trienodeHealResponse is an already verified remote response to a trie node request.
   240  type trienodeHealResponse struct {
   241  	task *healTask // Task which this request is filling
   242  
   243  	paths  []string      // Paths of the trie nodes
   244  	hashes []common.Hash // Hashes of the trie nodes to avoid double hashing
   245  	nodes  [][]byte      // Actual trie nodes to store into the database (nil = missing)
   246  }
   247  
   248  // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
   249  // actual requests and to validate any security constraints.
   250  //
   251  // Concurrency note: bytecode requests and responses are handled concurrently from
   252  // the main runloop to allow Keccak256 hash verifications on the peer's thread and
   253  // to drop on invalid response. The request struct must contain all the data to
   254  // construct the response without accessing runloop internals (i.e. task). That
   255  // is only included to allow the runloop to match a response to the task being
   256  // synced without having yet another set of maps.
   257  type bytecodeHealRequest struct {
   258  	peer string    // Peer to which this request is assigned
   259  	id   uint64    // Request ID of this request
   260  	time time.Time // Timestamp when the request was sent
   261  
   262  	deliver chan *bytecodeHealResponse // Channel to deliver successful response on
   263  	revert  chan *bytecodeHealRequest  // Channel to deliver request failure on
   264  	cancel  chan struct{}              // Channel to track sync cancellation
   265  	timeout *time.Timer                // Timer to track delivery timeout
   266  	stale   chan struct{}              // Channel to signal the request was dropped
   267  
   268  	hashes []common.Hash // Bytecode hashes to validate responses
   269  	task   *healTask     // Task which this request is filling (only access fields through the runloop!!)
   270  }
   271  
   272  // bytecodeHealResponse is an already verified remote response to a bytecode request.
   273  type bytecodeHealResponse struct {
   274  	task *healTask // Task which this request is filling
   275  
   276  	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
   277  	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
   278  }
   279  
   280  // accountTask represents the sync task for a chunk of the account snapshot.
   281  type accountTask struct {
   282  	// These fields get serialized to leveldb on shutdown
   283  	Next     common.Hash                    // Next account to sync in this interval
   284  	Last     common.Hash                    // Last account to sync in this interval
   285  	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
   286  
   287  	// These fields are internals used during runtime
   288  	req  *accountRequest  // Pending request to fill this task
   289  	res  *accountResponse // Validated response filling this task
   290  	pend int              // Number of pending subtasks for this round
   291  
   292  	needCode  []bool // Flags whether the filling accounts need code retrieval
   293  	needState []bool // Flags whether the filling accounts need storage retrieval
   294  	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
   295  
   296  	codeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval
   297  	stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
   298  
   299  	genBatch ethdb.Batch     // Batch used by the node generator
   300  	genTrie  *trie.StackTrie // Node generator from storage slots
   301  
   302  	done bool // Flag whether the task can be removed
   303  }
   304  
   305  // storageTask represents the sync task for a chunk of the storage snapshot.
   306  type storageTask struct {
   307  	Next common.Hash // Next storage slot to sync in this interval
   308  	Last common.Hash // Last storage slot to sync in this interval
   309  
   310  	// These fields are internals used during runtime
   311  	root common.Hash     // Storage root hash for this instance
   312  	req  *storageRequest // Pending request to fill this task
   313  
   314  	genBatch ethdb.Batch     // Batch used by the node generator
   315  	genTrie  *trie.StackTrie // Node generator from storage slots
   316  
   317  	done bool // Flag whether the task can be removed
   318  }
   319  
   320  // healTask represents the sync task for healing the snap-synced chunk boundaries.
   321  type healTask struct {
   322  	scheduler *trie.Sync // State trie sync scheduler defining the tasks
   323  
   324  	trieTasks map[string]common.Hash   // Set of trie node tasks currently queued for retrieval, indexed by node path
   325  	codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash
   326  }
   327  
   328  // SyncProgress is a database entry to allow suspending and resuming a snapshot state
   329  // sync. As opposed to full and fast sync, there is no way to restart a suspended
   330  // snap sync without prior knowledge of the suspension point.
   331  type SyncProgress struct {
   332  	Tasks []*accountTask // The suspended account tasks (contract tasks within)
   333  
   334  	// Status report during syncing phase
   335  	AccountSynced  uint64             // Number of accounts downloaded
   336  	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   337  	BytecodeSynced uint64             // Number of bytecodes downloaded
   338  	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   339  	StorageSynced  uint64             // Number of storage slots downloaded
   340  	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   341  
   342  	// Status report during healing phase
   343  	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
   344  	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   345  	BytecodeHealSynced uint64             // Number of bytecodes downloaded
   346  	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   347  }
   348  
   349  // SyncPending is analogous to SyncProgress, but it's used to report on pending
   350  // ephemeral sync progress that doesn't get persisted into the database.
   351  type SyncPending struct {
   352  	TrienodeHeal uint64 // Number of state trie nodes pending
   353  	BytecodeHeal uint64 // Number of bytecodes pending
   354  }
   355  
   356  // SyncPeer abstracts out the methods required for a peer to be synced against
   357  // with the goal of allowing the construction of mock peers without the full-
   358  // blown networking.
   359  type SyncPeer interface {
   360  	// ID retrieves the peer's unique identifier.
   361  	ID() string
   362  
   363  	// RequestAccountRange fetches a batch of accounts rooted in a specific account
   364  	// trie, starting with the origin.
   365  	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
   366  
   367  	// RequestStorageRanges fetches a batch of storage slots belonging to one or
   368  	// more accounts. If slots from only one account are requested, an origin marker
   369  	// may also be used to retrieve from there.
   370  	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
   371  
   372  	// RequestByteCodes fetches a batch of bytecodes by hash.
   373  	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
   374  
   375  	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
   376  	// a specific state trie.
   377  	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
   378  
   379  	// Log retrieves the peer's own contextual logger.
   380  	Log() log.Logger
   381  }
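
        // Editor's sketch of a minimal test double for SyncPeer; the mockPeer name
        // and the no-op bodies are hypothetical, not part of the original source:
        //
        //	type mockPeer struct{ id string }
        //
        //	func (p *mockPeer) ID() string { return p.id }
        //	func (p *mockPeer) Log() log.Logger { return log.New("peer", p.id) }
        //	func (p *mockPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
        //		return nil // A real mock would answer via the syncer's delivery callbacks
        //	}
        //	// ...the remaining Request* methods are stubbed out analogously.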
   382  
   383  // Syncer is an Ethereum account and storage trie syncer based on snapshots and
   384  // the snap protocol. Its purpose is to download all the accounts and storage
   385  // slots from remote peers and reassemble chunks of the state trie, on top of
   386  // which a state sync can be run to fix any gaps / overlaps.
   387  //
   388  // Every network request has a variety of failure events:
   389  //   - The peer disconnects after task assignment, failing to send the request
   390  //   - The peer disconnects after sending the request, before delivering on it
   391  //   - The peer remains connected, but does not deliver a response in time
   392  //   - The peer delivers a stale response after a previous timeout
   393  //   - The peer delivers a refusal to serve the requested state
   394  type Syncer struct {
   395  	db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
   396  
   397  	root    common.Hash    // Current state trie root being synced
   398  	tasks   []*accountTask // Current account task set being synced
   399  	snapped bool           // Flag to signal that snap phase is done
   400  	healer  *healTask      // Current state healing task being executed
   401  	update  chan struct{}  // Notification channel for possible sync progression
   402  
   403  	peers    map[string]SyncPeer // Currently active peers to download from
   404  	peerJoin *event.Feed         // Event feed to react to peers joining
   405  	peerDrop *event.Feed         // Event feed to react to peers dropping
   406  	rates    *msgrate.Trackers   // Message throughput rates for peers
   407  
   408  	// Request tracking during syncing phase
   409  	statelessPeers map[string]struct{} // Peers that failed to deliver state data
   410  	accountIdlers  map[string]struct{} // Peers that aren't serving account requests
   411  	bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   412  	storageIdlers  map[string]struct{} // Peers that aren't serving storage requests
   413  
   414  	accountReqs  map[uint64]*accountRequest  // Account requests currently running
   415  	bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running
   416  	storageReqs  map[uint64]*storageRequest  // Storage requests currently running
   417  
   418  	accountSynced  uint64             // Number of accounts downloaded
   419  	accountBytes   common.StorageSize // Number of account trie bytes persisted to disk
   420  	bytecodeSynced uint64             // Number of bytecodes downloaded
   421  	bytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
   422  	storageSynced  uint64             // Number of storage slots downloaded
   423  	storageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
   424  
   425  	extProgress *SyncProgress // Progress that can be exposed to external callers
   426  
   427  	// Request tracking during healing phase
   428  	trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
   429  	bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
   430  
   431  	trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running
   432  	bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running
   433  
   434  	trienodeHealSynced uint64             // Number of state trie nodes downloaded
   435  	trienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
   436  	trienodeHealDups   uint64             // Number of state trie nodes already processed
   437  	trienodeHealNops   uint64             // Number of state trie nodes not requested
   438  	bytecodeHealSynced uint64             // Number of bytecodes downloaded
   439  	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
   440  	bytecodeHealDups   uint64             // Number of bytecodes already processed
   441  	bytecodeHealNops   uint64             // Number of bytecodes not requested
   442  
   443  	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
   444  	accountHealed      uint64             // Number of accounts downloaded during the healing stage
   445  	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
   446  	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
   447  	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
   448  
   449  	startTime time.Time // Time instance when snapshot sync started
   450  	logTime   time.Time // Time instance when status was last reported
   451  
   452  	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
   453  	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
   454  }
   455  
   456  // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
   457  // snap protocol.
   458  func NewSyncer(db ethdb.KeyValueStore) *Syncer {
   459  	return &Syncer{
   460  		db: db,
   461  
   462  		peers:    make(map[string]SyncPeer),
   463  		peerJoin: new(event.Feed),
   464  		peerDrop: new(event.Feed),
   465  		rates:    msgrate.NewTrackers(log.New("proto", "snap")),
   466  		update:   make(chan struct{}, 1),
   467  
   468  		accountIdlers:  make(map[string]struct{}),
   469  		storageIdlers:  make(map[string]struct{}),
   470  		bytecodeIdlers: make(map[string]struct{}),
   471  
   472  		accountReqs:  make(map[uint64]*accountRequest),
   473  		storageReqs:  make(map[uint64]*storageRequest),
   474  		bytecodeReqs: make(map[uint64]*bytecodeRequest),
   475  
   476  		trienodeHealIdlers: make(map[string]struct{}),
   477  		bytecodeHealIdlers: make(map[string]struct{}),
   478  
   479  		trienodeHealReqs: make(map[uint64]*trienodeHealRequest),
   480  		bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest),
   481  		stateWriter:      db.NewBatch(),
   482  
   483  		extProgress: new(SyncProgress),
   484  	}
   485  }
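
        // Example usage (editor's sketch; db, peer and stateRoot are assumed to be
        // supplied by the caller):
        //
        //	syncer := NewSyncer(db)
        //	if err := syncer.Register(peer); err != nil {
        //		// Peer was already registered
        //	}
        //	cancel := make(chan struct{})
        //	if err := syncer.Sync(stateRoot, cancel); err != nil {
        //		log.Error("Snap sync failed", "err", err)
        //	}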
   486  
   487  // Register injects a new data source into the syncer's peerset.
   488  func (s *Syncer) Register(peer SyncPeer) error {
   489  	// Make sure the peer is not registered yet
   490  	id := peer.ID()
   491  
   492  	s.lock.Lock()
   493  	if _, ok := s.peers[id]; ok {
   494  		log.Error("Snap peer already registered", "id", id)
   495  
   496  		s.lock.Unlock()
   497  		return errors.New("already registered")
   498  	}
   499  	s.peers[id] = peer
   500  	s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
   501  
   502  	// Mark the peer as idle, even if no sync is running
   503  	s.accountIdlers[id] = struct{}{}
   504  	s.storageIdlers[id] = struct{}{}
   505  	s.bytecodeIdlers[id] = struct{}{}
   506  	s.trienodeHealIdlers[id] = struct{}{}
   507  	s.bytecodeHealIdlers[id] = struct{}{}
   508  	s.lock.Unlock()
   509  
   510  	// Notify any active syncs that a new peer can be assigned data
   511  	s.peerJoin.Send(id)
   512  	return nil
   513  }
   514  
   515  // Unregister removes a previously registered data source from the syncer's peerset.
   516  func (s *Syncer) Unregister(id string) error {
   517  	// Remove all traces of the peer from the registry
   518  	s.lock.Lock()
   519  	if _, ok := s.peers[id]; !ok {
   520  		log.Error("Snap peer not registered", "id", id)
   521  
   522  		s.lock.Unlock()
   523  		return errors.New("not registered")
   524  	}
   525  	delete(s.peers, id)
   526  	s.rates.Untrack(id)
   527  
   528  	// Remove status markers, even if no sync is running
   529  	delete(s.statelessPeers, id)
   530  
   531  	delete(s.accountIdlers, id)
   532  	delete(s.storageIdlers, id)
   533  	delete(s.bytecodeIdlers, id)
   534  	delete(s.trienodeHealIdlers, id)
   535  	delete(s.bytecodeHealIdlers, id)
   536  	s.lock.Unlock()
   537  
   538  	// Notify any active syncs that pending requests need to be reverted
   539  	s.peerDrop.Send(id)
   540  	return nil
   541  }
   542  
   543  // Sync starts (or resumes a previous) sync cycle to iterate over a state trie
   544  // with the given root and reconstruct the nodes based on the snapshot leaves.
   545  // Previously downloaded segments will not be redownloaded or fixed, rather any
   546  // errors will be healed after the leaves are fully accumulated.
   547  func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
   548  	// Move the trie root from any previous value, revert stateless markers for
   549  	// any peers and initialize the syncer if it was not yet run
   550  	s.lock.Lock()
   551  	s.root = root
   552  	s.healer = &healTask{
   553  		scheduler: state.NewStateSync(root, s.db, s.onHealState),
   554  		trieTasks: make(map[string]common.Hash),
   555  		codeTasks: make(map[common.Hash]struct{}),
   556  	}
   557  	s.statelessPeers = make(map[string]struct{})
   558  	s.lock.Unlock()
   559  
   560  	if s.startTime == (time.Time{}) {
   561  		s.startTime = time.Now()
   562  	}
   563  	// Retrieve the previous sync status from LevelDB and abort if already synced
   564  	s.loadSyncStatus()
   565  	if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   566  		log.Debug("Snapshot sync already completed")
   567  		return nil
   568  	}
   569  	defer func() { // Persist any progress, independent of failure
   570  		for _, task := range s.tasks {
   571  			s.forwardAccountTask(task)
   572  		}
   573  		s.cleanAccountTasks()
   574  		s.saveSyncStatus()
   575  	}()
   576  
   577  	log.Debug("Starting snapshot sync cycle", "root", root)
   578  
   579  	// Flush out the last committed raw states
   580  	defer func() {
   581  		if s.stateWriter.ValueSize() > 0 {
   582  			s.stateWriter.Write()
   583  			s.stateWriter.Reset()
   584  		}
   585  	}()
   586  	defer s.report(true)
   587  
   588  	// Whether sync completed or not, disregard any future packets
   589  	defer func() {
   590  		log.Debug("Terminating snapshot sync cycle", "root", root)
   591  		s.lock.Lock()
   592  		s.accountReqs = make(map[uint64]*accountRequest)
   593  		s.storageReqs = make(map[uint64]*storageRequest)
   594  		s.bytecodeReqs = make(map[uint64]*bytecodeRequest)
   595  		s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)
   596  		s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)
   597  		s.lock.Unlock()
   598  	}()
   599  	// Keep scheduling sync tasks
   600  	peerJoin := make(chan string, 16)
   601  	peerJoinSub := s.peerJoin.Subscribe(peerJoin)
   602  	defer peerJoinSub.Unsubscribe()
   603  
   604  	peerDrop := make(chan string, 16)
   605  	peerDropSub := s.peerDrop.Subscribe(peerDrop)
   606  	defer peerDropSub.Unsubscribe()
   607  
   608  	// Create a set of unique channels for this sync cycle. We need these to be
   609  	// ephemeral so a data race doesn't accidentally deliver something stale on
   610  	// a persistent channel across syncs (yup, this happened)
   611  	var (
   612  		accountReqFails      = make(chan *accountRequest)
   613  		storageReqFails      = make(chan *storageRequest)
   614  		bytecodeReqFails     = make(chan *bytecodeRequest)
   615  		accountResps         = make(chan *accountResponse)
   616  		storageResps         = make(chan *storageResponse)
   617  		bytecodeResps        = make(chan *bytecodeResponse)
   618  		trienodeHealReqFails = make(chan *trienodeHealRequest)
   619  		bytecodeHealReqFails = make(chan *bytecodeHealRequest)
   620  		trienodeHealResps    = make(chan *trienodeHealResponse)
   621  		bytecodeHealResps    = make(chan *bytecodeHealResponse)
   622  	)
   623  	for {
   624  		// Remove all completed tasks and terminate sync if everything's done
   625  		s.cleanStorageTasks()
   626  		s.cleanAccountTasks()
   627  		if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
   628  			return nil
   629  		}
   630  		// Assign all the data retrieval tasks to any free peers
   631  		s.assignAccountTasks(accountResps, accountReqFails, cancel)
   632  		s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel)
   633  		s.assignStorageTasks(storageResps, storageReqFails, cancel)
   634  
   635  		if len(s.tasks) == 0 {
   636  			// Sync phase done, run heal phase
   637  			s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
   638  			s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
   639  		}
   640  		// Update sync progress
   641  		s.lock.Lock()
   642  		s.extProgress = &SyncProgress{
   643  			AccountSynced:      s.accountSynced,
   644  			AccountBytes:       s.accountBytes,
   645  			BytecodeSynced:     s.bytecodeSynced,
   646  			BytecodeBytes:      s.bytecodeBytes,
   647  			StorageSynced:      s.storageSynced,
   648  			StorageBytes:       s.storageBytes,
   649  			TrienodeHealSynced: s.trienodeHealSynced,
   650  			TrienodeHealBytes:  s.trienodeHealBytes,
   651  			BytecodeHealSynced: s.bytecodeHealSynced,
   652  			BytecodeHealBytes:  s.bytecodeHealBytes,
   653  		}
   654  		s.lock.Unlock()
   655  		// Wait for something to happen
   656  		select {
   657  		case <-s.update:
   658  			// Something happened (new peer, delivery, timeout), recheck tasks
   659  		case <-peerJoin:
   660  			// A new peer joined, try to schedule it new tasks
   661  		case id := <-peerDrop:
   662  			s.revertRequests(id)
   663  		case <-cancel:
   664  			return ErrCancelled
   665  
   666  		case req := <-accountReqFails:
   667  			s.revertAccountRequest(req)
   668  		case req := <-bytecodeReqFails:
   669  			s.revertBytecodeRequest(req)
   670  		case req := <-storageReqFails:
   671  			s.revertStorageRequest(req)
   672  		case req := <-trienodeHealReqFails:
   673  			s.revertTrienodeHealRequest(req)
   674  		case req := <-bytecodeHealReqFails:
   675  			s.revertBytecodeHealRequest(req)
   676  
   677  		case res := <-accountResps:
   678  			s.processAccountResponse(res)
   679  		case res := <-bytecodeResps:
   680  			s.processBytecodeResponse(res)
   681  		case res := <-storageResps:
   682  			s.processStorageResponse(res)
   683  		case res := <-trienodeHealResps:
   684  			s.processTrienodeHealResponse(res)
   685  		case res := <-bytecodeHealResps:
   686  			s.processBytecodeHealResponse(res)
   687  		}
   688  		// Report stats if something meaningful happened
   689  		s.report(false)
   690  	}
   691  }
   692  
   693  // loadSyncStatus retrieves a previously aborted sync status from the database,
   694  // or generates a fresh one if none is available.
   695  func (s *Syncer) loadSyncStatus() {
   696  	var progress SyncProgress
   697  
   698  	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
   699  		if err := json.Unmarshal(status, &progress); err != nil {
   700  			log.Error("Failed to decode snap sync status", "err", err)
   701  		} else {
   702  			for _, task := range progress.Tasks {
   703  				log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last)
   704  			}
   705  			s.tasks = progress.Tasks
   706  			for _, task := range s.tasks {
   707  				task.genBatch = ethdb.HookedBatch{
   708  					Batch: s.db.NewBatch(),
   709  					OnPut: func(key []byte, value []byte) {
   710  						s.accountBytes += common.StorageSize(len(key) + len(value))
   711  					},
   712  				}
   713  				task.genTrie = trie.NewStackTrie(task.genBatch)
   714  
   715  				for accountHash, subtasks := range task.SubTasks {
   716  					for _, subtask := range subtasks {
   717  						subtask.genBatch = ethdb.HookedBatch{
   718  							Batch: s.db.NewBatch(),
   719  							OnPut: func(key []byte, value []byte) {
   720  								s.storageBytes += common.StorageSize(len(key) + len(value))
   721  							},
   722  						}
   723  						subtask.genTrie = trie.NewStackTrieWithOwner(subtask.genBatch, accountHash)
   724  					}
   725  				}
   726  			}
   727  			s.lock.Lock()
   728  			defer s.lock.Unlock()
   729  
   730  			s.snapped = len(s.tasks) == 0
   731  
   732  			s.accountSynced = progress.AccountSynced
   733  			s.accountBytes = progress.AccountBytes
   734  			s.bytecodeSynced = progress.BytecodeSynced
   735  			s.bytecodeBytes = progress.BytecodeBytes
   736  			s.storageSynced = progress.StorageSynced
   737  			s.storageBytes = progress.StorageBytes
   738  
   739  			s.trienodeHealSynced = progress.TrienodeHealSynced
   740  			s.trienodeHealBytes = progress.TrienodeHealBytes
   741  			s.bytecodeHealSynced = progress.BytecodeHealSynced
   742  			s.bytecodeHealBytes = progress.BytecodeHealBytes
   743  			return
   744  		}
   745  	}
   746  	// Either we've failed to decode the previous state, or there was none.
   747  	// Start a fresh sync by chunking up the account range and scheduling
   748  	// them for retrieval.
   749  	s.tasks = nil
   750  	s.accountSynced, s.accountBytes = 0, 0
   751  	s.bytecodeSynced, s.bytecodeBytes = 0, 0
   752  	s.storageSynced, s.storageBytes = 0, 0
   753  	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
   754  	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
   755  
   756  	var next common.Hash
   757  	step := new(big.Int).Sub(
   758  		new(big.Int).Div(
   759  			new(big.Int).Exp(common.Big2, common.Big256, nil),
   760  			big.NewInt(int64(accountConcurrency)),
   761  		), common.Big1,
   762  	)
   763  	for i := 0; i < accountConcurrency; i++ {
   764  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
   765  		if i == accountConcurrency-1 {
   766  			// Make sure we don't overflow if the step is not a proper divisor
   767  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   768  		}
   769  		batch := ethdb.HookedBatch{
   770  			Batch: s.db.NewBatch(),
   771  			OnPut: func(key []byte, value []byte) {
   772  				s.accountBytes += common.StorageSize(len(key) + len(value))
   773  			},
   774  		}
   775  		s.tasks = append(s.tasks, &accountTask{
   776  			Next:     next,
   777  			Last:     last,
   778  			SubTasks: make(map[common.Hash][]*storageTask),
   779  			genBatch: batch,
   780  			genTrie:  trie.NewStackTrie(batch),
   781  		})
   782  		log.Debug("Created account sync task", "from", next, "last", last)
   783  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
   784  	}
   785  }
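
        // Editor's note: with accountConcurrency = 16, step is 2^256/16 - 1, so the
        // freshly created tasks cover [0x00...00, 0x0ff...ff], [0x100...00,
        // 0x1ff...ff] and so on, with the final task's Last clamped to 0xff...ff to
        // absorb any rounding remainder.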
   786  
   787  // saveSyncStatus marshals the remaining sync tasks into leveldb.
   788  func (s *Syncer) saveSyncStatus() {
   789  	// Serialize any partial progress to disk before spinning down
   790  	for _, task := range s.tasks {
   791  		if err := task.genBatch.Write(); err != nil {
   792  			log.Error("Failed to persist account slots", "err", err)
   793  		}
   794  		for _, subtasks := range task.SubTasks {
   795  			for _, subtask := range subtasks {
   796  				if err := subtask.genBatch.Write(); err != nil {
   797  					log.Error("Failed to persist storage slots", "err", err)
   798  				}
   799  			}
   800  		}
   801  	}
   802  	// Store the actual progress markers
   803  	progress := &SyncProgress{
   804  		Tasks:              s.tasks,
   805  		AccountSynced:      s.accountSynced,
   806  		AccountBytes:       s.accountBytes,
   807  		BytecodeSynced:     s.bytecodeSynced,
   808  		BytecodeBytes:      s.bytecodeBytes,
   809  		StorageSynced:      s.storageSynced,
   810  		StorageBytes:       s.storageBytes,
   811  		TrienodeHealSynced: s.trienodeHealSynced,
   812  		TrienodeHealBytes:  s.trienodeHealBytes,
   813  		BytecodeHealSynced: s.bytecodeHealSynced,
   814  		BytecodeHealBytes:  s.bytecodeHealBytes,
   815  	}
   816  	status, err := json.Marshal(progress)
   817  	if err != nil {
   818  		panic(err) // This can only fail during implementation
   819  	}
   820  	rawdb.WriteSnapshotSyncStatus(s.db, status)
   821  }
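
        // Editor's sketch of (roughly) what the persisted JSON looks like; the field
        // values are illustrative only:
        //
        //	{
        //	  "Tasks": [{"Next": "0x0000...", "Last": "0x0fff...", "SubTasks": {}}],
        //	  "AccountSynced": 1234,
        //	  "AccountBytes": 567890,
        //	  ...
        //	}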
   822  
   823  // Progress returns the snap sync status statistics.
   824  func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
   825  	s.lock.Lock()
   826  	defer s.lock.Unlock()
   827  	pending := new(SyncPending)
   828  	if s.healer != nil {
   829  		pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
   830  		pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
   831  	}
   832  	return s.extProgress, pending
   833  }
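
        // Editor's sketch of polling progress from a caller:
        //
        //	prog, pending := syncer.Progress()
        //	log.Info("Snap sync progress", "accounts", prog.AccountSynced,
        //		"trienodeHeal", pending.TrienodeHeal)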
   834  
   835  // cleanAccountTasks removes account range retrieval tasks that have already been
   836  // completed.
   837  func (s *Syncer) cleanAccountTasks() {
   838  	// If the sync was already done before, don't even bother
   839  	if len(s.tasks) == 0 {
   840  		return
   841  	}
   842  	// Sync wasn't finished previously, check for any task that can be finalized
   843  	for i := 0; i < len(s.tasks); i++ {
   844  		if s.tasks[i].done {
   845  			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
   846  			i--
   847  		}
   848  	}
   849  	// If everything was just finalized, generate the account trie and start healing
   850  	if len(s.tasks) == 0 {
   851  		s.lock.Lock()
   852  		s.snapped = true
   853  		s.lock.Unlock()
   854  
   855  		// Push the final sync report
   856  		s.reportSyncProgress(true)
   857  	}
   858  }
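
        // Editor's note: the loop above uses Go's in-place slice deletion idiom
        // (s.tasks = append(s.tasks[:i], s.tasks[i+1:]...) with i--), which keeps
        // task order while dropping completed entries without reallocating.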
   859  
   860  // cleanStorageTasks iterates over all the account tasks and storage sub-tasks
   861  // within, cleaning any that have been completed.
   862  func (s *Syncer) cleanStorageTasks() {
   863  	for _, task := range s.tasks {
   864  		for account, subtasks := range task.SubTasks {
   865  			// Remove storage range retrieval tasks that completed
   866  			for j := 0; j < len(subtasks); j++ {
   867  				if subtasks[j].done {
   868  					subtasks = append(subtasks[:j], subtasks[j+1:]...)
   869  					j--
   870  				}
   871  			}
   872  			if len(subtasks) > 0 {
   873  				task.SubTasks[account] = subtasks
   874  				continue
   875  			}
   876  			// If all storage chunks are done, mark the account as done too
   877  			for j, hash := range task.res.hashes {
   878  				if hash == account {
   879  					task.needState[j] = false
   880  				}
   881  			}
   882  			delete(task.SubTasks, account)
   883  			task.pend--
   884  
   885  			// If this was the last pending task, forward the account task
   886  			if task.pend == 0 {
   887  				s.forwardAccountTask(task)
   888  			}
   889  		}
   890  	}
   891  }
   892  
   893  // assignAccountTasks attempts to match idle peers to pending account range
   894  // retrievals.
   895  func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) {
   896  	s.lock.Lock()
   897  	defer s.lock.Unlock()
   898  
   899  	// Sort the peers by download capacity to use faster ones if many are available
   900  	idlers := &capacitySort{
   901  		ids:  make([]string, 0, len(s.accountIdlers)),
   902  		caps: make([]int, 0, len(s.accountIdlers)),
   903  	}
   904  	targetTTL := s.rates.TargetTimeout()
   905  	for id := range s.accountIdlers {
   906  		if _, ok := s.statelessPeers[id]; ok {
   907  			continue
   908  		}
   909  		idlers.ids = append(idlers.ids, id)
   910  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL))
   911  	}
   912  	if len(idlers.ids) == 0 {
   913  		return
   914  	}
   915  	sort.Sort(sort.Reverse(idlers))
   916  
   917  	// Iterate over all the tasks and try to find a pending one
   918  	for _, task := range s.tasks {
   919  		// Skip any tasks already filling
   920  		if task.req != nil || task.res != nil {
   921  			continue
   922  		}
   923  		// Task pending retrieval, try to find an idle peer. If no such peer
   924  		// exists, we probably assigned tasks for all (or they are stateless).
   925  		// Abort the entire assignment mechanism.
   926  		if len(idlers.ids) == 0 {
   927  			return
   928  		}
   929  		var (
   930  			idle = idlers.ids[0]
   931  			peer = s.peers[idle]
   932  			cap  = idlers.caps[0]
   933  		)
   934  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
   935  
   936  		// Matched a pending task to an idle peer, allocate a unique request id
   937  		var reqid uint64
   938  		for {
   939  			reqid = uint64(rand.Int63())
   940  			if reqid == 0 {
   941  				continue
   942  			}
   943  			if _, ok := s.accountReqs[reqid]; ok {
   944  				continue
   945  			}
   946  			break
   947  		}
   948  		// Generate the network query and send it to the peer
   949  		req := &accountRequest{
   950  			peer:    idle,
   951  			id:      reqid,
   952  			time:    time.Now(),
   953  			deliver: success,
   954  			revert:  fail,
   955  			cancel:  cancel,
   956  			stale:   make(chan struct{}),
   957  			origin:  task.Next,
   958  			limit:   task.Last,
   959  			task:    task,
   960  		}
   961  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
   962  			peer.Log().Debug("Account range request timed out", "reqid", reqid)
   963  			s.rates.Update(idle, AccountRangeMsg, 0, 0)
   964  			s.scheduleRevertAccountRequest(req)
   965  		})
   966  		s.accountReqs[reqid] = req
   967  		delete(s.accountIdlers, idle)
   968  
   969  		s.pend.Add(1)
   970  		go func(root common.Hash) {
   971  			defer s.pend.Done()
   972  
   973  			// Attempt to send the remote request and revert if it fails
   974  			if cap > maxRequestSize {
   975  				cap = maxRequestSize
   976  			}
   977  			if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
   978  				cap = minRequestSize
   979  			}
   980  			if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil {
   981  				peer.Log().Debug("Failed to request account range", "err", err)
   982  				s.scheduleRevertAccountRequest(req)
   983  			}
   984  		}(s.root)
   985  
   986  		// Inject the request into the task to block further assignments
   987  		task.req = req
   988  	}
   989  }
   990  
   991  // assignBytecodeTasks attempts to match idle peers to pending code retrievals.
   992  func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) {
   993  	s.lock.Lock()
   994  	defer s.lock.Unlock()
   995  
   996  	// Sort the peers by download capacity to use faster ones if many are available
   997  	idlers := &capacitySort{
   998  		ids:  make([]string, 0, len(s.bytecodeIdlers)),
   999  		caps: make([]int, 0, len(s.bytecodeIdlers)),
  1000  	}
  1001  	targetTTL := s.rates.TargetTimeout()
  1002  	for id := range s.bytecodeIdlers {
  1003  		if _, ok := s.statelessPeers[id]; ok {
  1004  			continue
  1005  		}
  1006  		idlers.ids = append(idlers.ids, id)
  1007  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1008  	}
  1009  	if len(idlers.ids) == 0 {
  1010  		return
  1011  	}
  1012  	sort.Sort(sort.Reverse(idlers))
  1013  
  1014  	// Iterate over all the tasks and try to find a pending one
  1015  	for _, task := range s.tasks {
  1016  		// Skip any tasks not in the bytecode retrieval phase
  1017  		if task.res == nil {
  1018  			continue
  1019  		}
  1020  		// Skip tasks that are already retrieving (or done with) all codes
  1021  		if len(task.codeTasks) == 0 {
  1022  			continue
  1023  		}
  1024  		// Task pending retrieval, try to find an idle peer. If no such peer
  1025  		// exists, we probably assigned tasks for all (or they are stateless).
  1026  		// Abort the entire assignment mechanism.
  1027  		if len(idlers.ids) == 0 {
  1028  			return
  1029  		}
  1030  		var (
  1031  			idle = idlers.ids[0]
  1032  			peer = s.peers[idle]
  1033  			cap  = idlers.caps[0]
  1034  		)
  1035  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1036  
  1037  		// Matched a pending task to an idle peer, allocate a unique request id
  1038  		var reqid uint64
  1039  		for {
  1040  			reqid = uint64(rand.Int63())
  1041  			if reqid == 0 {
  1042  				continue
  1043  			}
  1044  			if _, ok := s.bytecodeReqs[reqid]; ok {
  1045  				continue
  1046  			}
  1047  			break
  1048  		}
  1049  		// Generate the network query and send it to the peer
  1050  		if cap > maxCodeRequestCount {
  1051  			cap = maxCodeRequestCount
  1052  		}
  1053  		hashes := make([]common.Hash, 0, cap)
  1054  		for hash := range task.codeTasks {
  1055  			delete(task.codeTasks, hash)
  1056  			hashes = append(hashes, hash)
  1057  			if len(hashes) >= cap {
  1058  				break
  1059  			}
  1060  		}
  1061  		req := &bytecodeRequest{
  1062  			peer:    idle,
  1063  			id:      reqid,
  1064  			time:    time.Now(),
  1065  			deliver: success,
  1066  			revert:  fail,
  1067  			cancel:  cancel,
  1068  			stale:   make(chan struct{}),
  1069  			hashes:  hashes,
  1070  			task:    task,
  1071  		}
  1072  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1073  			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
  1074  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1075  			s.scheduleRevertBytecodeRequest(req)
  1076  		})
  1077  		s.bytecodeReqs[reqid] = req
  1078  		delete(s.bytecodeIdlers, idle)
  1079  
  1080  		s.pend.Add(1)
  1081  		go func() {
  1082  			defer s.pend.Done()
  1083  
  1084  			// Attempt to send the remote request and revert if it fails
  1085  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1086  				log.Debug("Failed to request bytecodes", "err", err)
  1087  				s.scheduleRevertBytecodeRequest(req)
  1088  			}
  1089  		}()
  1090  	}
  1091  }
  1092  
  1093  // assignStorageTasks attempts to match idle peers to pending storage range
  1094  // retrievals.
  1095  func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) {
  1096  	s.lock.Lock()
  1097  	defer s.lock.Unlock()
  1098  
  1099  	// Sort the peers by download capacity to use faster ones if many are available
  1100  	idlers := &capacitySort{
  1101  		ids:  make([]string, 0, len(s.storageIdlers)),
  1102  		caps: make([]int, 0, len(s.storageIdlers)),
  1103  	}
  1104  	targetTTL := s.rates.TargetTimeout()
  1105  	for id := range s.storageIdlers {
  1106  		if _, ok := s.statelessPeers[id]; ok {
  1107  			continue
  1108  		}
  1109  		idlers.ids = append(idlers.ids, id)
  1110  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL))
  1111  	}
  1112  	if len(idlers.ids) == 0 {
  1113  		return
  1114  	}
  1115  	sort.Sort(sort.Reverse(idlers))
  1116  
  1117  	// Iterate over all the tasks and try to find a pending one
  1118  	for _, task := range s.tasks {
  1119  		// Skip any tasks not in the storage retrieval phase
  1120  		if task.res == nil {
  1121  			continue
  1122  		}
  1123  		// Skip tasks that are already retrieving (or done with) all small states
  1124  		if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
  1125  			continue
  1126  		}
  1127  		// Task pending retrieval, try to find an idle peer. If no such peer
  1128  		// exists, we probably assigned tasks for all (or they are stateless).
  1129  		// Abort the entire assignment mechanism.
  1130  		if len(idlers.ids) == 0 {
  1131  			return
  1132  		}
  1133  		var (
  1134  			idle = idlers.ids[0]
  1135  			peer = s.peers[idle]
  1136  			cap  = idlers.caps[0]
  1137  		)
  1138  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1139  
  1140  		// Matched a pending task to an idle peer, allocate a unique request id
  1141  		var reqid uint64
  1142  		for {
  1143  			reqid = uint64(rand.Int63())
  1144  			if reqid == 0 {
  1145  				continue
  1146  			}
  1147  			if _, ok := s.storageReqs[reqid]; ok {
  1148  				continue
  1149  			}
  1150  			break
  1151  		}
  1152  		// Generate the network query and send it to the peer. If there are
  1153  		// large contract tasks pending, complete those before diving into
  1154  		// even more new contracts.
  1155  		if cap > maxRequestSize {
  1156  			cap = maxRequestSize
  1157  		}
  1158  		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
  1159  			cap = minRequestSize
  1160  		}
  1161  		storageSets := cap / 1024
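        		// Editor's note: the divisor above appears to budget roughly 1KiB of
        		// response data per requested contract, converting the peer's byte
        		// capacity into a number of storage sets (an assumption, not spelled
        		// out in the original source).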
  1162  
  1163  		var (
  1164  			accounts = make([]common.Hash, 0, storageSets)
  1165  			roots    = make([]common.Hash, 0, storageSets)
  1166  			subtask  *storageTask
  1167  		)
  1168  		for account, subtasks := range task.SubTasks {
  1169  			for _, st := range subtasks {
  1170  				// Skip any subtasks already filling
  1171  				if st.req != nil {
  1172  					continue
  1173  				}
  1174  				// Found an incomplete storage chunk, schedule it
  1175  				accounts = append(accounts, account)
  1176  				roots = append(roots, st.root)
  1177  				subtask = st
  1178  				break // Large contract chunks are downloaded individually
  1179  			}
  1180  			if subtask != nil {
  1181  				break // Large contract chunks are downloaded individually
  1182  			}
  1183  		}
  1184  		if subtask == nil {
  1185  			// No large contract requires retrieval, but small ones are available
  1186  			for account, root := range task.stateTasks {
  1187  				delete(task.stateTasks, account)
  1188  
  1189  				accounts = append(accounts, account)
  1190  				roots = append(roots, root)
  1191  
  1192  				if len(accounts) >= storageSets {
  1193  					break
  1194  				}
  1195  			}
  1196  		}
  1197  		// If nothing was found, it means this task is actually already fully
  1198  		// retrieving, but large contracts are hard to detect. Skip to the next.
  1199  		if len(accounts) == 0 {
  1200  			continue
  1201  		}
  1202  		req := &storageRequest{
  1203  			peer:     idle,
  1204  			id:       reqid,
  1205  			time:     time.Now(),
  1206  			deliver:  success,
  1207  			revert:   fail,
  1208  			cancel:   cancel,
  1209  			stale:    make(chan struct{}),
  1210  			accounts: accounts,
  1211  			roots:    roots,
  1212  			mainTask: task,
  1213  			subTask:  subtask,
  1214  		}
  1215  		if subtask != nil {
  1216  			req.origin = subtask.Next
  1217  			req.limit = subtask.Last
  1218  		}
  1219  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1220  			peer.Log().Debug("Storage request timed out", "reqid", reqid)
  1221  			s.rates.Update(idle, StorageRangesMsg, 0, 0)
  1222  			s.scheduleRevertStorageRequest(req)
  1223  		})
  1224  		s.storageReqs[reqid] = req
  1225  		delete(s.storageIdlers, idle)
  1226  
  1227  		s.pend.Add(1)
  1228  		go func(root common.Hash) {
  1229  			defer s.pend.Done()
  1230  
  1231  			// Attempt to send the remote request and revert if it fails
  1232  			var origin, limit []byte
  1233  			if subtask != nil {
  1234  				origin, limit = req.origin[:], req.limit[:]
  1235  			}
  1236  			if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil {
  1237  				log.Debug("Failed to request storage", "err", err)
  1238  				s.scheduleRevertStorageRequest(req)
  1239  			}
  1240  		}(s.root)
  1241  
  1242  		// Inject the request into the subtask to block further assignments
  1243  		if subtask != nil {
  1244  			subtask.req = req
  1245  		}
  1246  	}
  1247  }
  1248  
  1249  // assignTrienodeHealTasks attempts to match idle peers to trie node requests to
  1250  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1251  func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) {
  1252  	s.lock.Lock()
  1253  	defer s.lock.Unlock()
  1254  
  1255  	// Sort the peers by download capacity to use faster ones if many available
  1256  	idlers := &capacitySort{
  1257  		ids:  make([]string, 0, len(s.trienodeHealIdlers)),
  1258  		caps: make([]int, 0, len(s.trienodeHealIdlers)),
  1259  	}
  1260  	targetTTL := s.rates.TargetTimeout()
  1261  	for id := range s.trienodeHealIdlers {
  1262  		if _, ok := s.statelessPeers[id]; ok {
  1263  			continue
  1264  		}
  1265  		idlers.ids = append(idlers.ids, id)
  1266  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL))
  1267  	}
  1268  	if len(idlers.ids) == 0 {
  1269  		return
  1270  	}
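        	// Reversed, the sort yields descending capacity, so the fastest idle
        	// peers are always drained first.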
  1271  	sort.Sort(sort.Reverse(idlers))
  1272  
  1273  	// Iterate over pending tasks and try to find a peer to retrieve with
  1274  	for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1275  		// If there are not enough trie tasks queued to fully assign, fill the
  1276  		// queue from the state sync scheduler. The trie syncer schedules these
  1277  		// together with bytecodes, so we need to queue them combined.
  1278  		var (
  1279  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1280  			want = maxTrieRequestCount + maxCodeRequestCount
  1281  		)
  1282  		if have < want {
  1283  			paths, hashes, codes := s.healer.scheduler.Missing(want - have)
  1284  			for i, path := range paths {
  1285  				s.healer.trieTasks[path] = hashes[i]
  1286  			}
  1287  			for _, hash := range codes {
  1288  				s.healer.codeTasks[hash] = struct{}{}
  1289  			}
  1290  		}
  1291  		// If all the heal tasks are bytecodes or already downloading, bail
  1292  		if len(s.healer.trieTasks) == 0 {
  1293  			return
  1294  		}
  1295  		// Task pending retrieval, try to find an idle peer. If no such peer
  1296  		// exists, we probably assigned tasks to all of them (or they are all
  1297  		// stateless). Abort the entire assignment mechanism.
  1298  		if len(idlers.ids) == 0 {
  1299  			return
  1300  		}
  1301  		var (
  1302  			idle = idlers.ids[0]
  1303  			peer = s.peers[idle]
  1304  			cap  = idlers.caps[0]
  1305  		)
  1306  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1307  
  1308  		// Matched a pending task to an idle peer, allocate a unique request id
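        		// (a zero id is treated as invalid, so the loop retries until a
        		// fresh, unused non-zero id is found)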
  1309  		var reqid uint64
  1310  		for {
  1311  			reqid = uint64(rand.Int63())
  1312  			if reqid == 0 {
  1313  				continue
  1314  			}
  1315  			if _, ok := s.trienodeHealReqs[reqid]; ok {
  1316  				continue
  1317  			}
  1318  			break
  1319  		}
  1320  		// Generate the network query and send it to the peer
  1321  		if cap > maxTrieRequestCount {
  1322  			cap = maxTrieRequestCount
  1323  		}
  1324  		var (
  1325  			hashes   = make([]common.Hash, 0, cap)
  1326  			paths    = make([]string, 0, cap)
  1327  			pathsets = make([]TrieNodePathSet, 0, cap)
  1328  		)
  1329  		for path, hash := range s.healer.trieTasks {
  1330  			delete(s.healer.trieTasks, path)
  1331  
  1332  			paths = append(paths, path)
  1333  			hashes = append(hashes, hash)
  1334  			if len(paths) >= cap {
  1335  				break
  1336  			}
  1337  		}
  1338  		// Group requests by account hash
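        		// sortByAccountPath groups the trie node paths into per-account path
        		// sets, the layout the snap protocol expects for trie node requests.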
  1339  		paths, hashes, _, pathsets = sortByAccountPath(paths, hashes)
  1340  		req := &trienodeHealRequest{
  1341  			peer:    idle,
  1342  			id:      reqid,
  1343  			time:    time.Now(),
  1344  			deliver: success,
  1345  			revert:  fail,
  1346  			cancel:  cancel,
  1347  			stale:   make(chan struct{}),
  1348  			paths:   paths,
  1349  			hashes:  hashes,
  1350  			task:    s.healer,
  1351  		}
  1352  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1353  			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
  1354  			s.rates.Update(idle, TrieNodesMsg, 0, 0)
  1355  			s.scheduleRevertTrienodeHealRequest(req)
  1356  		})
  1357  		s.trienodeHealReqs[reqid] = req
  1358  		delete(s.trienodeHealIdlers, idle)
  1359  
  1360  		s.pend.Add(1)
  1361  		go func(root common.Hash) {
  1362  			defer s.pend.Done()
  1363  
  1364  			// Attempt to send the remote request and revert if it fails
  1365  			if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {
  1366  				log.Debug("Failed to request trienode healers", "err", err)
  1367  				s.scheduleRevertTrienodeHealRequest(req)
  1368  			}
  1369  		}(s.root)
  1370  	}
  1371  }
  1372  
  1373  // assignBytecodeHealTasks attempts to match idle peers to bytecode requests to
  1374  // heal any trie errors caused by the snap sync's chunked retrieval model.
  1375  func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) {
  1376  	s.lock.Lock()
  1377  	defer s.lock.Unlock()
  1378  
  1379  	// Sort the peers by download capacity to use faster ones if many available
  1380  	idlers := &capacitySort{
  1381  		ids:  make([]string, 0, len(s.bytecodeHealIdlers)),
  1382  		caps: make([]int, 0, len(s.bytecodeHealIdlers)),
  1383  	}
  1384  	targetTTL := s.rates.TargetTimeout()
  1385  	for id := range s.bytecodeHealIdlers {
  1386  		if _, ok := s.statelessPeers[id]; ok {
  1387  			continue
  1388  		}
  1389  		idlers.ids = append(idlers.ids, id)
  1390  		idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL))
  1391  	}
  1392  	if len(idlers.ids) == 0 {
  1393  		return
  1394  	}
  1395  	sort.Sort(sort.Reverse(idlers))
  1396  
  1397  	// Iterate over pending tasks and try to find a peer to retrieve with
  1398  	for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 {
  1399  		// If there are not enough bytecode tasks queued to fully assign, fill the
  1400  		// queue from the state sync scheduler. The trie syncer schedules these
  1401  		// together with trie nodes, so we need to queue them combined.
  1402  		var (
  1403  			have = len(s.healer.trieTasks) + len(s.healer.codeTasks)
  1404  			want = maxTrieRequestCount + maxCodeRequestCount
  1405  		)
  1406  		if have < want {
  1407  			paths, hashes, codes := s.healer.scheduler.Missing(want - have)
  1408  			for i, path := range paths {
  1409  				s.healer.trieTasks[path] = hashes[i]
  1410  			}
  1411  			for _, hash := range codes {
  1412  				s.healer.codeTasks[hash] = struct{}{}
  1413  			}
  1414  		}
  1415  		// If all the heal tasks are trienodes or already downloading, bail
  1416  		if len(s.healer.codeTasks) == 0 {
  1417  			return
  1418  		}
  1419  		// Task pending retrieval, try to find an idle peer. If no such peer
  1420  		// exists, we probably assigned tasks to all of them (or they are all
  1421  		// stateless). Abort the entire assignment mechanism.
  1422  		if len(idlers.ids) == 0 {
  1423  			return
  1424  		}
  1425  		var (
  1426  			idle = idlers.ids[0]
  1427  			peer = s.peers[idle]
  1428  			cap  = idlers.caps[0]
  1429  		)
  1430  		idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
  1431  
  1432  		// Matched a pending task to an idle peer, allocate a unique request id
  1433  		var reqid uint64
  1434  		for {
  1435  			reqid = uint64(rand.Int63())
  1436  			if reqid == 0 {
  1437  				continue
  1438  			}
  1439  			if _, ok := s.bytecodeHealReqs[reqid]; ok {
  1440  				continue
  1441  			}
  1442  			break
  1443  		}
  1444  		// Generate the network query and send it to the peer
  1445  		if cap > maxCodeRequestCount {
  1446  			cap = maxCodeRequestCount
  1447  		}
  1448  		hashes := make([]common.Hash, 0, cap)
  1449  		for hash := range s.healer.codeTasks {
  1450  			delete(s.healer.codeTasks, hash)
  1451  
  1452  			hashes = append(hashes, hash)
  1453  			if len(hashes) >= cap {
  1454  				break
  1455  			}
  1456  		}
  1457  		req := &bytecodeHealRequest{
  1458  			peer:    idle,
  1459  			id:      reqid,
  1460  			time:    time.Now(),
  1461  			deliver: success,
  1462  			revert:  fail,
  1463  			cancel:  cancel,
  1464  			stale:   make(chan struct{}),
  1465  			hashes:  hashes,
  1466  			task:    s.healer,
  1467  		}
  1468  		req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
  1469  			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
  1470  			s.rates.Update(idle, ByteCodesMsg, 0, 0)
  1471  			s.scheduleRevertBytecodeHealRequest(req)
  1472  		})
  1473  		s.bytecodeHealReqs[reqid] = req
  1474  		delete(s.bytecodeHealIdlers, idle)
  1475  
  1476  		s.pend.Add(1)
  1477  		go func() {
  1478  			defer s.pend.Done()
  1479  
  1480  			// Attempt to send the remote request and revert if it fails
  1481  			if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {
  1482  				log.Debug("Failed to request bytecode healers", "err", err)
  1483  				s.scheduleRevertBytecodeHealRequest(req)
  1484  			}
  1485  		}()
  1486  	}
  1487  }
  1488  
  1489  // revertRequests locates all the currently pending requests from a particular
  1490  // peer and reverts them, rescheduling for others to fulfill.
  1491  func (s *Syncer) revertRequests(peer string) {
  1492  	// Gather the requests first, since reverting them needs the lock too
  1493  	s.lock.Lock()
  1494  	var accountReqs []*accountRequest
  1495  	for _, req := range s.accountReqs {
  1496  		if req.peer == peer {
  1497  			accountReqs = append(accountReqs, req)
  1498  		}
  1499  	}
  1500  	var bytecodeReqs []*bytecodeRequest
  1501  	for _, req := range s.bytecodeReqs {
  1502  		if req.peer == peer {
  1503  			bytecodeReqs = append(bytecodeReqs, req)
  1504  		}
  1505  	}
  1506  	var storageReqs []*storageRequest
  1507  	for _, req := range s.storageReqs {
  1508  		if req.peer == peer {
  1509  			storageReqs = append(storageReqs, req)
  1510  		}
  1511  	}
  1512  	var trienodeHealReqs []*trienodeHealRequest
  1513  	for _, req := range s.trienodeHealReqs {
  1514  		if req.peer == peer {
  1515  			trienodeHealReqs = append(trienodeHealReqs, req)
  1516  		}
  1517  	}
  1518  	var bytecodeHealReqs []*bytecodeHealRequest
  1519  	for _, req := range s.bytecodeHealReqs {
  1520  		if req.peer == peer {
  1521  			bytecodeHealReqs = append(bytecodeHealReqs, req)
  1522  		}
  1523  	}
  1524  	s.lock.Unlock()
  1525  
  1526  	// Revert all the requests matching the peer
  1527  	for _, req := range accountReqs {
  1528  		s.revertAccountRequest(req)
  1529  	}
  1530  	for _, req := range bytecodeReqs {
  1531  		s.revertBytecodeRequest(req)
  1532  	}
  1533  	for _, req := range storageReqs {
  1534  		s.revertStorageRequest(req)
  1535  	}
  1536  	for _, req := range trienodeHealReqs {
  1537  		s.revertTrienodeHealRequest(req)
  1538  	}
  1539  	for _, req := range bytecodeHealReqs {
  1540  		s.revertBytecodeHealRequest(req)
  1541  	}
  1542  }
  1543  
  1544  // scheduleRevertAccountRequest asks the event loop to clean up an account range
  1545  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1546  func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {
  1547  	select {
  1548  	case req.revert <- req:
  1549  		// Sync event loop notified
  1550  	case <-req.cancel:
  1551  		// Sync cycle got cancelled
  1552  	case <-req.stale:
  1553  		// Request already reverted
  1554  	}
  1555  }
  1556  
  1557  // revertAccountRequest cleans up an account range request and returns all failed
  1558  // retrieval tasks to the scheduler for reassignment.
  1559  //
  1560  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1561  // On peer threads, use scheduleRevertAccountRequest.
  1562  func (s *Syncer) revertAccountRequest(req *accountRequest) {
  1563  	log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id)
  1564  	select {
  1565  	case <-req.stale:
  1566  		log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id)
  1567  		return
  1568  	default:
  1569  	}
  1570  	close(req.stale)
  1571  
  1572  	// Remove the request from the tracked set
  1573  	s.lock.Lock()
  1574  	delete(s.accountReqs, req.id)
  1575  	s.lock.Unlock()
  1576  
  1577  	// If there's a timeout timer still running, abort it and mark the account
  1578  	// task as not-pending, ready for rescheduling
  1579  	req.timeout.Stop()
  1580  	if req.task.req == req {
  1581  		req.task.req = nil
  1582  	}
  1583  }
  1584  
  1585  // scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request
  1586  // and return all failed retrieval tasks to the scheduler for reassignment.
  1587  func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {
  1588  	select {
  1589  	case req.revert <- req:
  1590  		// Sync event loop notified
  1591  	case <-req.cancel:
  1592  		// Sync cycle got cancelled
  1593  	case <-req.stale:
  1594  		// Request already reverted
  1595  	}
  1596  }
  1597  
  1598  // revertBytecodeRequest cleans up a bytecode request and returns all failed
  1599  // retrieval tasks to the scheduler for reassignment.
  1600  //
  1601  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1602  // On peer threads, use scheduleRevertBytecodeRequest.
  1603  func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
  1604  	log.Debug("Reverting bytecode request", "peer", req.peer)
  1605  	select {
  1606  	case <-req.stale:
  1607  		log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id)
  1608  		return
  1609  	default:
  1610  	}
  1611  	close(req.stale)
  1612  
  1613  	// Remove the request from the tracked set
  1614  	s.lock.Lock()
  1615  	delete(s.bytecodeReqs, req.id)
  1616  	s.lock.Unlock()
  1617  
  1618  	// If there's a timeout timer still running, abort it and mark the code
  1619  	// retrievals as not-pending, ready for rescheduling
  1620  	req.timeout.Stop()
  1621  	for _, hash := range req.hashes {
  1622  		req.task.codeTasks[hash] = struct{}{}
  1623  	}
  1624  }
  1625  
  1626  // scheduleRevertStorageRequest asks the event loop to clean up a storage range
  1627  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1628  func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {
  1629  	select {
  1630  	case req.revert <- req:
  1631  		// Sync event loop notified
  1632  	case <-req.cancel:
  1633  		// Sync cycle got cancelled
  1634  	case <-req.stale:
  1635  		// Request already reverted
  1636  	}
  1637  }
  1638  
  1639  // revertStorageRequest cleans up a storage range request and returns all failed
  1640  // retrieval tasks to the scheduler for reassignment.
  1641  //
  1642  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1643  // On peer threads, use scheduleRevertStorageRequest.
  1644  func (s *Syncer) revertStorageRequest(req *storageRequest) {
  1645  	log.Debug("Reverting storage request", "peer", req.peer)
  1646  	select {
  1647  	case <-req.stale:
  1648  		log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id)
  1649  		return
  1650  	default:
  1651  	}
  1652  	close(req.stale)
  1653  
  1654  	// Remove the request from the tracked set
  1655  	s.lock.Lock()
  1656  	delete(s.storageReqs, req.id)
  1657  	s.lock.Unlock()
  1658  
  1659  	// If there's a timeout timer still running, abort it and mark the storage
  1660  	// task as not-pending, ready for rescheduling
  1661  	req.timeout.Stop()
  1662  	if req.subTask != nil {
  1663  		req.subTask.req = nil
  1664  	} else {
  1665  		for i, account := range req.accounts {
  1666  			req.mainTask.stateTasks[account] = req.roots[i]
  1667  		}
  1668  	}
  1669  }
  1670  
  1671  // scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal
  1672  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1673  func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {
  1674  	select {
  1675  	case req.revert <- req:
  1676  		// Sync event loop notified
  1677  	case <-req.cancel:
  1678  		// Sync cycle got cancelled
  1679  	case <-req.stale:
  1680  		// Request already reverted
  1681  	}
  1682  }
  1683  
  1684  // revertTrienodeHealRequest cleans up a trienode heal request and returns all
  1685  // failed retrieval tasks to the scheduler for reassignment.
  1686  //
  1687  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1688  // On peer threads, use scheduleRevertTrienodeHealRequest.
  1689  func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
  1690  	log.Debug("Reverting trienode heal request", "peer", req.peer)
  1691  	select {
  1692  	case <-req.stale:
  1693  		log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1694  		return
  1695  	default:
  1696  	}
  1697  	close(req.stale)
  1698  
  1699  	// Remove the request from the tracked set
  1700  	s.lock.Lock()
  1701  	delete(s.trienodeHealReqs, req.id)
  1702  	s.lock.Unlock()
  1703  
  1704  	// If there's a timeout timer still running, abort it and mark the trie node
  1705  	// retrievals as not-pending, ready for rescheduling
  1706  	req.timeout.Stop()
  1707  	for i, path := range req.paths {
  1708  		req.task.trieTasks[path] = req.hashes[i]
  1709  	}
  1710  }
  1711  
  1712  // scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal
  1713  // request and return all failed retrieval tasks to the scheduler for reassignment.
  1714  func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {
  1715  	select {
  1716  	case req.revert <- req:
  1717  		// Sync event loop notified
  1718  	case <-req.cancel:
  1719  		// Sync cycle got cancelled
  1720  	case <-req.stale:
  1721  		// Request already reverted
  1722  	}
  1723  }
  1724  
  1725  // revertBytecodeHealRequest cleans up a bytecode heal request and returns all
  1726  // failed retrieval tasks to the scheduler for reassignment.
  1727  //
  1728  // Note, this needs to run on the event runloop thread to reschedule to idle peers.
  1729  // On peer threads, use scheduleRevertBytecodeHealRequest.
  1730  func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
  1731  	log.Debug("Reverting bytecode heal request", "peer", req.peer)
  1732  	select {
  1733  	case <-req.stale:
  1734  		log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id)
  1735  		return
  1736  	default:
  1737  	}
  1738  	close(req.stale)
  1739  
  1740  	// Remove the request from the tracked set
  1741  	s.lock.Lock()
  1742  	delete(s.bytecodeHealReqs, req.id)
  1743  	s.lock.Unlock()
  1744  
  1745  	// If there's a timeout timer still running, abort it and mark the code
  1746  	// retrievals as not-pending, ready for rescheduling
  1747  	req.timeout.Stop()
  1748  	for _, hash := range req.hashes {
  1749  		req.task.codeTasks[hash] = struct{}{}
  1750  	}
  1751  }
  1752  
  1753  // processAccountResponse integrates an already validated account range response
  1754  // into the account tasks.
  1755  func (s *Syncer) processAccountResponse(res *accountResponse) {
  1756  	// Switch the task from pending to filling
  1757  	res.task.req = nil
  1758  	res.task.res = res
  1759  
  1760  	// Ensure that the response doesn't overflow into the subsequent task
  1761  	last := res.task.Last.Big()
  1762  	for i, hash := range res.hashes {
  1763  		// Mark the range complete if the last hash is already included.
  1764  		// Keep iterating to delete any extra states beyond it.
  1765  		cmp := hash.Big().Cmp(last)
  1766  		if cmp == 0 {
  1767  			res.cont = false
  1768  			continue
  1769  		}
  1770  		if cmp > 0 {
  1771  			// Chunk overflown, cut off excess
  1772  			res.hashes = res.hashes[:i]
  1773  			res.accounts = res.accounts[:i]
  1774  			res.cont = false // Mark range completed
  1775  			break
  1776  		}
  1777  	}
  1778  	// Iterate over all the accounts and assemble which ones need further sub-
  1779  	// filling before the entire account range can be persisted.
  1780  	res.task.needCode = make([]bool, len(res.accounts))
  1781  	res.task.needState = make([]bool, len(res.accounts))
  1782  	res.task.needHeal = make([]bool, len(res.accounts))
  1783  
  1784  	res.task.codeTasks = make(map[common.Hash]struct{})
  1785  	res.task.stateTasks = make(map[common.Hash]common.Hash)
  1786  
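        	// Track which accounts had suspended large-storage subtasks resumed, so
        	// any that were not can be dropped further below.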
  1787  	resumed := make(map[common.Hash]struct{})
  1788  
  1789  	res.task.pend = 0
  1790  	for i, account := range res.accounts {
  1791  		// Check if the account is a contract with an unknown code
  1792  		if !bytes.Equal(account.CodeHash, emptyCode[:]) {
  1793  			if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
  1794  				res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
  1795  				res.task.needCode[i] = true
  1796  				res.task.pend++
  1797  			}
  1798  		}
  1799  		// Check if the account is a contract with an unknown storage trie
  1800  		if account.Root != emptyRoot {
  1801  			if ok, err := s.db.Has(account.Root[:]); err != nil || !ok {
  1802  				// If there was a previous large state retrieval in progress,
  1803  				// don't restart it from scratch. This happens if a sync cycle
  1804  				// is interrupted and resumed later. However, *do* update the
  1805  				// previous root hash.
  1806  				if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {
  1807  					log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root)
  1808  					for _, subtask := range subtasks {
  1809  						subtask.root = account.Root
  1810  					}
  1811  					res.task.needHeal[i] = true
  1812  					resumed[res.hashes[i]] = struct{}{}
  1813  				} else {
  1814  					res.task.stateTasks[res.hashes[i]] = account.Root
  1815  				}
  1816  				res.task.needState[i] = true
  1817  				res.task.pend++
  1818  			}
  1819  		}
  1820  	}
  1821  	// Delete any subtasks that have been aborted but not resumed. This may undo
  1822  	// some progress if a new peer gives us fewer accounts than an old one, but for
  1823  	// now we have to live with that.
  1824  	for hash := range res.task.SubTasks {
  1825  		if _, ok := resumed[hash]; !ok {
  1826  			log.Debug("Aborting suspended storage retrieval", "account", hash)
  1827  			delete(res.task.SubTasks, hash)
  1828  		}
  1829  	}
  1830  	// If the account range contained no contracts, or all have been fully filled
  1831  	// beforehand, short circuit storage filling and forward to the next task
  1832  	if res.task.pend == 0 {
  1833  		s.forwardAccountTask(res.task)
  1834  		return
  1835  	}
  1836  	// Some accounts are incomplete, leave as is for the storage and contract
  1837  	// task assigners to pick up and fill.
  1838  }
  1839  
  1840  // processBytecodeResponse integrates an already validated bytecode response
  1841  // into the account tasks.
  1842  func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
  1843  	batch := s.db.NewBatch()
  1844  
  1845  	var (
  1846  		codes uint64
  1847  	)
  1848  	for i, hash := range res.hashes {
  1849  		code := res.codes[i]
  1850  
  1851  		// If the bytecode was not delivered, reschedule it
  1852  		if code == nil {
  1853  			res.task.codeTasks[hash] = struct{}{}
  1854  			continue
  1855  		}
  1856  		// Code was delivered, mark it not needed any more
  1857  		for j, account := range res.task.res.accounts {
  1858  			if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
  1859  				res.task.needCode[j] = false
  1860  				res.task.pend--
  1861  			}
  1862  		}
  1863  		// Push the bytecode into a database batch
  1864  		codes++
  1865  		rawdb.WriteCode(batch, hash, code)
  1866  	}
  1867  	size := common.StorageSize(batch.ValueSize())
  1868  	if err := batch.Write(); err != nil {
  1869  		log.Crit("Failed to persist bytecodes", "err", err)
  1870  	}
  1871  	s.bytecodeSynced += codes
  1872  	s.bytecodeBytes += size
  1873  
  1874  	log.Debug("Persisted set of bytecodes", "count", codes, "bytes", size)
  1875  
  1876  	// If this delivery completed the last pending task, forward the account task
  1877  	// to the next chunk
  1878  	if res.task.pend == 0 {
  1879  		s.forwardAccountTask(res.task)
  1880  		return
  1881  	}
  1882  	// Some accounts are still incomplete, leave as is for the storage and contract
  1883  	// task assigners to pick up and fill.
  1884  }
  1885  
  1886  // processStorageResponse integrates an already validated storage response
  1887  // into the account tasks.
  1888  func (s *Syncer) processStorageResponse(res *storageResponse) {
  1889  	// Switch the subtask from pending to idle
  1890  	if res.subTask != nil {
  1891  		res.subTask.req = nil
  1892  	}
  1893  	batch := ethdb.HookedBatch{
  1894  		Batch: s.db.NewBatch(),
  1895  		OnPut: func(key []byte, value []byte) {
  1896  			s.storageBytes += common.StorageSize(len(key) + len(value))
  1897  		},
  1898  	}
  1899  	var (
  1900  		slots           int
  1901  		oldStorageBytes = s.storageBytes
  1902  	)
  1903  	// Iterate over all the accounts and reconstruct their storage tries from the
  1904  	// delivered slots
  1905  	for i, account := range res.accounts {
  1906  		// If the account was not delivered, reschedule it
  1907  		if i >= len(res.hashes) {
  1908  			res.mainTask.stateTasks[account] = res.roots[i]
  1909  			continue
  1910  		}
  1911  		// State was delivered, if complete mark as not needed any more, otherwise
  1912  		// mark the account as needing healing
  1913  		for j, hash := range res.mainTask.res.hashes {
  1914  			if account != hash {
  1915  				continue
  1916  			}
  1917  			acc := res.mainTask.res.accounts[j]
  1918  
  1919  			// If the packet contains multiple contract storage slots, all
  1920  			// but the last are surely complete. The last contract may be
  1921  			// chunked, so check its continuation flag.
  1922  			if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
  1923  				res.mainTask.needState[j] = false
  1924  				res.mainTask.pend--
  1925  			}
  1926  			// If the last contract was chunked, mark it as needing healing
  1927  			// to avoid writing it out to disk prematurely.
  1928  			if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {
  1929  				res.mainTask.needHeal[j] = true
  1930  			}
  1931  			// If the last contract was chunked, we need to switch to large
  1932  			// contract handling mode
  1933  			if res.subTask == nil && i == len(res.hashes)-1 && res.cont {
  1934  				// If we haven't yet started a large-contract retrieval, create
  1935  				// the subtasks for it within the main account task
  1936  				if tasks, ok := res.mainTask.SubTasks[account]; !ok {
  1937  					var (
  1938  						keys    = res.hashes[i]
  1939  						chunks  = uint64(storageConcurrency)
  1940  						lastKey common.Hash
  1941  					)
  1942  					if len(keys) > 0 {
  1943  						lastKey = keys[len(keys)-1]
  1944  					}
  1945  					// If the number of slots remaining is low, decrease the
  1946  					// number of chunks. Somewhere on the order of 10-15K slots
  1947  					// fit into a packet of 500KB. A key/slot pair is maximum 64
  1948  					// bytes, so pessimistically maxRequestSize/64 = 8K.
  1949  					//
  1950  					// Chunk so that at least 2 packets are needed to fill a task.
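        					//
        					// For example, at maxRequestSize/64 = 8K pairs per packet, an
        					// estimate of 100K remaining slots gives 100K/16K = 6, so the
        					// task is split into 7 chunks (never more than storageConcurrency).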
  1951  					if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
  1952  						if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
  1953  							chunks = n + 1
  1954  						}
  1955  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks)
  1956  					} else {
  1957  						log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
  1958  					}
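        					// Split the keyspace beyond lastKey into roughly equal
        					// hash ranges, one per chunk.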
  1959  					r := newHashRange(lastKey, chunks)
  1960  
  1961  					// Our first task is the one that was just filled by this response.
  1962  					batch := ethdb.HookedBatch{
  1963  						Batch: s.db.NewBatch(),
  1964  						OnPut: func(key []byte, value []byte) {
  1965  							s.storageBytes += common.StorageSize(len(key) + len(value))
  1966  						},
  1967  					}
  1968  					tasks = append(tasks, &storageTask{
  1969  						Next:     common.Hash{},
  1970  						Last:     r.End(),
  1971  						root:     acc.Root,
  1972  						genBatch: batch,
  1973  						genTrie:  trie.NewStackTrieWithOwner(batch, account),
  1974  					})
  1975  					for r.Next() {
  1976  						batch := ethdb.HookedBatch{
  1977  							Batch: s.db.NewBatch(),
  1978  							OnPut: func(key []byte, value []byte) {
  1979  								s.storageBytes += common.StorageSize(len(key) + len(value))
  1980  							},
  1981  						}
  1982  						tasks = append(tasks, &storageTask{
  1983  							Next:     r.Start(),
  1984  							Last:     r.End(),
  1985  							root:     acc.Root,
  1986  							genBatch: batch,
  1987  							genTrie:  trie.NewStackTrieWithOwner(batch, account),
  1988  						})
  1989  					}
  1990  					for _, task := range tasks {
  1991  						log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last)
  1992  					}
  1993  					res.mainTask.SubTasks[account] = tasks
  1994  
  1995  					// Since we've just created the sub-tasks, this response
  1996  					// is surely for the first one (zero origin)
  1997  					res.subTask = tasks[0]
  1998  				}
  1999  			}
  2000  			// If we're in large contract delivery mode, forward the subtask
  2001  			if res.subTask != nil {
  2002  				// Ensure the response doesn't overflow into the subsequent task
  2003  				last := res.subTask.Last.Big()
  2004  				// Find the first overflowing key. While at it, mark the response as
  2005  				// complete if the range includes or passes the subtask's 'last'
  2006  				index := sort.Search(len(res.hashes[i]), func(k int) bool {
  2007  					cmp := res.hashes[i][k].Big().Cmp(last)
  2008  					if cmp >= 0 {
  2009  						res.cont = false
  2010  					}
  2011  					return cmp > 0
  2012  				})
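        				// sort.Search returns len(res.hashes[i]) if no key overflows,
        				// in which case the slicing below is a no-op.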
  2013  				if index >= 0 {
  2014  					// Cut off excess
  2015  					res.hashes[i] = res.hashes[i][:index]
  2016  					res.slots[i] = res.slots[i][:index]
  2017  				}
  2018  				// Forward the relevant storage chunk (even if created just now)
  2019  				if res.cont {
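        					// Advance the cursor to just past the last delivered slot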
  2020  					res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1])
  2021  				} else {
  2022  					res.subTask.done = true
  2023  				}
  2024  			}
  2025  		}
  2026  		// Iterate over all the complete contracts, reconstruct the trie nodes and
  2027  		// push them to disk. If the contract is chunked, the trie nodes will be
  2028  		// reconstructed later.
  2029  		slots += len(res.hashes[i])
  2030  
  2031  		if i < len(res.hashes)-1 || res.subTask == nil {
  2032  			tr := trie.NewStackTrieWithOwner(batch, account)
  2033  			for j := 0; j < len(res.hashes[i]); j++ {
  2034  				tr.Update(res.hashes[i][j][:], res.slots[i][j])
  2035  			}
  2036  			tr.Commit()
  2037  		}
  2038  		// Persist the received storage segments. This flat state may be
  2039  		// outdated during the sync, but it can be fixed later during
  2040  		// snapshot generation.
  2041  		for j := 0; j < len(res.hashes[i]); j++ {
  2042  			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
  2043  
  2044  			// If we're storing large contracts, generate the trie nodes
  2045  			// on the fly so as not to trash the gluing points
  2046  			if i == len(res.hashes)-1 && res.subTask != nil {
  2047  				res.subTask.genTrie.Update(res.hashes[i][j][:], res.slots[i][j])
  2048  			}
  2049  		}
  2050  	}
  2051  	// Large contracts could have generated new trie nodes, flush them to disk
  2052  	if res.subTask != nil {
  2053  		if res.subTask.done {
  2054  			if root, err := res.subTask.genTrie.Commit(); err != nil {
  2055  				log.Error("Failed to commit stack slots", "err", err)
  2056  			} else if root == res.subTask.root {
  2057  				// If the chunk overflowed but was nonetheless a full delivery, clear the heal request
  2058  				for i, account := range res.mainTask.res.hashes {
  2059  					if account == res.accounts[len(res.accounts)-1] {
  2060  						res.mainTask.needHeal[i] = false
  2061  					}
  2062  				}
  2063  			}
  2064  		}
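        		// Flush the generated trie nodes once the batch outgrows the ideal
        		// size, or unconditionally once the subtask is done.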
  2065  		if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
  2066  			if err := res.subTask.genBatch.Write(); err != nil {
  2067  				log.Error("Failed to persist stack slots", "err", err)
  2068  			}
  2069  			res.subTask.genBatch.Reset()
  2070  		}
  2071  	}
  2072  	// Flush anything written just now and update the stats
  2073  	if err := batch.Write(); err != nil {
  2074  		log.Crit("Failed to persist storage slots", "err", err)
  2075  	}
  2076  	s.storageSynced += uint64(slots)
  2077  
  2078  	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)
  2079  
  2080  	// If this delivery completed the last pending task, forward the account task
  2081  	// to the next chunk
  2082  	if res.mainTask.pend == 0 {
  2083  		s.forwardAccountTask(res.mainTask)
  2084  		return
  2085  	}
  2086  	// Some accounts are still incomplete, leave as is for the storage and contract
  2087  	// task assigners to pick up and fill.
  2088  }
  2089  
  2090  // processTrienodeHealResponse integrates an already validated trienode response
  2091  // into the healer tasks.
  2092  func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
  2093  	for i, hash := range res.hashes {
  2094  		node := res.nodes[i]
  2095  
  2096  		// If the trie node was not delivered, reschedule it
  2097  		if node == nil {
  2098  			res.task.trieTasks[res.paths[i]] = res.hashes[i]
  2099  			continue
  2100  		}
  2101  		// Push the trie node into the state syncer
  2102  		s.trienodeHealSynced++
  2103  		s.trienodeHealBytes += common.StorageSize(len(node))
  2104  
  2105  		err := s.healer.scheduler.ProcessNode(trie.NodeSyncResult{Path: res.paths[i], Data: node})
  2106  		switch err {
  2107  		case nil:
  2108  		case trie.ErrAlreadyProcessed:
  2109  			s.trienodeHealDups++
  2110  		case trie.ErrNotRequested:
  2111  			s.trienodeHealNops++
  2112  		default:
  2113  			log.Error("Invalid trienode processed", "hash", hash, "err", err)
  2114  		}
  2115  	}
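        	// Commit whatever trie nodes the scheduler has fully resolved into a
        	// single database batch.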
  2116  	batch := s.db.NewBatch()
  2117  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2118  		log.Error("Failed to commit healing data", "err", err)
  2119  	}
  2120  	if err := batch.Write(); err != nil {
  2121  		log.Crit("Failed to persist healing data", "err", err)
  2122  	}
  2123  	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
  2124  }
  2125  
  2126  // processBytecodeHealResponse integrates an already validated bytecode response
  2127  // into the healer tasks.
  2128  func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
  2129  	for i, hash := range res.hashes {
  2130  		node := res.codes[i]
  2131  
  2132  		// If the bytecode was not delivered, reschedule it
  2133  		if node == nil {
  2134  			res.task.codeTasks[hash] = struct{}{}
  2135  			continue
  2136  		}
  2137  		// Push the bytecode into the state syncer
  2138  		s.bytecodeHealSynced++
  2139  		s.bytecodeHealBytes += common.StorageSize(len(node))
  2140  
  2141  		err := s.healer.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node})
  2142  		switch err {
  2143  		case nil:
  2144  		case trie.ErrAlreadyProcessed:
  2145  			s.bytecodeHealDups++
  2146  		case trie.ErrNotRequested:
  2147  			s.bytecodeHealNops++
  2148  		default:
  2149  			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
  2150  		}
  2151  	}
  2152  	batch := s.db.NewBatch()
  2153  	if err := s.healer.scheduler.Commit(batch); err != nil {
  2154  		log.Error("Failed to commit healing data", "err", err)
  2155  	}
  2156  	if err := batch.Write(); err != nil {
  2157  		log.Crit("Failed to persist healing data", "err", err)
  2158  	}
  2159  	log.Debug("Persisted set of healing data", "type", "bytecode", "bytes", common.StorageSize(batch.ValueSize()))
  2160  }
  2161  
  2162  // forwardAccountTask takes a filled account task and persists anything available
  2163  // into the database, after which it forwards the next account marker so that the
  2164  // task's next chunk may be filled.
  2165  func (s *Syncer) forwardAccountTask(task *accountTask) {
  2166  	// Remove any pending delivery
  2167  	res := task.res
  2168  	if res == nil {
  2169  		return // nothing to forward
  2170  	}
  2171  	task.res = nil
  2172  
  2173  	// Persist the received account segments. This flat state may be
  2174  	// outdated during the sync, but it can be fixed later during
  2175  	// snapshot generation.
  2176  	oldAccountBytes := s.accountBytes
  2177  
  2178  	batch := ethdb.HookedBatch{
  2179  		Batch: s.db.NewBatch(),
  2180  		OnPut: func(key []byte, value []byte) {
  2181  			s.accountBytes += common.StorageSize(len(key) + len(value))
  2182  		},
  2183  	}
  2184  	for i, hash := range res.hashes {
  2185  		if task.needCode[i] || task.needState[i] {
  2186  			break
  2187  		}
  2188  		slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].CodeHash)
  2189  		rawdb.WriteAccountSnapshot(batch, hash, slim)
  2190  
  2191  		// If the account is complete, drop it into the stack trie to generate
  2192  		// account trie nodes for it
  2193  		if !task.needHeal[i] {
  2194  			full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
  2195  			if err != nil {
  2196  				panic(err) // Really shouldn't ever happen
  2197  			}
  2198  			task.genTrie.Update(hash[:], full)
  2199  		}
  2200  	}
  2201  	// Flush anything written just now and update the stats
  2202  	if err := batch.Write(); err != nil {
  2203  		log.Crit("Failed to persist accounts", "err", err)
  2204  	}
  2205  	s.accountSynced += uint64(len(res.accounts))
  2206  
  2207  	// Task filling persisted, push the chunk marker forward to the first
  2208  	// account still missing data.
  2209  	for i, hash := range res.hashes {
  2210  		if task.needCode[i] || task.needState[i] {
  2211  			return
  2212  		}
  2213  		task.Next = incHash(hash)
  2214  	}
  2215  	// All accounts marked as complete, track if the entire task is done
  2216  	task.done = !res.cont
  2217  
  2218  	// The stack trie could have generated trie nodes, push them to disk (we need
  2219  	// to flush after finalizing task.done). It's fine even if we crash and lose
  2220  	// this write, as it will only cause more data to be downloaded during heal.
  2221  	if task.done {
  2222  		if _, err := task.genTrie.Commit(); err != nil {
  2223  			log.Error("Failed to commit stack account", "err", err)
  2224  		}
  2225  	}
  2226  	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
  2227  		if err := task.genBatch.Write(); err != nil {
  2228  			log.Error("Failed to persist stack account", "err", err)
  2229  		}
  2230  		task.genBatch.Reset()
  2231  	}
  2232  	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
  2233  }
  2234  
  2235  // OnAccounts is a callback method to invoke when a range of accounts is
  2236  // received from a remote peer.
  2237  func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
  2238  	size := common.StorageSize(len(hashes) * common.HashLength)
  2239  	for _, account := range accounts {
  2240  		size += common.StorageSize(len(account))
  2241  	}
  2242  	for _, node := range proof {
  2243  		size += common.StorageSize(len(node))
  2244  	}
  2245  	logger := peer.Log().New("reqid", id)
  2246  	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
  2247  
  2248  	// Whether or not the response is valid, we can mark the peer as idle and
  2249  	// notify the scheduler to assign a new task. If the response is invalid,
  2250  	// we'll drop the peer in a bit.
  2251  	s.lock.Lock()
  2252  	if _, ok := s.peers[peer.ID()]; ok {
  2253  		s.accountIdlers[peer.ID()] = struct{}{}
  2254  	}
  2255  	select {
  2256  	case s.update <- struct{}{}:
  2257  	default:
  2258  	}
  2259  	// Ensure the response is for a valid request
  2260  	req, ok := s.accountReqs[id]
  2261  	if !ok {
  2262  		// Request stale, perhaps the peer timed out but came through in the end
  2263  		logger.Warn("Unexpected account range packet")
  2264  		s.lock.Unlock()
  2265  		return nil
  2266  	}
  2267  	delete(s.accountReqs, id)
  2268  	s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size))
  2269  
  2270  	// Clean up the request timeout timer, we'll see how to proceed further based
  2271  	// on the actual delivered content
  2272  	if !req.timeout.Stop() {
  2273  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2274  		s.lock.Unlock()
  2275  		return nil
  2276  	}
  2277  	// Response is valid, but check if peer is signalling that it does not have
  2278  	// the requested data. For account range queries that means the state being
  2279  	// retrieved was either already pruned remotely, or the peer is not yet
  2280  	// synced to our head.
  2281  	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
  2282  		logger.Debug("Peer rejected account range request", "root", s.root)
  2283  		s.statelessPeers[peer.ID()] = struct{}{}
  2284  		s.lock.Unlock()
  2285  
  2286  		// Signal this request as failed, and ready for rescheduling
  2287  		s.scheduleRevertAccountRequest(req)
  2288  		return nil
  2289  	}
  2290  	root := s.root
  2291  	s.lock.Unlock()
  2292  
  2293  	// Reconstruct a partial trie from the response and verify it
  2294  	keys := make([][]byte, len(hashes))
  2295  	for i, key := range hashes {
  2296  		keys[i] = common.CopyBytes(key[:])
  2297  	}
  2298  	nodes := make(light.NodeList, len(proof))
  2299  	for i, node := range proof {
  2300  		nodes[i] = node
  2301  	}
  2302  	proofdb := nodes.NodeSet()
  2303  
  2304  	var end []byte
  2305  	if len(keys) > 0 {
  2306  		end = keys[len(keys)-1]
  2307  	}
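        	// Check that the delivered accounts, together with the boundary proof,
        	// form a valid, contiguous slice of the account trie at 'root'.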
  2308  	cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
  2309  	if err != nil {
  2310  		logger.Warn("Account range failed proof", "err", err)
  2311  		// Signal this request as failed, and ready for rescheduling
  2312  		s.scheduleRevertAccountRequest(req)
  2313  		return err
  2314  	}
  2315  	accs := make([]*types.StateAccount, len(accounts))
  2316  	for i, account := range accounts {
  2317  		acc := new(types.StateAccount)
  2318  		if err := rlp.DecodeBytes(account, acc); err != nil {
  2319  			panic(err) // We created these blobs, we must be able to decode them
  2320  		}
  2321  		accs[i] = acc
  2322  	}
  2323  	response := &accountResponse{
  2324  		task:     req.task,
  2325  		hashes:   hashes,
  2326  		accounts: accs,
  2327  		cont:     cont,
  2328  	}
  2329  	select {
  2330  	case req.deliver <- response:
  2331  	case <-req.cancel:
  2332  	case <-req.stale:
  2333  	}
  2334  	return nil
  2335  }
  2336  
  2337  // OnByteCodes is a callback method to invoke when a batch of contract
  2338  // bytecodes is received from a remote peer.
  2339  func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2340  	s.lock.RLock()
  2341  	syncing := !s.snapped
  2342  	s.lock.RUnlock()
  2343  
  2344  	if syncing {
  2345  		return s.onByteCodes(peer, id, bytecodes)
  2346  	}
  2347  	return s.onHealByteCodes(peer, id, bytecodes)
  2348  }
  2349  
  2350  // onByteCodes is a callback method to invoke when a batch of contract
  2351  // bytecodes is received from a remote peer in the syncing phase.
  2352  func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2353  	var size common.StorageSize
  2354  	for _, code := range bytecodes {
  2355  		size += common.StorageSize(len(code))
  2356  	}
  2357  	logger := peer.Log().New("reqid", id)
  2358  	logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2359  
  2360  	// Whether or not the response is valid, we can mark the peer as idle and
  2361  	// notify the scheduler to assign a new task. If the response is invalid,
  2362  	// we'll drop the peer in a bit.
  2363  	s.lock.Lock()
  2364  	if _, ok := s.peers[peer.ID()]; ok {
  2365  		s.bytecodeIdlers[peer.ID()] = struct{}{}
  2366  	}
  2367  	select {
  2368  	case s.update <- struct{}{}:
  2369  	default:
  2370  	}
  2371  	// Ensure the response is for a valid request
  2372  	req, ok := s.bytecodeReqs[id]
  2373  	if !ok {
  2374  		// Request stale, perhaps the peer timed out but came through in the end
  2375  		logger.Warn("Unexpected bytecode packet")
  2376  		s.lock.Unlock()
  2377  		return nil
  2378  	}
  2379  	delete(s.bytecodeReqs, id)
  2380  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2381  
  2382  	// Clean up the request timeout timer, we'll see how to proceed further based
  2383  	// on the actual delivered content
  2384  	if !req.timeout.Stop() {
  2385  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2386  		s.lock.Unlock()
  2387  		return nil
  2388  	}
  2389  
  2390  	// Response is valid, but check if peer is signalling that it does not have
  2391  // the requested data. For bytecode queries that means the peer is not
  2392  	// yet synced.
  2393  	if len(bytecodes) == 0 {
  2394  		logger.Debug("Peer rejected bytecode request")
  2395  		s.statelessPeers[peer.ID()] = struct{}{}
  2396  		s.lock.Unlock()
  2397  
  2398  		// Signal this request as failed, and ready for rescheduling
  2399  		s.scheduleRevertBytecodeRequest(req)
  2400  		return nil
  2401  	}
  2402  	s.lock.Unlock()
  2403  
  2404  	// Cross reference the requested bytecodes with the response to find gaps
  2405  	// that the serving node is missing
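        	// The scan below assumes the response preserves the request ordering:
        	// i walks the delivered codes, j the requested hashes, and any misses
        	// are left as nil entries to be rescheduled.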
  2406  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2407  	hash := make([]byte, 32)
  2408  
  2409  	codes := make([][]byte, len(req.hashes))
  2410  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2411  		// Find the next hash that we've been served, leaving misses with nils
  2412  		hasher.Reset()
  2413  		hasher.Write(bytecodes[i])
  2414  		hasher.Read(hash)
  2415  
  2416  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2417  			j++
  2418  		}
  2419  		if j < len(req.hashes) {
  2420  			codes[j] = bytecodes[i]
  2421  			j++
  2422  			continue
  2423  		}
  2424  		// We've either run out of hashes, or got unrequested data
  2425  		logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i)
  2426  		// Signal this request as failed, and ready for rescheduling
  2427  		s.scheduleRevertBytecodeRequest(req)
  2428  		return errors.New("unexpected bytecode")
  2429  	}
  2430  	// Response validated, send it to the scheduler for filling
  2431  	response := &bytecodeResponse{
  2432  		task:   req.task,
  2433  		hashes: req.hashes,
  2434  		codes:  codes,
  2435  	}
  2436  	select {
  2437  	case req.deliver <- response:
  2438  	case <-req.cancel:
  2439  	case <-req.stale:
  2440  	}
  2441  	return nil
  2442  }
  2443  
  2444  // OnStorage is a callback method to invoke when ranges of storage slots
  2445  // are received from a remote peer.
  2446  func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {
  2447  	// Gather some trace stats to aid in debugging issues
  2448  	var (
  2449  		hashCount int
  2450  		slotCount int
  2451  		size      common.StorageSize
  2452  	)
  2453  	for _, hashset := range hashes {
  2454  		size += common.StorageSize(common.HashLength * len(hashset))
  2455  		hashCount += len(hashset)
  2456  	}
  2457  	for _, slotset := range slots {
  2458  		for _, slot := range slotset {
  2459  			size += common.StorageSize(len(slot))
  2460  		}
  2461  		slotCount += len(slotset)
  2462  	}
  2463  	for _, node := range proof {
  2464  		size += common.StorageSize(len(node))
  2465  	}
  2466  	logger := peer.Log().New("reqid", id)
  2467  	logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size)
  2468  
  2469  	// Whether or not the response is valid, we can mark the peer as idle and
  2470  	// notify the scheduler to assign a new task. If the response is invalid,
  2471  	// we'll drop the peer in a bit.
  2472  	s.lock.Lock()
  2473  	if _, ok := s.peers[peer.ID()]; ok {
  2474  		s.storageIdlers[peer.ID()] = struct{}{}
  2475  	}
  2476  	select {
  2477  	case s.update <- struct{}{}:
  2478  	default:
  2479  	}
  2480  	// Ensure the response is for a valid request
  2481  	req, ok := s.storageReqs[id]
  2482  	if !ok {
  2483  		// Request stale, perhaps the peer timed out but came through in the end
  2484  		logger.Warn("Unexpected storage ranges packet")
  2485  		s.lock.Unlock()
  2486  		return nil
  2487  	}
  2488  	delete(s.storageReqs, id)
  2489  	s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size))
  2490  
  2491  	// Clean up the request timeout timer, we'll see how to proceed further based
  2492  	// on the actual delivered content
  2493  	if !req.timeout.Stop() {
  2494  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2495  		s.lock.Unlock()
  2496  		return nil
  2497  	}
  2498  
  2499  	// Reject the response if the hash sets and slot sets don't match, or if the
  2500  	// peer sent more data than requested.
  2501  	if len(hashes) != len(slots) {
  2502  		s.lock.Unlock()
  2503  		s.scheduleRevertStorageRequest(req) // reschedule request
  2504  		logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots))
  2505  		return errors.New("hash and slot set size mismatch")
  2506  	}
  2507  	if len(hashes) > len(req.accounts) {
  2508  		s.lock.Unlock()
  2509  		s.scheduleRevertStorageRequest(req) // reschedule request
  2510  		logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts))
  2511  		return errors.New("hash set larger than requested")
  2512  	}
  2513  	// Response is valid, but check if peer is signalling that it does not have
  2514  	// the requested data. For storage range queries that means the state being
  2515  	// retrieved was either already pruned remotely, or the peer is not yet
  2516  	// synced to our head.
  2517  	if len(hashes) == 0 {
  2518  		logger.Debug("Peer rejected storage request")
  2519  		s.statelessPeers[peer.ID()] = struct{}{}
  2520  		s.lock.Unlock()
  2521  		s.scheduleRevertStorageRequest(req) // reschedule request
  2522  		return nil
  2523  	}
  2524  	s.lock.Unlock()
  2525  
  2526  	// Reconstruct the partial tries from the response and verify them
  2527  	var cont bool
  2528  
  2529  	for i := 0; i < len(hashes); i++ {
  2530  		// Convert the keys and proofs into an internal format
  2531  		keys := make([][]byte, len(hashes[i]))
  2532  		for j, key := range hashes[i] {
  2533  			keys[j] = common.CopyBytes(key[:])
  2534  		}
  2535  		nodes := make(light.NodeList, 0, len(proof))
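        		// Only the last range in the response may be partial, so any attached
        		// proof belongs to it alone.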
  2536  		if i == len(hashes)-1 {
  2537  			for _, node := range proof {
  2538  				nodes = append(nodes, node)
  2539  			}
  2540  		}
  2541  		var err error
  2542  		if len(nodes) == 0 {
  2543  			// No proof has been attached, the response must cover the entire key
  2544  			// space and hash to the origin root.
  2545  			_, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
  2546  			if err != nil {
  2547  				s.scheduleRevertStorageRequest(req) // reschedule request
  2548  				logger.Warn("Storage slots failed proof", "err", err)
  2549  				return err
  2550  			}
  2551  		} else {
  2552  			// A proof was attached, the response is only partial, check that the
  2553  			// returned data is indeed part of the storage trie
  2554  			proofdb := nodes.NodeSet()
  2555  
  2556  			var end []byte
  2557  			if len(keys) > 0 {
  2558  				end = keys[len(keys)-1]
  2559  			}
  2560  			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
  2561  			if err != nil {
  2562  				s.scheduleRevertStorageRequest(req) // reschedule request
  2563  				logger.Warn("Storage range failed proof", "err", err)
  2564  				return err
  2565  			}
  2566  		}
  2567  	}
  2568  	// Partial tries reconstructed, send them to the scheduler for storage filling
  2569  	response := &storageResponse{
  2570  		mainTask: req.mainTask,
  2571  		subTask:  req.subTask,
  2572  		accounts: req.accounts,
  2573  		roots:    req.roots,
  2574  		hashes:   hashes,
  2575  		slots:    slots,
  2576  		cont:     cont,
  2577  	}
  2578  	select {
  2579  	case req.deliver <- response:
  2580  	case <-req.cancel:
  2581  	case <-req.stale:
  2582  	}
  2583  	return nil
  2584  }
  2585  
  2586  // OnTrieNodes is a callback method to invoke when a batch of trie nodes
  2587  // is received from a remote peer.
  2588  func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
  2589  	var size common.StorageSize
  2590  	for _, node := range trienodes {
  2591  		size += common.StorageSize(len(node))
  2592  	}
  2593  	logger := peer.Log().New("reqid", id)
  2594  	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
  2595  
  2596  	// Whether or not the response is valid, we can mark the peer as idle and
  2597  	// notify the scheduler to assign a new task. If the response is invalid,
  2598  	// we'll drop the peer in a bit.
  2599  	s.lock.Lock()
  2600  	if _, ok := s.peers[peer.ID()]; ok {
  2601  		s.trienodeHealIdlers[peer.ID()] = struct{}{}
  2602  	}
  2603  	select {
  2604  	case s.update <- struct{}{}:
  2605  	default:
  2606  	}
  2607  	// Ensure the response is for a valid request
  2608  	req, ok := s.trienodeHealReqs[id]
  2609  	if !ok {
  2610  		// Request stale, perhaps the peer timed out but came through in the end
  2611  		logger.Warn("Unexpected trienode heal packet")
  2612  		s.lock.Unlock()
  2613  		return nil
  2614  	}
  2615  	delete(s.trienodeHealReqs, id)
  2616  	s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
  2617  
  2618  	// Clean up the request timeout timer, we'll see how to proceed further based
  2619  	// on the actual delivered content
  2620  	if !req.timeout.Stop() {
  2621  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2622  		s.lock.Unlock()
  2623  		return nil
  2624  	}
  2625  
  2626  	// Response is valid, but check if peer is signalling that it does not have
  2627  // the requested data. For trienode heal queries that means the peer is not
  2628  	// yet synced.
  2629  	if len(trienodes) == 0 {
  2630  		logger.Debug("Peer rejected trienode heal request")
  2631  		s.statelessPeers[peer.ID()] = struct{}{}
  2632  		s.lock.Unlock()
  2633  
  2634  		// Signal this request as failed, and ready for rescheduling
  2635  		s.scheduleRevertTrienodeHealRequest(req)
  2636  		return nil
  2637  	}
  2638  	s.lock.Unlock()
  2639  
  2640  	// Cross reference the requested trienodes with the response to find gaps
  2641  	// that the serving node is missing
  2642  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2643  	hash := make([]byte, 32)
  2644  
  2645  	nodes := make([][]byte, len(req.hashes))
  2646  	for i, j := 0, 0; i < len(trienodes); i++ {
  2647  		// Find the next hash that we've been served, leaving misses with nils
  2648  		hasher.Reset()
  2649  		hasher.Write(trienodes[i])
  2650  		hasher.Read(hash)
  2651  
  2652  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2653  			j++
  2654  		}
  2655  		if j < len(req.hashes) {
  2656  			nodes[j] = trienodes[i]
  2657  			j++
  2658  			continue
  2659  		}
  2660  		// We've either run out of hashes, or been sent unrequested data
  2661  		logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i)
  2662  		// Signal this request as failed, and ready for rescheduling
  2663  		s.scheduleRevertTrienodeHealRequest(req)
  2664  		return errors.New("unexpected healing trienode")
  2665  	}
  2666  	// Response validated, send it to the scheduler for filling
  2667  	response := &trienodeHealResponse{
  2668  		paths:  req.paths,
  2669  		task:   req.task,
  2670  		hashes: req.hashes,
  2671  		nodes:  nodes,
  2672  	}
  2673  	select {
  2674  	case req.deliver <- response:
  2675  	case <-req.cancel:
  2676  	case <-req.stale:
  2677  	}
  2678  	return nil
  2679  }
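
// The cross-referencing loop in OnTrieNodes (and its twin in onHealByteCodes
// below) relies on the snap protocol serving items in the same order as the
// requested hashes, with omissions allowed. A hedged sketch of that matching
// step in isolation; matchOrdered is a hypothetical helper, not part of this
// file:
//
//	// matchOrdered pairs served blobs with their requested hashes, leaving
//	// nil entries for hashes the peer chose not to serve. It reports false
//	// if a blob is unrequested or out of order.
//	func matchOrdered(want []common.Hash, served [][]byte) ([][]byte, bool) {
//		out := make([][]byte, len(want))
//		j := 0
//		for _, blob := range served {
//			h := crypto.Keccak256Hash(blob)
//			for j < len(want) && h != want[j] {
//				j++ // this requested hash was skipped by the peer
//			}
//			if j == len(want) {
//				return nil, false // unrequested or out-of-order data
//			}
//			out[j] = blob
//			j++
//		}
//		return out, true
//	}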
  2680  
  2681  // onHealByteCodes is a callback method to invoke when a batch of contract
  2682  // bytecodes is received from a remote peer in the healing phase.
  2683  func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {
  2684  	var size common.StorageSize
  2685  	for _, code := range bytecodes {
  2686  		size += common.StorageSize(len(code))
  2687  	}
  2688  	logger := peer.Log().New("reqid", id)
  2689  	logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size)
  2690  
  2691  	// Whether or not the response is valid, we can mark the peer as idle and
  2692  	// notify the scheduler to assign a new task. If the response is invalid,
  2693  	// we'll drop the peer in a bit.
  2694  	s.lock.Lock()
  2695  	if _, ok := s.peers[peer.ID()]; ok {
  2696  		s.bytecodeHealIdlers[peer.ID()] = struct{}{}
  2697  	}
  2698  	select {
  2699  	case s.update <- struct{}{}:
  2700  	default:
  2701  	}
  2702  	// Ensure the response is for a valid request
  2703  	req, ok := s.bytecodeHealReqs[id]
  2704  	if !ok {
  2705  		// Request stale, perhaps the peer timed out but came through in the end
  2706  		logger.Warn("Unexpected bytecode heal packet")
  2707  		s.lock.Unlock()
  2708  		return nil
  2709  	}
  2710  	delete(s.bytecodeHealReqs, id)
  2711  	s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes))
  2712  
  2713  	// Clean up the request timeout timer, we'll see how to proceed further based
  2714  	// on the actual delivered content
  2715  	if !req.timeout.Stop() {
  2716  		// The timeout is already triggered, and this request will be reverted+rescheduled
  2717  		s.lock.Unlock()
  2718  		return nil
  2719  	}
  2720  
  2721  	// Response is valid, but check if peer is signalling that it does not have
  2722  	// the requested data. For bytecode range queries that means the peer is not
  2723  	// yet synced.
  2724  	if len(bytecodes) == 0 {
  2725  		logger.Debug("Peer rejected bytecode heal request")
  2726  		s.statelessPeers[peer.ID()] = struct{}{}
  2727  		s.lock.Unlock()
  2728  
  2729  		// Signal this request as failed, and ready for rescheduling
  2730  		s.scheduleRevertBytecodeHealRequest(req)
  2731  		return nil
  2732  	}
  2733  	s.lock.Unlock()
  2734  
  2735  	// Cross reference the requested bytecodes with the response to find gaps
  2736  	// that the serving node is missing
  2737  	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
  2738  	hash := make([]byte, 32)
  2739  
  2740  	codes := make([][]byte, len(req.hashes))
  2741  	for i, j := 0, 0; i < len(bytecodes); i++ {
  2742  		// Find the next hash that we've been served, leaving misses with nils
  2743  		hasher.Reset()
  2744  		hasher.Write(bytecodes[i])
  2745  		hasher.Read(hash)
  2746  
  2747  		for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
  2748  			j++
  2749  		}
  2750  		if j < len(req.hashes) {
  2751  			codes[j] = bytecodes[i]
  2752  			j++
  2753  			continue
  2754  		}
  2755  		// We've either run out of hashes, or been sent unrequested data
  2756  		logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
  2757  		// Signal this request as failed, and ready for rescheduling
  2758  		s.scheduleRevertBytecodeHealRequest(req)
  2759  		return errors.New("unexpected healing bytecode")
  2760  	}
  2761  	// Response validated, send it to the scheduler for filling
  2762  	response := &bytecodeHealResponse{
  2763  		task:   req.task,
  2764  		hashes: req.hashes,
  2765  		codes:  codes,
  2766  	}
  2767  	select {
  2768  	case req.deliver <- response:
  2769  	case <-req.cancel:
  2770  	case <-req.stale:
  2771  	}
  2772  	return nil
  2773  }
  2774  
  2775  // onHealState is a callback method to invoke when a flat state (account
  2776  // or storage slot) is downloaded during the healing stage. The flat states
  2777  // can be persisted blindly and fixed up later in the generation stage.
  2778  // Note it is not concurrency-safe; callers must handle synchronization.
  2779  func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
  2780  	if len(paths) == 1 {
  2781  		var account types.StateAccount
  2782  		if err := rlp.DecodeBytes(value, &account); err != nil {
  2783  			return nil // Returning the error here would drop the remote peer
  2784  		}
  2785  		blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash)
  2786  		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
  2787  		s.accountHealed += 1
  2788  		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
  2789  	}
  2790  	if len(paths) == 2 {
  2791  		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
  2792  		s.storageHealed += 1
  2793  		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
  2794  	}
  2795  	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
  2796  		s.stateWriter.Write() // It's fine to ignore the error here
  2797  		s.stateWriter.Reset()
  2798  	}
  2799  	return nil
  2800  }
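
// A usage sketch for onHealState (all values below are hypothetical): the
// healer passes one path element for an account leaf and two for a storage
// leaf, which is how the two branches above are selected:
//
//	// account leaf: key is the account hash, value is the RLP-encoded account
//	_ = s.onHealState([][]byte{accountHash.Bytes()}, accountRLP)
//	// storage leaf: keys are account hash + slot hash, value is the slot data
//	_ = s.onHealState([][]byte{accountHash.Bytes(), slotHash.Bytes()}, slotRLP)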
  2801  
  2802  // hashSpace is the total size of the 256 bit hash space for accounts.
  2803  var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
  2804  
  2805  // report calculates various status reports and provides them to the user.
  2806  func (s *Syncer) report(force bool) {
  2807  	if len(s.tasks) > 0 {
  2808  		s.reportSyncProgress(force)
  2809  		return
  2810  	}
  2811  	s.reportHealProgress(force)
  2812  }
  2813  
  2814  // reportSyncProgress calculates various status reports and provides them to the user.
  2815  func (s *Syncer) reportSyncProgress(force bool) {
  2816  	// Don't report all the events, just occasionally
  2817  	if !force && time.Since(s.logTime) < 8*time.Second {
  2818  		return
  2819  	}
  2820  	// Don't report anything until we have a meaningful progress
  2821  	synced := s.accountBytes + s.bytecodeBytes + s.storageBytes
  2822  	if synced == 0 {
  2823  		return
  2824  	}
  2825  	accountGaps := new(big.Int)
  2826  	for _, task := range s.tasks {
  2827  		accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))
  2828  	}
  2829  	accountFills := new(big.Int).Sub(hashSpace, accountGaps)
  2830  	if accountFills.BitLen() == 0 {
  2831  		return
  2832  	}
  2833  	s.logTime = time.Now()
  2834  	estBytes := float64(new(big.Int).Div(
  2835  		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
  2836  		accountFills,
  2837  	).Uint64())
  2838  	// Don't report anything until the overall estimate is meaningful
  2839  	if estBytes < 1.0 {
  2840  		return
  2841  	}
  2842  	elapsed := time.Since(s.startTime)
  2843  	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
  2844  
  2845  	// Create a mega progress report
  2846  	var (
  2847  		progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes)
  2848  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString())
  2849  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString())
  2850  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString())
  2851  	)
  2852  	log.Info("State sync in progress", "synced", progress, "state", synced,
  2853  		"accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed))
  2854  }
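
// A worked example of the extrapolation above (numbers are illustrative):
// if 10 GiB of state has been pulled and the remaining tasks still span half
// of the hash space (accountFills == hashSpace/2), then
//
//	estBytes = synced * hashSpace / accountFills = 20 GiB  -> progress 50.00%
//	estTime  = elapsed / synced * estBytes       = 2 * elapsed
//
// so the reported ETA is estTime - elapsed, i.e. as long again as the sync
// has already taken.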
  2855  
  2856  // reportHealProgress calculates various status reports and provides them to the user.
  2857  func (s *Syncer) reportHealProgress(force bool) {
  2858  	// Don't report all the events, just occasionally
  2859  	if !force && time.Since(s.logTime) < 8*time.Second {
  2860  		return
  2861  	}
  2862  	s.logTime = time.Now()
  2863  
  2864  	// Create a mega progress report
  2865  	var (
  2866  		trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString())
  2867  		bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString())
  2868  		accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString())
  2869  		storage  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString())
  2870  	)
  2871  	log.Info("State heal in progress", "accounts", accounts, "slots", storage,
  2872  		"codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending())
  2873  }
  2874  
  2875  // estimateRemainingSlots tries to determine roughly how many slots are left in
  2876  // a contract storage, based on the number of keys and the last hash. This method
  2877  // assumes that the hashes are lexicographically ordered and evenly distributed.
  2878  func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
  2879  	if last == (common.Hash{}) {
  2880  		return 0, errors.New("last hash empty")
  2881  	}
  2882  	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
  2883  	space.Div(space, last.Big())
  2884  	if !space.IsUint64() {
  2885  		// Estimate overflows uint64, probably due to too few or malicious slots
  2886  		return 0, errors.New("too few slots for estimation")
  2887  	}
  2888  	return space.Uint64() - uint64(hashes), nil
  2889  }
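
// A worked example for estimateRemainingSlots (values are illustrative):
// with 64 slot hashes received and the last hash sitting roughly halfway
// through the hash space (last ~ 2^255), the extrapolated total is
//
//	space = MaxBig256 * 64 / 2^255 ~ 128 slots
//
// so about 128 - 64 = 64 slots are estimated to remain.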
  2890  
  2891  // capacitySort implements sort.Interface, allowing sorting by peer message
  2892  // throughput. Note, callers should use sort.Reverse to get the desired effect
  2893  // of highest capacity being at the front.
  2894  type capacitySort struct {
  2895  	ids  []string
  2896  	caps []int
  2897  }
  2898  
  2899  func (s *capacitySort) Len() int {
  2900  	return len(s.ids)
  2901  }
  2902  
  2903  func (s *capacitySort) Less(i, j int) bool {
  2904  	return s.caps[i] < s.caps[j]
  2905  }
  2906  
  2907  func (s *capacitySort) Swap(i, j int) {
  2908  	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
  2909  	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
  2910  }
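
// A usage sketch for capacitySort (ids/caps are hypothetical inputs): since
// Less orders ascending, wrap the sorter in sort.Reverse to get the highest
// capacity peers first:
//
//	sort.Sort(sort.Reverse(&capacitySort{ids: ids, caps: caps}))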
  2911  
  2912  // healRequestSort implements sort.Interface, allowing trienode heal requests
  2913  // to be sorted, which is a prerequisite for merging storage requests.
  2914  type healRequestSort struct {
  2915  	paths     []string
  2916  	hashes    []common.Hash
  2917  	syncPaths []trie.SyncPath
  2918  }
  2919  
  2920  func (t *healRequestSort) Len() int {
  2921  	return len(t.hashes)
  2922  }
  2923  
  2924  func (t *healRequestSort) Less(i, j int) bool {
  2925  	a := t.syncPaths[i]
  2926  	b := t.syncPaths[j]
  2927  	switch bytes.Compare(a[0], b[0]) {
  2928  	case -1:
  2929  		return true
  2930  	case 1:
  2931  		return false
  2932  	}
  2933  	// First path components are identical, sort by length then second component
  2934  	if len(a) < len(b) {
  2935  		return true
  2936  	}
  2937  	if len(b) < len(a) {
  2938  		return false
  2939  	}
  2940  	if len(a) == 2 {
  2941  		return bytes.Compare(a[1], b[1]) < 0
  2942  	}
  2943  	return false
  2944  }
  2945  
  2946  func (t *healRequestSort) Swap(i, j int) {
  2947  	t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
  2948  	t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
  2949  	t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
  2950  }
  2951  
  2952  // Merge merges the pathsets, so that several storage requests concerning the
  2953  // same account are merged into one, to reduce bandwidth.
  2954  // Note: this operation is moot unless t has been sorted first.
  2955  func (t *healRequestSort) Merge() []TrieNodePathSet {
  2956  	var result []TrieNodePathSet
  2957  	for _, path := range t.syncPaths {
  2958  		pathset := TrieNodePathSet([][]byte(path))
  2959  		if len(path) == 1 {
  2960  			// It's an account reference.
  2961  			result = append(result, pathset)
  2962  		} else {
  2963  			// It's a storage reference.
  2964  			end := len(result) - 1
  2965  			if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
  2966  				// The account doesn't match last, create a new entry.
  2967  				result = append(result, pathset)
  2968  			} else {
  2969  				// It's the same account as the previous one, add to the storage
  2970  				// paths of that request.
  2971  				result[end] = append(result[end], pathset[1])
  2972  			}
  2973  		}
  2974  	}
  2975  	return result
  2976  }
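
// An illustrative Merge input/output (A1..A3 are account paths, s1/s2 are
// storage paths under A2), assuming the sync paths are already sorted:
//
//	input:  [A1] [A2 s1] [A2 s2] [A3]
//	output: [A1] [A2 s1 s2] [A3]
//
// i.e. consecutive storage requests for the same account collapse into a
// single TrieNodePathSet with one account path and many storage paths.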
  2977  
  2978  // sortByAccountPath takes hashes and paths, and sorts them. After that, it
  2979  // generates the TrieNodePathSets and merges paths that belong to the same account.
  2980  func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
  2981  	var syncPaths []trie.SyncPath
  2982  	for _, path := range paths {
  2983  		syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
  2984  	}
  2985  	n := &healRequestSort{paths, hashes, syncPaths}
  2986  	sort.Sort(n)
  2987  	pathsets := n.Merge()
  2988  	return n.paths, n.hashes, n.syncPaths, pathsets
  2989  }
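
// A hedged usage sketch for sortByAccountPath: the caller keeps the sorted
// paths/hashes/syncPaths for response bookkeeping in request order, while the
// merged pathsets are what go onto the wire in the trienode heal request:
//
//	paths, hashes, syncPaths, pathsets := sortByAccountPath(paths, hashes)
//	_ = syncPaths // retained alongside paths/hashes for response matching
//	// pathsets is sent to the remote peer in the trienode request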