github.com/jimmyx0x/go-ethereum@v1.10.28/les/downloader/statesync.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)

// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
	nItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)
	trieTasks map[string]*trieTask      // Trie node download tasks to track previous attempts
	codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
	timeout   time.Duration             // Maximum round trip time for this to complete
	timer     *time.Timer               // Timer to fire when the RTT timeout expires
	peer      *peerConnection           // Peer that we're requesting from
	delivered time.Time                 // Time when the packet was delivered (independent of when we process it)
	response  [][]byte                  // Response data of the peer (nil for timeouts)
	dropped   bool                      // Flag whether the peer dropped off early
}

// timedOut reports whether this request timed out.
func (req *stateReq) timedOut() bool {
	return req.response == nil
}

// stateSyncStats is a collection of progress stats to report during a state trie
// sync to RPC requests as well as to display in user logs.
type stateSyncStats struct {
	processed  uint64 // Number of state entries processed
	duplicate  uint64 // Number of state entries downloaded twice
	unexpected uint64 // Number of non-requested state entries received
	pending    uint64 // Number of still pending state entries
}

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
	// Create the state sync
	s := newStateSync(d, root)
	select {
	case d.stateSyncStart <- s:
		// If we tell the statesync to restart with a new root, we also need
		// to wait for it to actually start -- i.e. until the old requests have
		// timed out or been delivered
		<-s.started
	case <-d.quitCh:
		s.err = errCancelStateFetch
		close(s.done)
	}
	return s
}
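
// syncStateAndWait is an illustrative usage sketch and not part of the
// original file: it kicks off a state sync for the given root and blocks
// until the sync terminates. Because syncState waits on s.started, the sync
// is guaranteed to be running (or already failed) before Wait is entered.
func syncStateAndWait(d *Downloader, root common.Hash) error {
	s := d.syncState(root) // hand the sync over to the stateFetcher loop
	return s.Wait()        // block until done is closed, then report s.err
}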

// stateFetcher manages the active state sync and accepts requests
// on its behalf.
func (d *Downloader) stateFetcher() {
	for {
		select {
		case s := <-d.stateSyncStart:
			for next := s; next != nil; {
				next = d.runStateSync(next)
			}
		case <-d.stateCh:
			// Ignore state responses while no sync is running.
		case <-d.quitCh:
			return
		}
	}
}

// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
	var (
		active   = make(map[string]*stateReq) // Currently in-flight requests
		finished []*stateReq                  // Completed or failed requests
		timeout  = make(chan *stateReq)       // Timed out active requests
	)
	log.Trace("State sync starting", "root", s.root)

	defer func() {
		// Cancel active request timers on exit. Also set peers to idle so they're
		// available for the next sync.
		for _, req := range active {
			req.timer.Stop()
			req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
		}
	}()
	go s.run()
	defer s.Cancel()

	// Listen for peer departure events to cancel assigned tasks
	peerDrop := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
	defer peerSub.Unsubscribe()

	for {
		// Enable sending of the first buffered element if there is one.
		var (
			deliverReq   *stateReq
			deliverReqCh chan *stateReq
		)
		if len(finished) > 0 {
			deliverReq = finished[0]
			deliverReqCh = s.deliver
		}

		select {
		// The stateSync lifecycle:
		case next := <-d.stateSyncStart:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return next

		case <-s.done:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return nil

		// Send the next finished request to the current sync:
		case deliverReqCh <- deliverReq:
			// Shift out the first request, but also set the emptied slot to nil for GC
			copy(finished, finished[1:])
			finished[len(finished)-1] = nil
			finished = finished[:len(finished)-1]

		// Handle incoming state packs:
		case pack := <-d.stateCh:
			// Discard any data not requested (or previously timed out)
			req := active[pack.PeerId()]
			if req == nil {
				log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.response = pack.(*statePack).states
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, pack.PeerId())

		// Handle dropped peer connections:
		case p := <-peerDrop:
			// Skip if no request is currently pending
			req := active[p.id]
			if req == nil {
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.dropped = true
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, p.id)

		// Handle timed-out requests:
		case req := <-timeout:
			// If the peer is already requesting something else, ignore the stale timeout.
			// This can happen when the timeout and the delivery happen simultaneously,
			// causing both pathways to trigger.
			if active[req.peer.id] != req {
				continue
			}
			req.delivered = time.Now()
			// Move the timed out data back into the download queue
			finished = append(finished, req)
			delete(active, req.peer.id)

		// Track outgoing state requests:
		case req := <-d.trackStateReq:
			// If an active request already exists for this peer, we have a problem. In
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous request times out. In this case
			// the first request is never honored, but we must not silently overwrite it,
			// as that causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
				// Move the previous request to the finished set
				old.timer.Stop()
				old.dropped = true
				old.delivered = time.Now()
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
			req.timer = time.AfterFunc(req.timeout, func() {
				timeout <- req
			})
			active[req.peer.id] = req
		}
	}
}
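
// sendIfReady is an illustrative sketch, not part of the original file,
// restating the nil-channel trick used at the top of the select loop above:
// a send on a nil channel blocks forever, so leaving deliverReqCh nil while
// the finished list is empty disables that case entirely, and pointing it at
// the delivery channel re-arms it.
func sendIfReady(finished []*stateReq, deliver chan *stateReq) []*stateReq {
	var (
		deliverReq   *stateReq
		deliverReqCh chan *stateReq // nil: the send case below can never fire
	)
	if len(finished) > 0 {
		deliverReq, deliverReqCh = finished[0], deliver // enable the send case
	}
	select {
	case deliverReqCh <- deliverReq:
		return finished[1:] // the head element was consumed
	default:
		return finished // receiver not ready, or case disabled
	}
}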

// spindownStateSync 'drains' the outstanding requests; some will be delivered
// and others will time out. This is to ensure that when the next stateSync
// starts working, all peers are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
	log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
	for len(active) > 0 {
		var (
			req    *stateReq
			reason string
		)
		select {
		// Handle (drop) incoming state packs:
		case pack := <-d.stateCh:
			req = active[pack.PeerId()]
			reason = "delivered"
		// Handle dropped peer connections:
		case p := <-peerDrop:
			req = active[p.id]
			reason = "peerdrop"
		// Handle timed-out requests:
		case req = <-timeout:
			reason = "timeout"
		}
		if req == nil {
			continue
		}
		req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
		req.timer.Stop()
		delete(active, req.peer.id)
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
	// The 'finished' set contains deliveries that we were going to pass to processing.
	// Those are now moot, but we still need to set those peers as idle, which would
	// otherwise have been done after processing
	for _, req := range finished {
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
}

// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	root   common.Hash        // State root currently being synced
	sched  *trie.Sync         // State trie sync scheduler defining the tasks
	keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with

	trieTasks map[string]*trieTask      // Set of trie node tasks currently queued for retrieval, indexed by path
	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash

	numUncommitted   int
	bytesUncommitted int

	started chan struct{} // Started is signalled once the sync loop starts

	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)
}

// trieTask represents a single trie node download task, containing a set of
// peers from which retrieval has already been attempted, used to detect
// stalled syncs and abort.
type trieTask struct {
	hash     common.Hash
	path     [][]byte
	attempts map[string]struct{}
}

// codeTask represents a single byte code download task, containing a set of
// peers from which retrieval has already been attempted, used to detect
// stalled syncs and abort.
type codeTask struct {
	attempts map[string]struct{}
}

// newStateSync creates a new state trie download scheduler. This method does
// not yet start the sync; the caller must call run to initiate it.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	// Hack the node scheme here. This is dead code, not used by the light
	// client at all; it only exists to keep the tests passing.
	scheme := trie.NewDatabase(rawdb.NewMemoryDatabase()).Scheme()
	return &stateSync{
		d:         d,
		root:      root,
		sched:     state.NewStateSync(root, d.stateDB, nil, scheme),
		keccak:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
		trieTasks: make(map[string]*trieTask),
		codeTasks: make(map[common.Hash]*codeTask),
		deliver:   make(chan *stateReq),
		cancel:    make(chan struct{}),
		done:      make(chan struct{}),
		started:   make(chan struct{}),
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	close(s.started)
	if s.d.snapSync {
		s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
	} else {
		s.err = s.loop()
	}
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() {
		close(s.cancel)
	})
	return s.Wait()
}
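
// runWithAbort is an illustrative lifecycle sketch, not part of the original
// file: run must be started exactly once, after which Cancel and Wait are
// safe to call from any goroutine, any number of times, because cancel is
// guarded by cancelOnce and done is only ever closed by run.
func runWithAbort(d *Downloader, root common.Hash, abort <-chan struct{}) error {
	s := newStateSync(d, root)
	go s.run()
	go func() {
		<-abort
		s.Cancel() // safe even if the sync has already finished
	}()
	return s.Wait()
}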

// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending the tasks to them) as
// well as for the processing of inbound data. Note that the loop does not
// directly receive data from peers, rather those are buffered up in the
// downloader and pushed here async. The reason is to decouple processing from
// data receipt and timeouts.
func (s *stateSync) loop() (err error) {
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if req.nItems <= 2 && !req.dropped && req.timedOut() {
				// 2 items are the minimum requested; if even that times out, we have
				// no use for this peer at the moment.
				log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					// The dropPeer method is nil when `--copydb` is used for a local copy.
					// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
					req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()

					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			req.peer.SetNodeDataIdle(delivered, req.delivered)
			if err != nil {
				log.Warn("Node data write error", "err", err)
				return err
			}
		}
	}
	return nil
}
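
// isStalling restates the peer-dropping rule from the loop above as a
// standalone predicate (an illustrative sketch, not part of the original
// file): a peer only counts as stalling if the smallest possible request
// (2 items) timed out while the peer stayed connected, separating a genuinely
// unresponsive peer from one that was merely assigned too much work.
func isStalling(req *stateReq) bool {
	return req.nItems <= 2 && !req.dropped && req.timedOut()
}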

// commit flushes any trie data accumulated by the scheduler into a database
// batch, either when forced or once enough bytes have piled up to fill an
// ideal batch.
func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.d.stateDB.NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}
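
// shouldFlush isolates the batching policy used by commit above (an
// illustrative sketch, not part of the original file): writes accumulate
// until they cross ethdb.IdealBatchSize and are then flushed as a single
// batch to amortize database overhead; force flushes whatever is pending,
// e.g. on shutdown.
func shouldFlush(bytesUncommitted int, force bool) bool {
	return force || bytesUncommitted >= ethdb.IdealBatchSize
}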

// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip())
		req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()}

		nodes, _, codes := s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(nodes)+len(codes) > 0 {
			req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
	// Refill available tasks from the scheduler.
	if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
		paths, hashes, codes := s.sched.Missing(fill)
		for i, path := range paths {
			s.trieTasks[path] = &trieTask{
				hash:     hashes[i],
				path:     trie.NewSyncPath([]byte(path)),
				attempts: make(map[string]struct{}),
			}
		}
		for _, hash := range codes {
			s.codeTasks[hash] = &codeTask{
				attempts: make(map[string]struct{}),
			}
		}
	}
	// Find tasks that haven't been tried with the request's peer. Prefer code
	// over trie nodes as those can be written to disk and forgotten about.
	nodes = make([]common.Hash, 0, n)
	paths = make([]trie.SyncPath, 0, n)
	codes = make([]common.Hash, 0, n)

	req.trieTasks = make(map[string]*trieTask, n)
	req.codeTasks = make(map[common.Hash]*codeTask, n)

	for hash, t := range s.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}
		codes = append(codes, hash)
		req.codeTasks[hash] = t
		delete(s.codeTasks, hash)
	}
	for path, t := range s.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}

		nodes = append(nodes, t.hash)
		paths = append(paths, t.path)

		req.trieTasks[path] = t
		delete(s.trieTasks, path)
	}
	req.nItems = uint16(len(nodes) + len(codes))
	return nodes, paths, codes
}

// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered. Returns the number of items the peer actually managed to deliver,
// and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	defer func(start time.Time) {
		if duplicate > 0 || unexpected > 0 {
			s.updateStats(0, duplicate, unexpected, time.Since(start))
		}
	}(time.Now())

	// Iterate over all the delivered data and inject one-by-one into the trie
	for _, blob := range req.response {
		hash, err := s.processNodeData(req.trieTasks, req.codeTasks, blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case trie.ErrNotRequested:
			unexpected++
		case trie.ErrAlreadyProcessed:
			duplicate++
		default:
			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
	}
	// Put unfulfilled tasks back into the retry queue
	npeers := s.d.peers.Len()
	for path, task := range req.trieTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", task.hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.trieTasks[path] = task
	}
	for hash, task := range req.codeTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.codeTasks[hash] = task
	}
	return successful, nil
}
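
// exhausted restates the abort condition used by process above (an
// illustrative sketch, not part of the original file): once an item has been
// attempted against every connected peer without being delivered, no peer can
// supply it, so the sync is treated as poisoned and aborted rather than
// retried forever.
func exhausted(attempts map[string]struct{}, npeers int) bool {
	return len(attempts) >= npeers
}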

// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
//
// If multiple requests correspond to the same hash, this method will inject the
// blob as a result for the first one only, leaving the remaining duplicates to
// be fetched again.
func (s *stateSync) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, blob []byte) (common.Hash, error) {
	var hash common.Hash
	s.keccak.Reset()
	s.keccak.Write(blob)
	s.keccak.Read(hash[:])

	if _, present := codeTasks[hash]; present {
		err := s.sched.ProcessCode(trie.CodeSyncResult{
			Hash: hash,
			Data: blob,
		})
		delete(codeTasks, hash)
		return hash, err
	}
	for path, task := range nodeTasks {
		if task.hash == hash {
			err := s.sched.ProcessNode(trie.NodeSyncResult{
				Path: path,
				Data: blob,
			})
			delete(nodeTasks, path)
			return hash, err
		}
	}
	return common.Hash{}, trie.ErrNotRequested
}
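
// hashBlob shows the content addressing that processNodeData relies on (an
// illustrative sketch, not part of the original file): every state item is
// identified by the keccak256 hash of its contents, so a delivered blob can
// be matched back to the task that requested it without any extra metadata
// on the wire.
func hashBlob(blob []byte) common.Hash {
	var hash common.Hash
	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
	hasher.Write(blob)
	hasher.Read(hash[:])
	return hash
}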

// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	s.d.syncStatsLock.Lock()
	defer s.d.syncStatsLock.Unlock()

	s.d.syncStatsState.pending = uint64(s.sched.Pending())
	s.d.syncStatsState.processed += uint64(written)
	s.d.syncStatsState.duplicate += uint64(duplicate)
	s.d.syncStatsState.unexpected += uint64(unexpected)

	if written > 0 || duplicate > 0 || unexpected > 0 {
		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
	}
	//if written > 0 {
	//rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
	//}
}