github.com/core-coin/go-core/v2@v2.1.9/xcb/downloader/statesync.go

// Copyright 2017 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"hash"
	"sync"
	"time"

	"golang.org/x/crypto/sha3"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/core/state"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/trie"
)

// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
	nItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)
	trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
	codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
	timeout   time.Duration             // Maximum round trip time for this to complete
	timer     *time.Timer               // Timer to fire when the RTT timeout expires
	peer      *peerConnection           // Peer that we're requesting from
	delivered time.Time                 // Time when the packet was delivered (independent of when we process it)
	response  [][]byte                  // Response data of the peer (nil for timeouts)
	dropped   bool                      // Flag whether the peer dropped off early
}

// timedOut reports whether this request timed out. Note that a request whose
// peer dropped off early also carries a nil response; callers tell the two
// cases apart via the dropped flag.
func (req *stateReq) timedOut() bool {
	return req.response == nil
}

// stateSyncStats is a collection of progress stats to report during a state trie
// sync to RPC requests as well as to display in user logs.
type stateSyncStats struct {
	processed  uint64 // Number of state entries processed
	duplicate  uint64 // Number of state entries downloaded twice
	unexpected uint64 // Number of non-requested state entries received
	pending    uint64 // Number of still pending state entries
}

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
	// Create the state sync
	s := newStateSync(d, root)
	select {
	case d.stateSyncStart <- s:
		// If we tell the statesync to restart with a new root, we also need
		// to wait for it to actually start -- that is, until old requests
		// have timed out or been delivered
		<-s.started
	case <-d.quitCh:
		s.err = errCancelStateFetch
		close(s.done)
	}
	return s
}
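
// A typical call site looks roughly like the sketch below (a hedged sketch:
// the pivot variable and the surrounding synchronisation flow are assumptions,
// not part of this file):
//
//	sync := d.syncState(pivot.Root)
//	defer sync.Cancel()
//	if err := sync.Wait(); err != nil {
//		return err
//	}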

// stateFetcher manages the active state sync and accepts requests
// on its behalf. Only one state sync is ever active at a time; a newly
// requested root takes over from the running one via runStateSync.
func (d *Downloader) stateFetcher() {
	for {
		select {
		case s := <-d.stateSyncStart:
			for next := s; next != nil; {
				next = d.runStateSync(next)
			}
		case <-d.stateCh:
			// Ignore state responses while no sync is running.
		case <-d.quitCh:
			return
		}
	}
}

// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
	var (
		active   = make(map[string]*stateReq) // Currently in-flight requests
		finished []*stateReq                  // Completed or failed requests
		timeout  = make(chan *stateReq)       // Timed out active requests
	)
	// Run the state sync.
	log.Trace("State sync starting", "root", s.root)
	go s.run()
	defer s.Cancel()

	// Listen for peer departure events to cancel assigned tasks
	peerDrop := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
	defer peerSub.Unsubscribe()

	for {
		// Enable sending of the first buffered element if there is one.
		var (
			deliverReq   *stateReq
			deliverReqCh chan *stateReq
		)
		if len(finished) > 0 {
			deliverReq = finished[0]
			deliverReqCh = s.deliver
		}
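		// A nil deliverReqCh can never be selected, so the delivery case in
		// the select below stays disabled until at least one request has
		// finished. This is the standard Go idiom for toggling a select case.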

		select {
		// The stateSync lifecycle:
		case next := <-d.stateSyncStart:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return next

		case <-s.done:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return nil

		// Send the next finished request to the current sync:
		case deliverReqCh <- deliverReq:
			// Shift out the first request, but also set the emptied slot to nil for GC
			copy(finished, finished[1:])
			finished[len(finished)-1] = nil
			finished = finished[:len(finished)-1]

		// Handle incoming state packs:
		case pack := <-d.stateCh:
			// Discard any data not requested (or previously timed out)
			req := active[pack.PeerId()]
			if req == nil {
				log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.response = pack.(*statePack).states
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, pack.PeerId())

		// Handle dropped peer connections:
		case p := <-peerDrop:
			// Skip if no request is currently pending
			req := active[p.id]
			if req == nil {
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.dropped = true
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, p.id)

		// Handle timed-out requests:
		case req := <-timeout:
			// If the peer is already requesting something else, ignore the stale timeout.
			// This can happen when the timeout and the delivery happen simultaneously,
			// causing both pathways to trigger.
			if active[req.peer.id] != req {
				continue
			}
			req.delivered = time.Now()
			// Move the timed out data back into the download queue
			finished = append(finished, req)
			delete(active, req.peer.id)

		// Track outgoing state requests:
		case req := <-d.trackStateReq:
			// If an active request already exists for this peer, we have a problem. In
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous request times out. In this case
			// the first request is never honored, but we must not silently overwrite it
			// either, as that causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
				// Move the previous request to the finished set
				old.timer.Stop()
				old.dropped = true
				old.delivered = time.Now()
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
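			// The callback fires on its own goroutine, so the send blocks until
			// either this loop or spindownStateSync drains the timeout channel.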
			req.timer = time.AfterFunc(req.timeout, func() {
				timeout <- req
			})
			active[req.peer.id] = req
		}
	}
}

// spindownStateSync 'drains' the outstanding requests; some will be delivered
// and others will time out. This ensures that when the next stateSync starts
// working, all peers are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
	log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
	for len(active) > 0 {
		var (
			req    *stateReq
			reason string
		)
		select {
		// Handle (drop) incoming state packs:
		case pack := <-d.stateCh:
			req = active[pack.PeerId()]
			reason = "delivered"
		// Handle dropped peer connections:
		case p := <-peerDrop:
			req = active[p.id]
			reason = "peerdrop"
		// Handle timed-out requests:
		case req = <-timeout:
			reason = "timeout"
		}
		if req == nil {
			continue
		}
		req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
		req.timer.Stop()
		delete(active, req.peer.id)
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
	// The 'finished' set contains deliveries that we were going to pass to processing.
	// Those are now moot, but we still need to set those peers as idle, which would
	// otherwise have been done after processing
	for _, req := range finished {
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
}

// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	sched  *trie.Sync // State trie sync scheduler defining the tasks
	keccak hash.Hash  // SHA3 hasher to verify deliveries with

	trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval

	numUncommitted   int // Number of processed state entries not yet flushed to disk
	bytesUncommitted int // Number of bytes accumulated for the next database batch

	started chan struct{} // Started is signalled once the sync loop starts

	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)

	root common.Hash // State root currently being synced
}

// trieTask represents a single trie node download task, containing the set of
// peers that retrieval was already attempted from, to detect stalled syncs and
// abort.
type trieTask struct {
	path     [][]byte
	attempts map[string]struct{}
}

// codeTask represents a single byte code download task, containing the set of
// peers that retrieval was already attempted from, to detect stalled syncs and
// abort.
type codeTask struct {
	attempts map[string]struct{}
}

// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate it.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	return &stateSync{
		d:         d,
		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom),
		keccak:    sha3.New256(),
		trieTasks: make(map[common.Hash]*trieTask),
		codeTasks: make(map[common.Hash]*codeTask),
		deliver:   make(chan *stateReq),
		cancel:    make(chan struct{}),
		done:      make(chan struct{}),
		started:   make(chan struct{}),
		root:      root,
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	s.err = s.loop()
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() { close(s.cancel) })
	return s.Wait()
}

// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending them out) as well as for
// the processing of inbound data. Note that the loop does not directly receive
// data from peers, rather those are buffered up in the downloader and pushed
// here asynchronously. The reason is to decouple processing from data receipt
// and timeouts.
func (s *stateSync) loop() (err error) {
	close(s.started)
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if req.nItems <= 2 && !req.dropped && req.timedOut() {
				// Two items are the minimum requested; if even that times out,
				// we've no use for this peer at the moment.
				log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					// The dropPeer method is nil when `--copydb` is used for a local copy.
					// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
					req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()

					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			req.peer.SetNodeDataIdle(delivered, req.delivered)
			if err != nil {
				log.Warn("Node data write error", "err", err)
				return err
			}
		}
	}
	return nil
}

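// commit flushes the trie data accumulated by the sync scheduler into a
// database batch and writes it out. Unless forced, the write is deferred
// until at least xcbdb.IdealBatchSize bytes have piled up, to amortize the
// database write overhead.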
func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < xcbdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.d.stateDB.NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}

// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.requestRTT())
		req := &stateReq{peer: p, timeout: s.d.requestTTL()}

		nodes, _, codes := s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(nodes)+len(codes) > 0 {
			req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
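			// Racing the request against the cancel channels below ensures we
			// don't block forever if the sync is torn down while the tracker
			// is no longer listening.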
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under xcb/6x
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
	// Refill available tasks from the scheduler. Note that the short variable
	// declaration deliberately shadows the named return values; the refilled
	// hashes only feed the retry maps below.
	if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
		nodes, paths, codes := s.sched.Missing(fill)
		for i, hash := range nodes {
			s.trieTasks[hash] = &trieTask{
				path:     paths[i],
				attempts: make(map[string]struct{}),
			}
		}
		for _, hash := range codes {
			s.codeTasks[hash] = &codeTask{
				attempts: make(map[string]struct{}),
			}
		}
	}
	// Find tasks that haven't been tried with the request's peer. Prefer code
	// over trie nodes as those can be written to disk and forgotten about.
	nodes = make([]common.Hash, 0, n)
	paths = make([]trie.SyncPath, 0, n)
	codes = make([]common.Hash, 0, n)

	req.trieTasks = make(map[common.Hash]*trieTask, n)
	req.codeTasks = make(map[common.Hash]*codeTask, n)

	for hash, t := range s.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}
		codes = append(codes, hash)
		req.codeTasks[hash] = t
		delete(s.codeTasks, hash)
	}
	for hash, t := range s.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}

		nodes = append(nodes, hash)
		paths = append(paths, t.path)

		req.trieTasks[hash] = t
		delete(s.trieTasks, hash)
	}
	req.nItems = uint16(len(nodes) + len(codes))
	return nodes, paths, codes
}

// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered. It returns the number of items the peer actually managed to
// deliver, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	defer func(start time.Time) {
		if duplicate > 0 || unexpected > 0 {
			s.updateStats(0, duplicate, unexpected, time.Since(start))
		}
	}(time.Now())
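	// Note: successfully written entries are not reported here; they are
	// accounted for later, when the accumulated data is flushed to the
	// database (see commit).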

	// Iterate over all the delivered data and inject one-by-one into the trie
	for _, blob := range req.response {
		hash, err := s.processNodeData(blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case trie.ErrNotRequested:
			unexpected++
		case trie.ErrAlreadyProcessed:
			duplicate++
		default:
			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
		// Delete from both queues (one delivery is enough for the syncer)
		delete(req.trieTasks, hash)
		delete(req.codeTasks, hash)
	}
	// Put unfulfilled tasks back into the retry queue
	npeers := s.d.peers.Len()
	for hash, task := range req.trieTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.trieTasks[hash] = task
	}
	for hash, task := range req.codeTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.codeTasks[hash] = task
	}
	return successful, nil
}

// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning the hash of the processed blob and any
// error that occurred.
func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
	res := trie.SyncResult{Data: blob}
	s.keccak.Reset()
	s.keccak.Write(blob)
	s.keccak.Sum(res.Hash[:0])
	err := s.sched.Process(res)
	return res.Hash, err
}
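
// For reference, the hash computed above is equivalent to the sketch below
// (illustrative only; it assumes common.BytesToHash is available, as in the
// common package of this repository):
//
//	h := sha3.New256()
//	h.Write(blob)
//	hash := common.BytesToHash(h.Sum(nil))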

// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	s.d.syncStatsLock.Lock()
	defer s.d.syncStatsLock.Unlock()

	s.d.syncStatsState.pending = uint64(s.sched.Pending())
	s.d.syncStatsState.processed += uint64(written)
	s.d.syncStatsState.duplicate += uint64(duplicate)
	s.d.syncStatsState.unexpected += uint64(unexpected)

	if written > 0 || duplicate > 0 || unexpected > 0 {
		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
	}
	if written > 0 {
		rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
	}
}