github.com/bigzoro/my_simplechain@v0.0.0-20240315012955-8ad0a2a29bb9/parallel_trie/sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"github.com/bigzoro/my_simplechain/common"
	"github.com/bigzoro/my_simplechain/common/prque"
	"github.com/bigzoro/my_simplechain/ethdb"
	"github.com/bigzoro/my_simplechain/log"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	raw  bool        // Whether this is a raw entry (code) or a trie node

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	depth   int        // Depth level within the trie at which the node is located (used to prioritise DFS)
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncResult is a simple struct returning a missing node along with its request
// hash.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	batch map[common.Hash][]byte // In-memory membatch of recently completed items
	order []common.Hash          // Order of completion to prevent out-of-order data loss
}

// newSyncMemBatch allocates a new memory buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		batch: make(map[common.Hash][]byte),
		order: make([]common.Hash, 0, 256),
	}
}

// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	requests map[common.Hash]*request // Pending requests pertaining to a key hash
	queue    *prque.Prque             // Priority queue with the pending requests
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		requests: make(map[common.Hash]*request),
		queue:    prque.New(nil),
	}
	ts.AddSubTrie(root, 0, common.Hash{}, callback)
	return ts
}

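// Usage sketch (illustrative, not part of the original scheduler): a minimal
// driver loop that repeatedly asks the scheduler for missing hashes, fetches
// them, feeds the results back and flushes the membatch. fetchNodes is a
// hypothetical helper standing in for the actual network retrieval, and db is
// assumed to be a full ethdb database implementing both the reader and writer
// interfaces.
//
//	sched := NewSync(root, db, nil)
//	for sched.Pending() > 0 {
//		hashes := sched.Missing(128)
//		results := fetchNodes(hashes) // hypothetical: returns []SyncResult
//		if _, index, err := sched.Process(results); err != nil {
//			log.Error("Failed to process trie node", "index", index, "err", err)
//		}
//		if _, err := sched.Commit(db); err != nil {
//			log.Error("Failed to persist trie nodes", "err", err)
//		}
//	}
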
// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if _, ok := s.membatch.batch[root]; ok {
		return
	}
	// If the node can already be decoded from the local database, it is known
	key := root.Bytes()
	blob, _ := s.database.Get(key)
	log.Debug("sync blob", "root", root, "depth", depth, "parent", parent)
	if local, err := decodeNode(key, blob); local != nil && err == nil {
		return
	}
	// Assemble the new sub-trie sync request
	req := &request{
		hash:     root,
		depth:    depth,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddRawEntry schedules the direct retrieval of a state entry that should not be
// interpreted as a trie node, but rather accepted and stored into the database
// as is. This method's goal is to support misc state metadata retrievals (e.g.
// contract code).
func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if _, ok := s.membatch.batch[hash]; ok {
		return
	}
	if ok, _ := s.database.Has(hash.Bytes()); ok {
		return
	}
	// Assemble the new raw entry sync request
	req := &request{
		hash:  hash,
		raw:   true,
		depth: depth,
	}
	// If this entry has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

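// Illustrative use (not from the original code): during state sync, contract
// bytecode is typically scheduled through AddRawEntry so that the blob is
// stored verbatim rather than decoded as a trie node. codeHash and accountHash
// are hypothetical values taken from a previously decoded account leaf:
//
//	sched.AddRawEntry(codeHash, depth+1, accountHash)
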
// Missing retrieves the known missing nodes from the trie for retrieval. A max
// of zero means there is no cap on the number of returned hashes.
func (s *Sync) Missing(max int) []common.Hash {
	var requests []common.Hash
	for !s.queue.Empty() && (max == 0 || len(requests) < max) {
		requests = append(requests, s.queue.PopItem().(common.Hash))
	}
	return requests
}

// Process injects a batch of retrieved trie node data, returning whether
// anything was committed to the internal memory batch and, on failure, the
// index of the entry that could not be processed.
func (s *Sync) Process(results []SyncResult) (bool, int, error) {
	committed := false

	for i, item := range results {
		// If the item was not requested, bail out
		request := s.requests[item.Hash]
		if request == nil {
			return committed, i, ErrNotRequested
		}
		if request.data != nil {
			return committed, i, ErrAlreadyProcessed
		}
		// If the item is a raw entry request, commit directly
		if request.raw {
			request.data = item.Data
			s.commit(request)
			committed = true
			continue
		}
		// Decode the node data content and update the request
		node, err := decodeNode(item.Hash[:], item.Data)
		if err != nil {
			return committed, i, err
		}
		request.data = item.Data

		// Create and schedule a request for all the children nodes
		requests, err := s.children(request, node)
		if err != nil {
			return committed, i, err
		}
		if len(requests) == 0 && request.deps == 0 {
			s.commit(request)
			committed = true
			continue
		}
		request.deps += len(requests)
		for _, child := range requests {
			s.schedule(child)
		}
	}
	return committed, 0, nil
}

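// Sketch of how a caller might assemble the results slice (illustrative): each
// fetched blob is keyed by its own hash, which the downloader typically
// recomputes from the data itself. crypto.Keccak256Hash is assumed to be
// available from this fork's crypto package:
//
//	results := make([]SyncResult, len(blobs))
//	for i, blob := range blobs {
//		results[i] = SyncResult{Hash: crypto.Keccak256Hash(blob), Data: blob}
//	}
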
// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning the number of items written and any error that occurred.
func (s *Sync) Commit(dbw ethdb.KeyValueWriter) (int, error) {
	// Dump the membatch into the database writer dbw
	for i, key := range s.membatch.order {
		if err := dbw.Put(key[:], s.membatch.batch[key]); err != nil {
			return i, err
		}
	}
	written := len(s.membatch.order)

	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return written, nil
}

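// In practice the membatch is usually flushed through a database batch so the
// writes hit disk atomically. A minimal sketch, assuming the underlying ethdb
// database exposes the usual NewBatch/Write API:
//
//	batch := db.NewBatch()
//	if _, err := sched.Commit(batch); err != nil {
//		return err
//	}
//	if err := batch.Write(); err != nil {
//		return err
//	}
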
// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.requests)
}

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	// If we're already requesting this node, add a new reference and stop
	if old, ok := s.requests[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	// Schedule the request for future retrieval
	s.queue.Push(req.hash, int64(req.depth))
	s.requests[req.hash] = req
}

// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, regardless of whether they are known
	type child struct {
		node  node
		depth int
	}
	children := []child{}

	switch node := (object).(type) {
	case *shortNode:
		children = []child{{
			node:  node.Val,
			depth: req.depth + len(node.Key),
		}}
	case *fullNode:
		// A full node has 16 nibble branches plus a 17th value slot
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node:  node.Children[i],
					depth: req.depth + 1,
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				if err := req.callback(node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if _, ok := s.membatch.batch[hash]; ok {
				continue
			}
			if ok, _ := s.database.Has(node); ok {
				continue
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				hash:     hash,
				parents:  []*request{req},
				depth:    child.depth,
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

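// Illustrative only: the LeafCallback invoked above is how a state sync layer
// typically expands account leaves into further work. A sketch, assuming the
// callback signature is func(leaf []byte, parent common.Hash) error and that
// each leaf is an RLP-encoded account with Root and CodeHash fields (the exact
// account layout in this fork is an assumption):
//
//	var callback LeafCallback
//	callback = func(leaf []byte, parent common.Hash) error {
//		var account struct {
//			Nonce    uint64
//			Balance  *big.Int
//			Root     common.Hash
//			CodeHash []byte
//		}
//		if err := rlp.DecodeBytes(leaf, &account); err != nil {
//			return err
//		}
//		sched.AddSubTrie(account.Root, 64, parent, callback)
//		sched.AddRawEntry(common.BytesToHash(account.CodeHash), 64, parent)
//		return nil
//	}
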
// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	s.membatch.batch[req.hash] = req.data
	s.membatch.order = append(s.membatch.order, req.hash)

	delete(s.requests, req.hash)

	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}