github.com/theQRL/go-zond@v0.2.1/trie/triedb/pathdb/disklayer.go

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pathdb

import (
	"errors"
	"fmt"
	"sync"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/crypto"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/trie/trienode"
	"github.com/theQRL/go-zond/trie/triestate"
	"golang.org/x/crypto/sha3"
)

// diskLayer is a low-level persistent layer built on top of a key-value store.
type diskLayer struct {
	root   common.Hash      // Immutable, root hash of the state for which this layer was made
	id     uint64           // Immutable, corresponding state id
	db     *Database        // Path-based trie database
	cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
	buffer *nodebuffer      // Node buffer to aggregate writes
	stale  bool             // Signals that the layer became stale (state progressed)
	lock   sync.RWMutex     // Lock used to protect stale flag
}

// newDiskLayer creates a new disk layer based on the given arguments.
func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer {
	// Initialize a clean cache if the memory allowance is not zero
	// or reuse the provided cache if it is not nil (inherited from
	// the original disk layer).
	if cleans == nil && db.config.CleanCacheSize != 0 {
		cleans = fastcache.New(db.config.CleanCacheSize)
	}
	return &diskLayer{
		root:   root,
		id:     id,
		db:     db,
		cleans: cleans,
		buffer: buffer,
	}
}
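// A minimal usage sketch, assuming root, id and buffer have been recovered
// elsewhere (e.g. by the journal loader); the clean cache is lazily allocated
// by newDiskLayer itself when nil is passed:
//
//	dl := newDiskLayer(root, id, db, nil, buffer)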

// rootHash implements the layer interface, returning the root hash of the
// corresponding state.
func (dl *diskLayer) rootHash() common.Hash {
	return dl.root
}

// stateID implements the layer interface, returning the state id of the disk layer.
func (dl *diskLayer) stateID() uint64 {
	return dl.id
}

// parentLayer implements the layer interface, returning nil as there's no layer
// below the disk.
func (dl *diskLayer) parentLayer() layer {
	return nil
}

// isStale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diskLayer) isStale() bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.stale
}

// markStale sets the stale flag to true.
func (dl *diskLayer) markStale() {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	if dl.stale {
		panic("triedb disk layer is stale") // we've committed into the same base from two children, boom
	}
	dl.stale = true
}

// Node implements the layer interface, retrieving the trie node with the
// provided node info. An error is returned if the layer is already stale or
// if the node read from the backend does not match the expected hash.
func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return nil, errSnapshotStale
	}
	// Try to retrieve the trie node from the not-yet-written
	// node buffer first. Note the buffer is lock free since
	// it's impossible to mutate the buffer before tagging the
	// layer as stale.
	n, err := dl.buffer.node(owner, path, hash)
	if err != nil {
		return nil, err
	}
	if n != nil {
		dirtyHitMeter.Mark(1)
		dirtyReadMeter.Mark(int64(len(n.Blob)))
		return n.Blob, nil
	}
	dirtyMissMeter.Mark(1)

	// Try to retrieve the trie node from the clean memory cache.
	key := cacheKey(owner, path)
	if dl.cleans != nil {
		if blob := dl.cleans.Get(nil, key); len(blob) > 0 {
			h := newHasher()
			defer h.release()

			got := h.hash(blob)
			if got == hash {
				cleanHitMeter.Mark(1)
				cleanReadMeter.Mark(int64(len(blob)))
				return blob, nil
			}
			cleanFalseMeter.Mark(1)
			log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got)
		}
		cleanMissMeter.Mark(1)
	}
	// Try to retrieve the trie node from the disk.
	var (
		nBlob []byte
		nHash common.Hash
	)
	if owner == (common.Hash{}) {
		nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path)
	} else {
		nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
	}
	if nHash != hash {
		diskFalseMeter.Mark(1)
		log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
		return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
	}
	if dl.cleans != nil && len(nBlob) > 0 {
		dl.cleans.Set(key, nBlob)
		cleanWriteMeter.Mark(int64(len(nBlob)))
	}
	return nBlob, nil
}
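// A minimal lookup sketch, assuming path and hash come from the trie being
// resolved: the zero owner hash selects the account trie, any other owner
// selects that account's storage trie (mirroring the branch in Node above).
//
//	accountNode, err := dl.Node(common.Hash{}, path, hash) // account trie node
//	storageNode, err := dl.Node(ownerHash, path, hash)     // storage trie node of ownerHash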

// update implements the layer interface, returning a new diff layer on top
// with the given state set.
func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
	return newDiffLayer(dl, root, id, block, nodes, states)
}
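// A minimal usage sketch, assuming the nodes and states sets were produced by
// committing the tries of a freshly executed block, and that each new layer's
// state id follows its parent's:
//
//	diff := dl.update(newRoot, dl.stateID()+1, blockNumber, nodes, states)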

// commit merges the given bottom-most diff layer into the node buffer
// and returns a newly constructed disk layer. Note the current disk
// layer must be tagged as stale first to prevent re-access.
func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Construct and store the state history first. If a crash happens
	// after storing the state history but before flushing the
	// corresponding states (journal), the stored state history will
	// be truncated on the next restart.
	if dl.db.freezer != nil {
		err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateHistory)
		if err != nil {
			return nil, err
		}
	}
	// Mark the diskLayer as stale before applying any mutations on top.
	dl.stale = true

	// Store the root->id lookup afterwards. All stored lookups are
	// identified by the **unique** state root. It's impossible for
	// non-adjacent blocks in the same chain to share the same state
	// root.
	if dl.id == 0 {
		rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
	}
	rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())

	// Construct a new disk layer by merging the nodes from the provided
	// diff layer, and flush the content of the disk layer if too many
	// nodes are cached. The clean cache is inherited from the original
	// disk layer for reuse.
	ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
	err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force)
	if err != nil {
		return nil, err
	}
	return ndl, nil
}
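// A minimal usage sketch, assuming bottom is the bottom-most diff layer being
// flattened into disk; force is presumably only set when the buffer must be
// flushed unconditionally (e.g. on shutdown):
//
//	ndl, err := dl.commit(bottom, false)
//	if err != nil {
//		return err
//	}
//	// ndl replaces dl as the new base of the layer tree.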

// revert applies the given state history and returns a reverted disk layer.
func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) {
	if h.meta.root != dl.rootHash() {
		return nil, errUnexpectedHistory
	}
	// Reject the state history if it is incomplete. This happens when a
	// SELF-DESTRUCT of a contract with huge storage can't be fully
	// recorded because of memory limitations.
	if len(h.meta.incomplete) > 0 {
		return nil, errors.New("incomplete state history")
	}
	if dl.id == 0 {
		return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
	}
	// Apply the reverse state changes upon the current state. This must
	// be done before holding the lock in order to access state in "this"
	// layer.
	nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader)
	if err != nil {
		return nil, err
	}
	// Mark the diskLayer as stale before applying any mutations on top.
	dl.lock.Lock()
	defer dl.lock.Unlock()

	dl.stale = true

	// The state change may be applied to the node buffer or to the
	// persistent state, depending on whether the node buffer is empty.
	// If the node buffer is not empty, the state transition that needs
	// to be reverted is still cached there and not yet flushed;
	// otherwise, manipulate the persistent state directly.
	if !dl.buffer.empty() {
		err := dl.buffer.revert(dl.db.diskdb, nodes)
		if err != nil {
			return nil, err
		}
	} else {
		batch := dl.db.diskdb.NewBatch()
		writeNodes(batch, nodes, dl.cleans)
		rawdb.WritePersistentStateID(batch, dl.id-1)
		if err := batch.Write(); err != nil {
			log.Crit("Failed to write states", "err", err)
		}
	}
	return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil
}
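// A minimal rollback sketch, assuming h is a history entry read back for the
// current root and loader is some implementation of triestate.TrieLoader able
// to open tries against this layer:
//
//	ndl, err := dl.revert(h, loader)
//	if err != nil {
//		return err
//	}
//	// ndl now represents the parent state h.meta.parent with id dl.id-1.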

// setBufferSize sets the node buffer size to the provided value.
func (dl *diskLayer) setBufferSize(size int) error {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return errSnapshotStale
	}
	return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id)
}
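// A minimal resizing sketch, assuming the buffer size is expressed in bytes as
// elsewhere in the pathdb configuration:
//
//	if err := dl.setBufferSize(64 * 1024 * 1024); err != nil {
//		log.Warn("Failed to resize node buffer", "err", err)
//	}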

// size returns the approximate size of cached nodes in the disk layer.
func (dl *diskLayer) size() common.StorageSize {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return 0
	}
	return common.StorageSize(dl.buffer.size)
}

// resetCache releases the memory held by the clean cache to prevent memory leaks.
func (dl *diskLayer) resetCache() {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// A stale disk layer loses ownership of the clean cache.
	if dl.stale {
		return
	}
	if dl.cleans != nil {
		dl.cleans.Reset()
	}
}

// hasher is used to compute the keccak256 hash of the provided data.
type hasher struct{ sha crypto.KeccakState }

var hasherPool = sync.Pool{
	New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
}

// newHasher retrieves a hasher from the pool, creating one if none is available.
func newHasher() *hasher {
	return hasherPool.Get().(*hasher)
}

// hash returns the keccak256 hash of the given data.
func (h *hasher) hash(data []byte) common.Hash {
	return crypto.HashData(h.sha, data)
}

// release returns the hasher to the pool for reuse.
func (h *hasher) release() {
	hasherPool.Put(h)
}
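// A minimal usage sketch of the pooled hasher, mirroring the clean-cache
// verification performed in Node above:
//
//	h := newHasher()
//	defer h.release()
//	if h.hash(blob) == hash {
//		// blob is the node we were looking for
//	}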