github.com/lbryio/lbcd@v0.22.119/claimtrie/claimtrie.go

package claimtrie

import (
	"bytes"
	"fmt"
	"path/filepath"
	"sort"

	"github.com/pkg/errors"

	"github.com/lbryio/lbcd/claimtrie/block"
	"github.com/lbryio/lbcd/claimtrie/block/blockrepo"
	"github.com/lbryio/lbcd/claimtrie/change"
	"github.com/lbryio/lbcd/claimtrie/config"
	"github.com/lbryio/lbcd/claimtrie/merkletrie"
	"github.com/lbryio/lbcd/claimtrie/merkletrie/merkletrierepo"
	"github.com/lbryio/lbcd/claimtrie/node"
	"github.com/lbryio/lbcd/claimtrie/node/noderepo"
	"github.com/lbryio/lbcd/claimtrie/normalization"
	"github.com/lbryio/lbcd/claimtrie/param"
	"github.com/lbryio/lbcd/claimtrie/temporal"
	"github.com/lbryio/lbcd/claimtrie/temporal/temporalrepo"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/wire"
)

// ClaimTrie implements a Merkle Trie supporting a linear history of commits.
type ClaimTrie struct {

	// Repository for calculated block hashes.
	blockRepo block.Repo

	// Repository for storing temporal information of nodes at each block height.
	// For example, which nodes (by name) should be refreshed at each block height
	// due to stake expiration or delayed activation.
	temporalRepo temporal.Repo

	// Cache layer of Nodes.
	nodeManager node.Manager

	// Prefix tree (trie) that manages the merkle hash of each node.
	merkleTrie merkletrie.MerkleTrie

	// Current block height, which is increased by one when AppendBlock() is called.
	height int32

	// Registered cleanup functions, which are invoked in Close() in reverse order.
	cleanups []func() error

	// claimLogger communicates progress of the claimtrie rebuild.
	claimLogger *claimProgressLogger
}

func New(cfg config.Config) (*ClaimTrie, error) {

	var cleanups []func() error

	// The passed-in cfg.DataDir already includes the network name.
	dataDir := filepath.Join(cfg.DataDir, "claim_dbs")

	dbPath := filepath.Join(dataDir, cfg.BlockRepoPebble.Path)
	blockRepo, err := blockrepo.NewPebble(dbPath)
	if err != nil {
		return nil, errors.Wrap(err, "creating block repo")
	}
	cleanups = append(cleanups, blockRepo.Close)
	err = blockRepo.Set(0, merkletrie.EmptyTrieHash)
	if err != nil {
		return nil, errors.Wrap(err, "setting block repo genesis")
	}

	dbPath = filepath.Join(dataDir, cfg.TemporalRepoPebble.Path)
	temporalRepo, err := temporalrepo.NewPebble(dbPath)
	if err != nil {
		return nil, errors.Wrap(err, "creating temporal repo")
	}
	cleanups = append(cleanups, temporalRepo.Close)

	// Initialize the repository for changes to nodes.
	// Its cleanup is delegated to the node manager.
	dbPath = filepath.Join(dataDir, cfg.NodeRepoPebble.Path)
	nodeRepo, err := noderepo.NewPebble(dbPath)
	if err != nil {
		return nil, errors.Wrap(err, "creating node repo")
	}

	baseManager, err := node.NewBaseManager(nodeRepo)
	if err != nil {
		return nil, errors.Wrap(err, "creating node base manager")
	}
	normalizingManager := node.NewNormalizingManager(baseManager)
	nodeManager := &node.HashV2Manager{Manager: normalizingManager}
	cleanups = append(cleanups, nodeManager.Close)

	var trie merkletrie.MerkleTrie
	if cfg.RamTrie {
		trie = merkletrie.NewRamTrie()
	} else {

		// Initialize the repository for the MerkleTrie. Its cleanup is delegated to the MerkleTrie.
		dbPath = filepath.Join(dataDir, cfg.MerkleTrieRepoPebble.Path)
		trieRepo, err := merkletrierepo.NewPebble(dbPath)
		if err != nil {
			return nil, errors.Wrap(err, "creating trie repo")
		}

		persistentTrie := merkletrie.NewPersistentTrie(trieRepo)
		cleanups = append(cleanups, persistentTrie.Close)
		trie = persistentTrie
	}

	// Restore the last height.
	previousHeight, err := blockRepo.Load()
	if err != nil {
		return nil, errors.Wrap(err, "load block tip")
	}

	ct := &ClaimTrie{
		blockRepo:    blockRepo,
		temporalRepo: temporalRepo,

		nodeManager: nodeManager,
		merkleTrie:  trie,

		height: previousHeight,
	}

	ct.cleanups = cleanups

	if previousHeight > 0 {
		hash, err := blockRepo.Get(previousHeight)
		if err != nil {
			ct.Close() // TODO: the cleanups aren't run when we exit with an err above here (but should be)
			return nil, errors.Wrap(err, "block repo get")
		}
		_, err = nodeManager.IncrementHeightTo(previousHeight, false)
		if err != nil {
			ct.Close()
			return nil, errors.Wrap(err, "increment height to")
		}
		err = trie.SetRoot(hash) // keep this after IncrementHeightTo
		if err == merkletrie.ErrFullRebuildRequired {
			ct.runFullTrieRebuild(nil, cfg.Interrupt)
		}

		if interruptRequested(cfg.Interrupt) || !ct.MerkleHash().IsEqual(hash) {
			ct.Close()
			return nil, errors.Errorf("unable to restore the claim hash to %s at height %d", hash.String(), previousHeight)
		}
	}

	return ct, nil
}
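
// newClaimTrieSketch is an illustrative sketch, not part of the original
// file: it shows the expected construction lifecycle under the assumption
// that cfg has already been populated (including the network-specific
// DataDir noted above). Close is deferred so the registered cleanups run
// in reverse order even if later work fails.
func newClaimTrieSketch(cfg config.Config) error {
	ct, err := New(cfg)
	if err != nil {
		return err
	}
	defer ct.Close() // runs the accumulated cleanups in reverse order

	node.Log(fmt.Sprintf("claimtrie restored at height %d with root %s",
		ct.Height(), ct.MerkleHash().String()))
	return nil
}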

// AddClaim adds a Claim to the ClaimTrie.
func (ct *ClaimTrie) AddClaim(name []byte, op wire.OutPoint, id change.ClaimID, amt int64) error {

	chg := change.Change{
		Type:     change.AddClaim,
		Name:     name,
		OutPoint: op,
		Amount:   amt,
		ClaimID:  id,
	}

	return ct.forwardNodeChange(chg)
}

// UpdateClaim updates a Claim in the ClaimTrie.
func (ct *ClaimTrie) UpdateClaim(name []byte, op wire.OutPoint, amt int64, id change.ClaimID) error {

	chg := change.Change{
		Type:     change.UpdateClaim,
		Name:     name,
		OutPoint: op,
		Amount:   amt,
		ClaimID:  id,
	}

	return ct.forwardNodeChange(chg)
}

// SpendClaim spends a Claim in the ClaimTrie.
func (ct *ClaimTrie) SpendClaim(name []byte, op wire.OutPoint, id change.ClaimID) error {

	chg := change.Change{
		Type:     change.SpendClaim,
		Name:     name,
		OutPoint: op,
		ClaimID:  id,
	}

	return ct.forwardNodeChange(chg)
}

// AddSupport adds a Support to the ClaimTrie.
func (ct *ClaimTrie) AddSupport(name []byte, op wire.OutPoint, amt int64, id change.ClaimID) error {

	chg := change.Change{
		Type:     change.AddSupport,
		Name:     name,
		OutPoint: op,
		Amount:   amt,
		ClaimID:  id,
	}

	return ct.forwardNodeChange(chg)
}

// SpendSupport spends a Support in the ClaimTrie.
func (ct *ClaimTrie) SpendSupport(name []byte, op wire.OutPoint, id change.ClaimID) error {

	chg := change.Change{
		Type:     change.SpendSupport,
		Name:     name,
		OutPoint: op,
		ClaimID:  id,
	}

	return ct.forwardNodeChange(chg)
}
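
// queueAndCommitSketch is an illustrative sketch, not part of the original
// file: the Add/Update/Spend methods above only queue a change.Change for
// height Height()+1 (see forwardNodeChange below); nothing takes effect
// until AppendBlock commits the block. The outpoint, claim ID, and amount
// below are placeholder values.
func queueAndCommitSketch(ct *ClaimTrie) error {
	var op wire.OutPoint  // placeholder outpoint of the claim's UTXO
	var id change.ClaimID // placeholder claim ID
	if err := ct.AddClaim([]byte("example-name"), op, id, 1000); err != nil {
		return err
	}
	if err := ct.AddSupport([]byte("example-name"), op, 500, id); err != nil {
		return err
	}
	return ct.AppendBlock(false) // commit; the queued changes activate here
}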

// AppendBlock increases the block height by one.
func (ct *ClaimTrie) AppendBlock(temporary bool) error {

	ct.height++

	names, err := ct.nodeManager.IncrementHeightTo(ct.height, temporary)
	if err != nil {
		return errors.Wrap(err, "node manager increment")
	}

	expirations, err := ct.temporalRepo.NodesAt(ct.height)
	if err != nil {
		return errors.Wrap(err, "temporal repo get")
	}

	names = removeDuplicates(names) // comes out sorted

	updateNames := make([][]byte, 0, len(names)+len(expirations))
	updateHeights := make([]int32, 0, len(names)+len(expirations))
	updateNames = append(updateNames, names...)
	for range names { // log to the db that we updated a name at this height for rollback purposes
		updateHeights = append(updateHeights, ct.height)
	}
	names = append(names, expirations...)
	names = removeDuplicates(names)

	for _, name := range names {

		hash, next := ct.nodeManager.Hash(name)
		ct.merkleTrie.Update(name, hash, true)
		if next <= 0 {
			continue
		}

		newName := normalization.NormalizeIfNecessary(name, next)
		updateNames = append(updateNames, newName)
		updateHeights = append(updateHeights, next)
	}
	if !temporary && len(updateNames) > 0 {
		err = ct.temporalRepo.SetNodesAt(updateNames, updateHeights)
		if err != nil {
			return errors.Wrap(err, "temporal repo set")
		}
	}

	hitFork := ct.updateTrieForHashForkIfNecessary()
	h := ct.MerkleHash()

	if !temporary {
		ct.blockRepo.Set(ct.height, h)
	}

	if hitFork {
		err = ct.merkleTrie.SetRoot(h) // for clearing the memory entirely
	}

	return errors.Wrap(err, "merkle trie clear memory")
}
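
// previewRootSketch is an illustrative sketch, not part of the original
// file: one plausible use of the temporary flag is to compute the would-be
// merkle root of the next block without persisting temporal or block-hash
// data, then rewind. Whether ResetHeight is the appropriate rollback in
// every caller's context is an assumption here.
func previewRootSketch(ct *ClaimTrie) (*chainhash.Hash, error) {
	tip := ct.Height()
	if err := ct.AppendBlock(true); err != nil { // temporary: skips temporalRepo/blockRepo writes
		return nil, err
	}
	root := ct.MerkleHash()
	if err := ct.ResetHeight(tip); err != nil {
		return nil, err
	}
	return root, nil
}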

func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool {
	if ct.height != param.ActiveParams.AllClaimsInMerkleForkHeight {
		return false
	}

	node.LogOnce(fmt.Sprintf("Rebuilding all trie nodes for the hash fork at %d...", ct.height))
	ct.runFullTrieRebuild(nil, nil) // I don't think it's safe to allow an interrupt during the fork
	return true
}

func removeDuplicates(names [][]byte) [][]byte { // this might be too expensive; we'll have to profile it
	sort.Slice(names, func(i, j int) bool { // put names in order so we can skip duplicates
		return bytes.Compare(names[i], names[j]) < 0
	})

	for i := len(names) - 2; i >= 0; i-- {
		if bytes.Equal(names[i], names[i+1]) {
			names = append(names[:i], names[i+1:]...)
		}
	}
	return names
}
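
// removeDuplicatesSketch is a tiny illustrative sketch, not part of the
// original file: the slice is sorted in place and adjacent duplicates are
// dropped, so {"b", "a", "b"} comes out as {"a", "b"}.
func removeDuplicatesSketch() {
	names := [][]byte{[]byte("b"), []byte("a"), []byte("b")}
	for _, n := range removeDuplicates(names) {
		fmt.Println(string(n)) // prints "a", then "b"
	}
}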

// ResetHeight resets the ClaimTrie to a previously known height.
func (ct *ClaimTrie) ResetHeight(height int32) error {

	names := make([][]byte, 0)
	for h := height + 1; h <= ct.height; h++ {
		results, err := ct.temporalRepo.NodesAt(h)
		if err != nil {
			return err
		}
		names = append(names, results...)
	}
	names, err := ct.nodeManager.DecrementHeightTo(names, height)
	if err != nil {
		return err
	}

	passedHashFork := ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight && height < param.ActiveParams.AllClaimsInMerkleForkHeight
	hash, err := ct.blockRepo.Get(height)
	if err != nil {
		return err
	}

	oldHeight := ct.height
	ct.height = height // keep this before the rebuild

	if passedHashFork {
		names = nil // force them to reconsider all names
	}

	var fullRebuildRequired bool

	err = ct.merkleTrie.SetRoot(hash)
	if err == merkletrie.ErrFullRebuildRequired {
		fullRebuildRequired = true
	} else if err != nil {
		return errors.Wrapf(err, "setRoot")
	}

	if fullRebuildRequired {
		ct.runFullTrieRebuild(names, nil)
	}

	if !ct.MerkleHash().IsEqual(hash) {
		return errors.Errorf("unable to restore the hash at height %d"+
			" (fullTrieRebuilt: %t)", height, fullRebuildRequired)
	}

	return errors.WithStack(ct.blockRepo.Delete(height+1, oldHeight))
}
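
// reorgSketch is an illustrative sketch, not part of the original file: on
// a chain reorganization a caller could rewind to the fork point with
// ResetHeight and then replay the new branch, re-queuing each block's claim
// changes and committing them with AppendBlock(false). replayBranch is a
// hypothetical callback supplied by the caller.
func reorgSketch(ct *ClaimTrie, forkHeight int32, replayBranch func(*ClaimTrie) error) error {
	if err := ct.ResetHeight(forkHeight); err != nil {
		return err
	}
	return replayBranch(ct)
}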

func (ct *ClaimTrie) runFullTrieRebuild(names [][]byte, interrupt <-chan struct{}) {
	if names == nil {
		node.Log("Building the entire claim trie in RAM...")
		ct.claimLogger = newClaimProgressLogger("Processed", node.GetLogger())

		ct.nodeManager.IterateNames(func(name []byte) bool {
			if interruptRequested(interrupt) {
				return false
			}
			clone := make([]byte, len(name))
			copy(clone, name)
			hash, _ := ct.nodeManager.Hash(clone)
			ct.merkleTrie.Update(clone, hash, false)
			ct.claimLogger.LogName(name)
			return true
		})

	} else {
		for _, name := range names {
			hash, _ := ct.nodeManager.Hash(name)
			ct.merkleTrie.Update(name, hash, false)
		}
	}
}

// MerkleHash returns the merkle hash of the ClaimTrie.
func (ct *ClaimTrie) MerkleHash() *chainhash.Hash {
	if ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight {
		return ct.merkleTrie.MerkleHashAllClaims()
	}
	return ct.merkleTrie.MerkleHash()
}

// Height returns the current block height.
func (ct *ClaimTrie) Height() int32 {
	return ct.height
}

// Close persists state.
// Any calls to the ClaimTrie after Close() has been called result in undefined behaviour.
func (ct *ClaimTrie) Close() {

	for i := len(ct.cleanups) - 1; i >= 0; i-- {
		cleanup := ct.cleanups[i]
		err := cleanup()
		if err != nil { // it's better to clean up what we can than to exit early
			node.LogOnce("On cleanup: " + err.Error())
		}
	}
	ct.cleanups = nil
}

func (ct *ClaimTrie) forwardNodeChange(chg change.Change) error {

	chg.Height = ct.Height() + 1
	ct.nodeManager.AppendChange(chg)
	return nil
}

func (ct *ClaimTrie) NodeAt(height int32, name []byte) (*node.Node, error) {
	return ct.nodeManager.NodeAt(height, name)
}

func (ct *ClaimTrie) NamesChangedInBlock(height int32) ([]string, error) {
	hits, err := ct.temporalRepo.NodesAt(height)
	r := make([]string, len(hits))
	for i := range hits {
		r[i] = string(hits[i])
	}
	return r, err
}

func (ct *ClaimTrie) FlushToDisk() {
	// maybe the user can fix the file lock shown in the warning before they shut down
	if err := ct.nodeManager.Flush(); err != nil {
		node.Warn("During nodeManager flush: " + err.Error())
	}
	if err := ct.temporalRepo.Flush(); err != nil {
		node.Warn("During temporalRepo flush: " + err.Error())
	}
	if err := ct.merkleTrie.Flush(); err != nil {
		node.Warn("During merkleTrie flush: " + err.Error())
	}
	if err := ct.blockRepo.Flush(); err != nil {
		node.Warn("During blockRepo flush: " + err.Error())
	}
}

func interruptRequested(interrupted <-chan struct{}) bool {
	select {
	case <-interrupted: // a nil channel never fires here, so this select never blocks
		return true
	default:
	}

	return false
}