github.com/aergoio/aergo@v1.3.1/pkg/trie/trie.go

/**
 *  @file
 *  @copyright defined in aergo/LICENSE.txt
 */

package trie

import (
	"bytes"
	"fmt"
	"sync"

	"github.com/aergoio/aergo-lib/db"
)

// Trie is a modified sparse Merkle tree.
// Instead of storing values at the leaves of the tree,
// the values are stored at the highest subtree root that contains only that value.
// If the tree is sparse, this requires fewer hashing operations.
type Trie struct {
	db *CacheDB
	// Root is the current root of the SMT.
	Root []byte
	// prevRoot is the root before the last update
	prevRoot []byte
	// lock is for the whole struct
	lock sync.RWMutex
	// hash is the hash function used in the trie
	hash func(data ...[]byte) []byte
	// TrieHeight is the number of bits in a key
	TrieHeight int
	// LoadDbCounter counts the number of db reads in one update
	LoadDbCounter int
	// loadDbMux is a lock for LoadDbCounter
	loadDbMux sync.RWMutex
	// LoadCacheCounter counts the number of cache reads in one update
	LoadCacheCounter int
	// liveCountMux is a lock for LoadCacheCounter
	liveCountMux sync.RWMutex
	// counterOn enables/disables the counters for efficiency
	counterOn bool
	// CacheHeightLimit is the number of tree levels we want to store in cache
	CacheHeightLimit int
	// pastTries stores the past maxPastTries trie roots to revert to
	pastTries [][]byte
	// atomicUpdate commits all the changes made by intermediate update calls
	atomicUpdate bool
}

// NewTrie creates a new trie given a root, a hash function and a db store.
func NewTrie(root []byte, hash func(data ...[]byte) []byte, store db.DB) *Trie {
	s := &Trie{
		hash:       hash,
		TrieHeight: len(hash([]byte("height"))) * 8, // hash any string to get the output length
		counterOn:  false,
	}
	s.db = &CacheDB{
		liveCache:    make(map[Hash][][]byte),
		updatedNodes: make(map[Hash][][]byte),
		Store:        store,
	}
	// don't store any cache by default (contract state doesn't use cache)
	s.CacheHeightLimit = s.TrieHeight + 1
	s.Root = root
	return s
}
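
// A minimal construction sketch (illustrative only; the sha256-based hasher
// and the nil store below are assumptions, not part of this package). Any
// function hashing a variadic list of byte slices into a fixed-length digest
// works; with a nil store, nodes only live in updatedNodes and liveCache.
//
//	hasher := func(data ...[]byte) []byte {
//		h := sha256.New() // import "crypto/sha256"
//		for _, d := range data {
//			h.Write(d)
//		}
//		return h.Sum(nil)
//	}
//	smt := NewTrie(nil, hasher, nil)  // empty trie, in-memory only
//	key := hasher([]byte("account"))  // keys are hashed so they are TrieHeight bits long
//	value := hasher([]byte("balance"))
//	root, err := smt.Update([][]byte{key}, [][]byte{value})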

// Update adds and deletes a sorted list of keys and their values to the trie.
// Adding and deleting can be simultaneous.
// To delete, set the value to DefaultLeaf.
// If Update is called multiple times, only the state after the last update
// is committed.
func (s *Trie) Update(keys, values [][]byte) ([]byte, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.atomicUpdate = false
	s.LoadDbCounter = 0
	s.LoadCacheCounter = 0
	ch := make(chan mresult, 1)
	s.update(s.Root, keys, values, nil, 0, s.TrieHeight, ch)
	result := <-ch
	if result.err != nil {
		return nil, result.err
	}
	if len(result.update) != 0 {
		s.Root = result.update[:HashLength]
	} else {
		s.Root = nil
	}
	return s.Root, nil
}

// AtomicUpdate can be called multiple times and all the updated nodes will be committed
// and roots will be stored in past tries.
// Can be used for updating several blocks before committing to DB.
func (s *Trie) AtomicUpdate(keys, values [][]byte) ([]byte, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.atomicUpdate = true
	s.LoadDbCounter = 0
	s.LoadCacheCounter = 0
	ch := make(chan mresult, 1)
	s.update(s.Root, keys, values, nil, 0, s.TrieHeight, ch)
	result := <-ch
	if result.err != nil {
		return nil, result.err
	}
	if len(result.update) != 0 {
		s.Root = result.update[:HashLength]
	} else {
		s.Root = nil
	}
	s.updatePastTries()
	return s.Root, nil
}
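
// How Update and AtomicUpdate differ in practice (an illustrative sketch; the
// keysN/valuesN variables are hypothetical): Update keeps only the final
// state, while AtomicUpdate keeps every intermediate root in pastTries and
// every intermediate node in updatedNodes, so several blocks can be applied
// before a single db commit.
//
//	rootN, _ := smt.AtomicUpdate(keysN, valuesN)    // block N, revertable
//	rootN1, _ := smt.AtomicUpdate(keysN1, valuesN1) // block N+1, revertable
//	// with Update instead, only the state after the second call is committed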

// mresult is used to contain the result of goroutines and is sent through a channel.
type mresult struct {
	update []byte
	// deleted flags that a node was deleted and a shortcut node may have to move up the tree
	deleted bool
	err     error
}

// update adds and deletes a sorted list of keys and their values to the trie.
// Adding and deleting can be simultaneous.
// To delete, set the value to DefaultLeaf.
// It returns the root of the updated tree.
func (s *Trie) update(root []byte, keys, values, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	if height == 0 {
		if bytes.Equal(DefaultLeaf, values[0]) {
			// Delete the key-value from the trie if it is being set to DefaultLeaf.
			// The value will be set to [] in batch by maybeMoveUpShortcut or interiorHash
			s.deleteOldNode(root, height, false)
			ch <- mresult{nil, true, nil}
		} else {
			// create a new shortcut batch.
			// simply storing the value will make it hard to move up the
			// shortcut in case of sibling deletion
			batch = make([][]byte, 31, 31)
			node := s.leafHash(keys[0], values[0], root, batch, 0, height)
			ch <- mresult{node, false, nil}
		}
		return
	}

	// Load the node to update
	batch, iBatch, lnode, rnode, isShortcut, err := s.loadChildren(root, height, iBatch, batch)
	if err != nil {
		ch <- mresult{nil, false, err}
		return
	}
	// Check if the keys are updating the shortcut node
	if isShortcut {
		keys, values = s.maybeAddShortcutToKV(keys, values, lnode[:HashLength], rnode[:HashLength])
		if iBatch == 0 {
			// the shortcut is moving so its root will change
			s.deleteOldNode(root, height, false)
		}
		// The shortcut node was added to keys and values so consider this subtree default.
		lnode, rnode = nil, nil
		// update in the batch (set key, value to default so the next loadChildren is correct)
		batch[2*iBatch+1] = nil
		batch[2*iBatch+2] = nil
		if len(keys) == 0 {
			// Set true so that a potential sibling shortcut may move up.
			ch <- mresult{nil, true, nil}
			return
		}
	}
	// Store shortcut node
	if (len(lnode) == 0) && (len(rnode) == 0) && (len(keys) == 1) {
		// We are adding 1 key to an empty subtree so store it as a shortcut
		if bytes.Equal(DefaultLeaf, values[0]) {
			ch <- mresult{nil, true, nil}
		} else {
			node := s.leafHash(keys[0], values[0], root, batch, iBatch, height)
			ch <- mresult{node, false, nil}
		}
		return
	}

	// Split the keys array so each branch can be updated in parallel
	lkeys, rkeys := s.splitKeys(keys, s.TrieHeight-height)
	splitIndex := len(lkeys)
	lvalues, rvalues := values[:splitIndex], values[splitIndex:]

	switch {
	case len(lkeys) == 0 && len(rkeys) > 0:
		s.updateRight(lnode, rnode, root, keys, values, batch, iBatch, height, ch)
	case len(lkeys) > 0 && len(rkeys) == 0:
		s.updateLeft(lnode, rnode, root, keys, values, batch, iBatch, height, ch)
	default:
		s.updateParallel(lnode, rnode, root, lkeys, rkeys, lvalues, rvalues, batch, iBatch, height, ch)
	}
}
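
// How batches are laid out (a summary of the scheme used throughout this
// file, with a small illustrative snippet): a batch is a 31-slot slice
// holding a 4-level subtree. batch[0] is a one-byte flag marking a shortcut
// batch, and the node stored at index i has its children at 2*i+1 (left) and
// 2*i+2 (right). A fresh batch is loaded from cache or db whenever
// height%4 == 0, which is why iBatch is reset to 0 at those heights.
//
//	batch := make([][]byte, 31)
//	batch[0] = []byte{0}                      // not a shortcut batch
//	left, right := batch[2*0+1], batch[2*0+2] // children of the batch root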

// updateRight updates the right side of the tree
func (s *Trie) updateRight(lnode, rnode, root []byte, keys, values, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	// all the keys go in the right subtree
	newch := make(chan mresult, 1)
	s.update(rnode, keys, values, batch, 2*iBatch+2, height-1, newch)
	result := <-newch
	if result.err != nil {
		ch <- mresult{nil, false, result.err}
		return
	}
	// Move up a shortcut node if necessary.
	if result.deleted {
		if s.maybeMoveUpShortcut(lnode, result.update, root, batch, iBatch, height, ch) {
			return
		}
	}
	node := s.interiorHash(lnode, result.update, root, batch, iBatch, height)
	ch <- mresult{node, false, nil}
}

// updateLeft updates the left side of the tree
func (s *Trie) updateLeft(lnode, rnode, root []byte, keys, values, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	// all the keys go in the left subtree
	newch := make(chan mresult, 1)
	s.update(lnode, keys, values, batch, 2*iBatch+1, height-1, newch)
	result := <-newch
	if result.err != nil {
		ch <- mresult{nil, false, result.err}
		return
	}
	// Move up a shortcut node if necessary.
	if result.deleted {
		if s.maybeMoveUpShortcut(result.update, rnode, root, batch, iBatch, height, ch) {
			return
		}
	}
	node := s.interiorHash(result.update, rnode, root, batch, iBatch, height)
	ch <- mresult{node, false, nil}
}

// updateParallel updates both sides of the trie simultaneously
func (s *Trie) updateParallel(lnode, rnode, root []byte, lkeys, rkeys, lvalues, rvalues, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	lch := make(chan mresult, 1)
	rch := make(chan mresult, 1)
	go s.update(lnode, lkeys, lvalues, batch, 2*iBatch+1, height-1, lch)
	go s.update(rnode, rkeys, rvalues, batch, 2*iBatch+2, height-1, rch)
	lresult := <-lch
	rresult := <-rch
	if lresult.err != nil {
		ch <- mresult{nil, false, lresult.err}
		return
	}
	if rresult.err != nil {
		ch <- mresult{nil, false, rresult.err}
		return
	}

	// Move up a shortcut node if its sibling is default
	if lresult.deleted || rresult.deleted {
		if s.maybeMoveUpShortcut(lresult.update, rresult.update, root, batch, iBatch, height, ch) {
			return
		}
	}
	node := s.interiorHash(lresult.update, rresult.update, root, batch, iBatch, height)
	ch <- mresult{node, false, nil}
}

// deleteOldNode deletes an old node that has been updated
func (s *Trie) deleteOldNode(root []byte, height int, movingUp bool) {
	var node Hash
	copy(node[:], root)
	if !s.atomicUpdate || movingUp {
		// don't delete old nodes with atomic update except when
		// moving up a shortcut; we don't record every single move
		s.db.updatedMux.Lock()
		delete(s.db.updatedNodes, node)
		s.db.updatedMux.Unlock()
	}
	if height >= s.CacheHeightLimit {
		s.db.liveMux.Lock()
		delete(s.db.liveCache, node)
		s.db.liveMux.Unlock()
	}
}

// splitKeys divides the array of keys in 2 so the left and right branches can be updated in parallel
func (s *Trie) splitKeys(keys [][]byte, height int) ([][]byte, [][]byte) {
	for i, key := range keys {
		if bitIsSet(key, height) {
			return keys[:i], keys[i:]
		}
	}
	return keys, nil
}
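
// A small worked example of the split (keys shortened to one byte for
// illustration; real keys are hash-sized): keys are sorted, so every key
// whose bit at the given index is 0 comes first, and the first key with that
// bit set marks the boundary. This assumes bitIsSet counts bit 0 as the most
// significant bit, which the sorted-keys invariant relies on.
//
//	keys := [][]byte{{0x0f}, {0x70}, {0x80}, {0xf0}}
//	lkeys, rkeys := s.splitKeys(keys, 0) // split on the most significant bit
//	// lkeys == [][]byte{{0x0f}, {0x70}}, rkeys == [][]byte{{0x80}, {0xf0}}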

// maybeMoveUpShortcut moves up a shortcut if its sibling node is default
func (s *Trie) maybeMoveUpShortcut(left, right, root []byte, batch [][]byte, iBatch, height int, ch chan<- (mresult)) bool {
	if len(left) == 0 && len(right) == 0 {
		// Both update and sibling are deleted subtrees
		if iBatch == 0 {
			// If the deleted subtrees are at the root, then delete it.
			s.deleteOldNode(root, height, true)
		} else {
			batch[2*iBatch+1] = nil
			batch[2*iBatch+2] = nil
		}
		ch <- mresult{nil, true, nil}
		return true
	} else if len(left) == 0 {
		// If right is a shortcut, move it up
		if right[HashLength] == 1 {
			s.moveUpShortcut(right, root, batch, iBatch, 2*iBatch+2, height, ch)
			return true
		}
	} else if len(right) == 0 {
		// If left is a shortcut, move it up
		if left[HashLength] == 1 {
			s.moveUpShortcut(left, root, batch, iBatch, 2*iBatch+1, height, ch)
			return true
		}
	}
	return false
}

func (s *Trie) moveUpShortcut(shortcut, root []byte, batch [][]byte, iBatch, iShortcut, height int, ch chan<- (mresult)) {
	// it doesn't matter if atomic update is true or false since the batch is not modified
	_, _, shortcutKey, shortcutVal, _, err := s.loadChildren(shortcut, height-1, iShortcut, batch)
	if err != nil {
		ch <- mresult{nil, false, err}
		return
	}
	// when moving up the shortcut, its hash will change because the height is +1
	newShortcut := s.hash(shortcutKey[:HashLength], shortcutVal[:HashLength], []byte{byte(height)})
	newShortcut = append(newShortcut, byte(1))

	if iBatch == 0 {
		// Modify batch to a shortcut batch
		batch[0] = []byte{1}
		batch[2*iBatch+1] = shortcutKey
		batch[2*iBatch+2] = shortcutVal
		batch[2*iShortcut+1] = nil
		batch[2*iShortcut+2] = nil
		// cache and updatedNodes are deleted by storeNode
		s.storeNode(batch, newShortcut, root, height)
	} else if (height-1)%4 == 0 {
		// move up shortcut and delete old batch
		batch[2*iBatch+1] = shortcutKey
		batch[2*iBatch+2] = shortcutVal
		// set true so that AtomicUpdate can also delete a node moving up,
		// otherwise every node moved up is recorded
		s.deleteOldNode(shortcut, height, true)
	} else {
		// move up shortcut
		batch[2*iBatch+1] = shortcutKey
		batch[2*iBatch+2] = shortcutVal
		batch[2*iShortcut+1] = nil
		batch[2*iShortcut+2] = nil
	}
	// Return the moved up shortcut node
	ch <- mresult{newShortcut, true, nil}
}

// maybeAddShortcutToKV adds a shortcut key to the keys array to be updated.
// This is used when a subtree containing a shortcut node is being updated.
func (s *Trie) maybeAddShortcutToKV(keys, values [][]byte, shortcutKey, shortcutVal []byte) ([][]byte, [][]byte) {
	newKeys := make([][]byte, 0, len(keys)+1)
	newVals := make([][]byte, 0, len(keys)+1)

	if bytes.Compare(shortcutKey, keys[0]) < 0 {
		newKeys = append(newKeys, shortcutKey)
		newKeys = append(newKeys, keys...)
		newVals = append(newVals, shortcutVal)
		newVals = append(newVals, values...)
	} else if bytes.Compare(shortcutKey, keys[len(keys)-1]) > 0 {
		newKeys = append(newKeys, keys...)
		newKeys = append(newKeys, shortcutKey)
		newVals = append(newVals, values...)
		newVals = append(newVals, shortcutVal)
	} else {
		higher := false
		for i, key := range keys {
			if bytes.Equal(shortcutKey, key) {
				if !bytes.Equal(DefaultLeaf, values[i]) {
					// Do nothing if the shortcut is simply updated
					return keys, values
				}
				// Delete the shortcut if it is updated to DefaultLeaf
				newKeys = append(newKeys, keys[:i]...)
				newKeys = append(newKeys, keys[i+1:]...)
				newVals = append(newVals, values[:i]...)
				newVals = append(newVals, values[i+1:]...)
			}
			if !higher && bytes.Compare(shortcutKey, key) > 0 {
				higher = true
				continue
			}
			if higher && bytes.Compare(shortcutKey, key) < 0 {
				// insert the shortcut in the slices
				newKeys = append(newKeys, keys[:i]...)
				newKeys = append(newKeys, shortcutKey)
				newKeys = append(newKeys, keys[i:]...)
				newVals = append(newVals, values[:i]...)
				newVals = append(newVals, shortcutVal)
				newVals = append(newVals, values[i:]...)
				break
			}
		}
	}
	return newKeys, newVals
}
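
// A worked example of the merge above (the single-byte keys and the
// valA/valB/shortcutVal variables are made up for illustration; real keys are
// hash-sized): if the subtree already holds a shortcut for key 0x40 and the
// update brings the sorted keys 0x20 and 0x80, the shortcut pair is spliced
// in so the subtree is rebuilt with all three entries.
//
//	keys := [][]byte{{0x20}, {0x80}}
//	values := [][]byte{valA, valB}
//	newKeys, newVals := s.maybeAddShortcutToKV(keys, values, []byte{0x40}, shortcutVal)
//	// newKeys == [][]byte{{0x20}, {0x40}, {0x80}}, newVals keeps the matching order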

// loadChildren looks for the children of a node.
// If the node is not stored in cache, it will be loaded from db.
func (s *Trie) loadChildren(root []byte, height, iBatch int, batch [][]byte) ([][]byte, int, []byte, []byte, bool, error) {
	isShortcut := false
	if height%4 == 0 {
		if len(root) == 0 {
			// create a new default batch
			batch = make([][]byte, 31, 31)
			batch[0] = []byte{0}
		} else {
			var err error
			batch, err = s.loadBatch(root)
			if err != nil {
				return nil, 0, nil, nil, false, err
			}
		}
		iBatch = 0
		if batch[0][0] == 1 {
			isShortcut = true
		}
	} else {
		if len(batch[iBatch]) != 0 && batch[iBatch][HashLength] == 1 {
			isShortcut = true
		}
	}
	return batch, iBatch, batch[2*iBatch+1], batch[2*iBatch+2], isShortcut, nil
}

// loadBatch fetches a batch of nodes in cache or db
func (s *Trie) loadBatch(root []byte) ([][]byte, error) {
	var node Hash
	copy(node[:], root)

	s.db.liveMux.RLock()
	val, exists := s.db.liveCache[node]
	s.db.liveMux.RUnlock()
	if exists {
		if s.counterOn {
			s.liveCountMux.Lock()
			s.LoadCacheCounter++
			s.liveCountMux.Unlock()
		}
		if s.atomicUpdate {
			// Return a copy so that Commit() doesn't have to be called at
			// each block while still committing every state transition.
			// Before Commit, the same batch is in liveCache and in updatedNodes
			newVal := make([][]byte, 31, 31)
			copy(newVal, val)
			return newVal, nil
		}
		return val, nil
	}
	// checking updated nodes is useful if get() or update() is called twice in a row without a db commit
	s.db.updatedMux.RLock()
	val, exists = s.db.updatedNodes[node]
	s.db.updatedMux.RUnlock()
	if exists {
		if s.atomicUpdate {
			// Return a copy so that Commit() doesn't have to be called at
			// each block while still committing every state transition.
			newVal := make([][]byte, 31, 31)
			copy(newVal, val)
			return newVal, nil
		}
		return val, nil
	}
	// Fetch the node from the disk database
	if s.db.Store == nil {
		return nil, fmt.Errorf("DB not connected to trie")
	}
	if s.counterOn {
		s.loadDbMux.Lock()
		s.LoadDbCounter++
		s.loadDbMux.Unlock()
	}
	s.db.lock.Lock()
	dbval := s.db.Store.Get(root[:HashLength])
	s.db.lock.Unlock()
	nodeSize := len(dbval)
	if nodeSize != 0 {
		return s.parseBatch(dbval), nil
	}
	return nil, fmt.Errorf("the trie node %x is unavailable in the disk db, db may be corrupted", root)
}

// parseBatch decodes the byte data into a slice of nodes and bitmap
func (s *Trie) parseBatch(val []byte) [][]byte {
	batch := make([][]byte, 31, 31)
	bitmap := val[:4]
	// check if the batch root is a shortcut
	if bitIsSet(val, 31) {
		batch[0] = []byte{1}
		batch[1] = val[4 : 4+33]
		batch[2] = val[4+33 : 4+33*2]
	} else {
		batch[0] = []byte{0}
		j := 0
		for i := 1; i <= 30; i++ {
			if bitIsSet(bitmap, i-1) {
				batch[i] = val[4+33*j : 4+33*(j+1)]
				j++
			}
		}
	}
	return batch
}
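
// The storage format implied by parseBatch (a summary, with a hypothetical
// decoding example): a serialized batch is a 4-byte bitmap followed by one
// 33-byte entry (HashLength bytes plus one flag byte) per bit set in the
// bitmap; bit 31 marks a shortcut batch, in which case exactly two entries
// follow, the shortcut key and value.
//
//	// val = bitmap(4 bytes, bit 31 set) | key(32)+flag(1) | value(32)+flag(1)
//	batch := s.parseBatch(val)
//	// batch[0] == []byte{1}, batch[1] == key entry, batch[2] == value entry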

// leafHash returns the hash of key_value_byte(height) concatenated, stores it in the updatedNodes and maybe in liveCache.
// leafHash is never called for a default value. Default values should not be stored.
func (s *Trie) leafHash(key, value, oldRoot []byte, batch [][]byte, iBatch, height int) []byte {
	// byte(height) is here for 2 reasons.
	// 1- to prevent potential problems with merkle proofs where if an account
	// has the same address as a node, it would be possible to prove a
	// different value for the account.
	// 2- when accounts are added to the trie, accounts on their path get pushed down the tree
	// with them. If an old account changes position from a shortcut batch to another
	// shortcut batch of a different height, it would be deleted when reverting.
	h := s.hash(key, value, []byte{byte(height)})
	h = append(h, byte(1)) // byte(1) is a flag for the shortcut
	batch[2*iBatch+2] = append(value, byte(2))
	batch[2*iBatch+1] = append(key, byte(2))
	if height%4 == 0 {
		batch[0] = []byte{1} // byte(1) is a flag for the shortcut batch
		s.storeNode(batch, h, oldRoot, height)
	}
	return h
}
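
// The resulting node encoding, spelled out (a restatement of the code above
// with hypothetical key/value variables): a shortcut node hash carries a
// trailing 0x01 flag, while the key and value slots in the batch carry a
// trailing 0x02 flag, distinct from the 0x00/0x01 flags used on interior and
// shortcut hashes.
//
//	h := s.hash(key, value, []byte{byte(height)}) // HashLength bytes
//	shortcutNode := append(h, byte(1))            // what leafHash returns
//	storedKey := append(key, byte(2))             // batch[2*iBatch+1]
//	storedVal := append(value, byte(2))           // batch[2*iBatch+2]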

// storeNode stores a batch and deletes the old node from cache
func (s *Trie) storeNode(batch [][]byte, h, oldRoot []byte, height int) {
	if !bytes.Equal(h, oldRoot) {
		var node Hash
		copy(node[:], h)
		// record the new node
		s.db.updatedMux.Lock()
		s.db.updatedNodes[node] = batch
		s.db.updatedMux.Unlock()
		// Cache the shortcut node if its height is over CacheHeightLimit
		if height >= s.CacheHeightLimit {
			s.db.liveMux.Lock()
			s.db.liveCache[node] = batch
			s.db.liveMux.Unlock()
		}
		s.deleteOldNode(oldRoot, height, false)
	}
}

// interiorHash hashes 2 children to get the parent hash and stores it in the updatedNodes and maybe in liveCache.
func (s *Trie) interiorHash(left, right, oldRoot []byte, batch [][]byte, iBatch, height int) []byte {
	var h []byte
	// left and right cannot both be default. It is handled by maybeMoveUpShortcut()
	if len(left) == 0 {
		h = s.hash(DefaultLeaf, right[:HashLength])
	} else if len(right) == 0 {
		h = s.hash(left[:HashLength], DefaultLeaf)
	} else {
		h = s.hash(left[:HashLength], right[:HashLength])
	}
	h = append(h, byte(0))
	batch[2*iBatch+2] = right
	batch[2*iBatch+1] = left
	if height%4 == 0 {
		batch[0] = []byte{0}
		s.storeNode(batch, h, oldRoot, height)
	}
	return h
}
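
// The DefaultLeaf substitution above is what keeps sparse updates cheap, as
// described in the Trie doc comment: an empty child is hashed as DefaultLeaf
// directly instead of hashing a full default subtree.
//
//	// with only the right child present:
//	h := s.hash(DefaultLeaf, right[:HashLength])
//	h = append(h, byte(0)) // 0x00 flags an interior (non-shortcut) node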

// updatePastTries appends the current Root to the list of past tries
func (s *Trie) updatePastTries() {
	if len(s.pastTries) >= maxPastTries {
		copy(s.pastTries, s.pastTries[1:])
		s.pastTries[len(s.pastTries)-1] = s.Root
	} else {
		s.pastTries = append(s.pastTries, s.Root)
	}
}