github.com/ledgerwatch/erigon-lib@v1.0.0/state/domain_committed.go (about)

     1  /*
     2     Copyright 2021 Erigon contributors
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package state
    18  
    19  import (
    20  	"bytes"
    21  	"container/heap"
    22  	"context"
    23  	"encoding/binary"
    24  	"fmt"
    25  	"hash"
    26  	"path/filepath"
    27  	"strings"
    28  	"time"
    29  
    30  	"github.com/google/btree"
    31  	"github.com/ledgerwatch/erigon-lib/common/background"
    32  	"github.com/ledgerwatch/log/v3"
    33  	"golang.org/x/crypto/sha3"
    34  
    35  	"github.com/ledgerwatch/erigon-lib/commitment"
    36  	"github.com/ledgerwatch/erigon-lib/common"
    37  	"github.com/ledgerwatch/erigon-lib/common/length"
    38  	"github.com/ledgerwatch/erigon-lib/compress"
    39  )
    40  
    41  // Defines how to evaluate commitments
    42  type CommitmentMode uint
    43  
    44  const (
    45  	CommitmentModeDisabled CommitmentMode = 0
    46  	CommitmentModeDirect   CommitmentMode = 1
    47  	CommitmentModeUpdate   CommitmentMode = 2
    48  )
    49  
    50  func (m CommitmentMode) String() string {
    51  	switch m {
    52  	case CommitmentModeDisabled:
    53  		return "disabled"
    54  	case CommitmentModeDirect:
    55  		return "direct"
    56  	case CommitmentModeUpdate:
    57  		return "update"
    58  	default:
    59  		return "unknown"
    60  	}
    61  }
    62  
    63  func ParseCommitmentMode(s string) CommitmentMode {
    64  	var mode CommitmentMode
    65  	switch s {
    66  	case "off":
    67  		mode = CommitmentModeDisabled
    68  	case "update":
    69  		mode = CommitmentModeUpdate
    70  	default:
    71  		mode = CommitmentModeDirect
    72  	}
    73  	return mode
    74  }
    75  
// ValueMerger combines a previous and a current value of the same key into a
// single merged value (used when squashing commitment branch data).
type ValueMerger func(prev, current []byte) (merged []byte, err error)

// DomainCommitted wraps a Domain with commitment machinery: it accumulates
// touched keys in an in-memory btree, feeds them to a patricia trie
// implementation, and records basic metrics about the last evaluation.
type DomainCommitted struct {
	*Domain
	mode         CommitmentMode // how commitments are evaluated (disabled/direct/update)
	trace        bool           // when true, prints verbose key-replacement diagnostics
	commTree     *btree.BTreeG[*CommitmentItem] // touched keys, ordered by hashed key
	keccak       hash.Hash // reused keccak256 hasher; not safe for concurrent use
	patriciaTrie commitment.Trie
	branchMerger *commitment.BranchMerger

	comKeys uint64        // number of keys processed by the last ComputeCommitment
	comTook time.Duration // duration of the last ComputeCommitment
	logger  log.Logger
}
    91  
    92  func NewCommittedDomain(d *Domain, mode CommitmentMode, trieVariant commitment.TrieVariant, logger log.Logger) *DomainCommitted {
    93  	return &DomainCommitted{
    94  		Domain:       d,
    95  		patriciaTrie: commitment.InitializeTrie(trieVariant),
    96  		commTree:     btree.NewG[*CommitmentItem](32, commitmentItemLess),
    97  		keccak:       sha3.NewLegacyKeccak256(),
    98  		mode:         mode,
    99  		branchMerger: commitment.NewHexBranchMerger(8192),
   100  		logger:       logger,
   101  	}
   102  }
   103  
// SetCommitmentMode switches how commitments are evaluated from now on.
func (d *DomainCommitted) SetCommitmentMode(m CommitmentMode) { d.mode = m }
   105  
   106  // TouchPlainKey marks plainKey as updated and applies different fn for different key types
   107  // (different behaviour for Code, Account and Storage key modifications).
   108  func (d *DomainCommitted) TouchPlainKey(key, val []byte, fn func(c *CommitmentItem, val []byte)) {
   109  	if d.mode == CommitmentModeDisabled {
   110  		return
   111  	}
   112  	c := &CommitmentItem{plainKey: common.Copy(key), hashedKey: d.hashAndNibblizeKey(key)}
   113  	if d.mode > CommitmentModeDirect {
   114  		fn(c, val)
   115  	}
   116  	d.commTree.ReplaceOrInsert(c)
   117  }
   118  
   119  func (d *DomainCommitted) TouchPlainKeyAccount(c *CommitmentItem, val []byte) {
   120  	if len(val) == 0 {
   121  		c.update.Flags = commitment.DeleteUpdate
   122  		return
   123  	}
   124  	c.update.DecodeForStorage(val)
   125  	c.update.Flags = commitment.BalanceUpdate | commitment.NonceUpdate
   126  	item, found := d.commTree.Get(&CommitmentItem{hashedKey: c.hashedKey})
   127  	if !found {
   128  		return
   129  	}
   130  	if item.update.Flags&commitment.CodeUpdate != 0 {
   131  		c.update.Flags |= commitment.CodeUpdate
   132  		copy(c.update.CodeHashOrStorage[:], item.update.CodeHashOrStorage[:])
   133  	}
   134  }
   135  
   136  func (d *DomainCommitted) TouchPlainKeyStorage(c *CommitmentItem, val []byte) {
   137  	c.update.ValLength = len(val)
   138  	if len(val) == 0 {
   139  		c.update.Flags = commitment.DeleteUpdate
   140  	} else {
   141  		c.update.Flags = commitment.StorageUpdate
   142  		copy(c.update.CodeHashOrStorage[:], val)
   143  	}
   144  }
   145  
   146  func (d *DomainCommitted) TouchPlainKeyCode(c *CommitmentItem, val []byte) {
   147  	c.update.Flags = commitment.CodeUpdate
   148  	item, found := d.commTree.Get(c)
   149  	if !found {
   150  		d.keccak.Reset()
   151  		d.keccak.Write(val)
   152  		copy(c.update.CodeHashOrStorage[:], d.keccak.Sum(nil))
   153  		return
   154  	}
   155  	if item.update.Flags&commitment.BalanceUpdate != 0 {
   156  		c.update.Flags |= commitment.BalanceUpdate
   157  		c.update.Balance.Set(&item.update.Balance)
   158  	}
   159  	if item.update.Flags&commitment.NonceUpdate != 0 {
   160  		c.update.Flags |= commitment.NonceUpdate
   161  		c.update.Nonce = item.update.Nonce
   162  	}
   163  	if item.update.Flags == commitment.DeleteUpdate && len(val) == 0 {
   164  		c.update.Flags = commitment.DeleteUpdate
   165  	} else {
   166  		d.keccak.Reset()
   167  		d.keccak.Write(val)
   168  		copy(c.update.CodeHashOrStorage[:], d.keccak.Sum(nil))
   169  	}
   170  }
   171  
// CommitmentItem is one touched key tracked for commitment evaluation.
type CommitmentItem struct {
	plainKey  []byte            // original (unhashed) key
	hashedKey []byte            // nibblized keccak of plainKey; used as the btree ordering key
	update    commitment.Update // accumulated update; populated only in CommitmentModeUpdate
}

// commitmentItemLess orders items lexicographically by hashed key.
func commitmentItemLess(i, j *CommitmentItem) bool {
	return bytes.Compare(i.hashedKey, j.hashedKey) < 0
}
   181  
   182  // Returns list of both plain and hashed keys. If .mode is CommitmentModeUpdate, updates also returned.
   183  func (d *DomainCommitted) TouchedKeyList() ([][]byte, [][]byte, []commitment.Update) {
   184  	plainKeys := make([][]byte, d.commTree.Len())
   185  	hashedKeys := make([][]byte, d.commTree.Len())
   186  	updates := make([]commitment.Update, d.commTree.Len())
   187  
   188  	j := 0
   189  	d.commTree.Ascend(func(item *CommitmentItem) bool {
   190  		plainKeys[j] = item.plainKey
   191  		hashedKeys[j] = item.hashedKey
   192  		updates[j] = item.update
   193  		j++
   194  		return true
   195  	})
   196  
   197  	d.commTree.Clear(true)
   198  	return plainKeys, hashedKeys, updates
   199  }
   200  
   201  // TODO(awskii): let trie define hashing function
   202  func (d *DomainCommitted) hashAndNibblizeKey(key []byte) []byte {
   203  	hashedKey := make([]byte, length.Hash)
   204  
   205  	d.keccak.Reset()
   206  	d.keccak.Write(key[:length.Addr])
   207  	copy(hashedKey[:length.Hash], d.keccak.Sum(nil))
   208  
   209  	if len(key[length.Addr:]) > 0 {
   210  		hashedKey = append(hashedKey, make([]byte, length.Hash)...)
   211  		d.keccak.Reset()
   212  		d.keccak.Write(key[length.Addr:])
   213  		copy(hashedKey[length.Hash:], d.keccak.Sum(nil))
   214  	}
   215  
   216  	nibblized := make([]byte, len(hashedKey)*2)
   217  	for i, b := range hashedKey {
   218  		nibblized[i*2] = (b >> 4) & 0xf
   219  		nibblized[i*2+1] = b & 0xf
   220  	}
   221  	return nibblized
   222  }
   223  
   224  func (d *DomainCommitted) storeCommitmentState(blockNum, txNum uint64) error {
   225  	var state []byte
   226  	var err error
   227  
   228  	switch trie := (d.patriciaTrie).(type) {
   229  	case *commitment.HexPatriciaHashed:
   230  		state, err = trie.EncodeCurrentState(nil)
   231  		if err != nil {
   232  			return err
   233  		}
   234  	default:
   235  		return fmt.Errorf("unsupported state storing for patricia trie type: %T", d.patriciaTrie)
   236  	}
   237  	cs := &commitmentState{txNum: txNum, trieState: state, blockNum: blockNum}
   238  	encoded, err := cs.Encode()
   239  	if err != nil {
   240  		return err
   241  	}
   242  
   243  	var stepbuf [2]byte
   244  	step := uint16(txNum / d.aggregationStep)
   245  	binary.BigEndian.PutUint16(stepbuf[:], step)
   246  	if err = d.Domain.Put(keyCommitmentState, stepbuf[:], encoded); err != nil {
   247  		return err
   248  	}
   249  	return nil
   250  }
   251  
// nolint
// replaceKeyWithReference looks fullKey up in the merged files' btree indexes
// and, on a hit, builds the compact reference form
// [2-byte big-endian step | big-endian ordinal] in shortKey's place.
// Returns whether any file in list contained the key.
//
// NOTE(review): shortKey is a slice header passed by value, so the
// `shortKey = encodeU64(...)` assignment below is not visible to the caller —
// confirm callers do not rely on shortKey being rewritten in place.
func (d *DomainCommitted) replaceKeyWithReference(fullKey, shortKey []byte, typeAS string, list ...*filesItem) bool {
	numBuf := [2]byte{}
	var found bool
	for _, item := range list {
		//g := item.decompressor.MakeGetter()
		//index := recsplit.NewIndexReader(item.index)

		cur, err := item.bindex.Seek(fullKey)
		if err != nil {
			continue
		}
		// step identifies which merged file the reference points into.
		step := uint16(item.endTxNum / d.aggregationStep)
		binary.BigEndian.PutUint16(numBuf[:], step)

		shortKey = encodeU64(cur.Ordinal(), numBuf[:])

		if d.trace {
			fmt.Printf("replacing %s [%x] => {%x} [step=%d, offset=%d, file=%s.%d-%d]\n", typeAS, fullKey, shortKey, step, cur.Ordinal(), typeAS, item.startTxNum, item.endTxNum)
		}
		found = true
		break
	}
	//if !found {
	//	log.Warn("bt index key replacement seek failed", "key", fmt.Sprintf("%x", fullKey))
	//}
	return found
}
   280  
// nolint
// lookupShortenedKey resolves a shortened key (2-byte file step + big-endian
// offset) back to the full key by looking the offset up in the file whose
// txNum range covers the step. Returns whether a covering file was found.
//
// NOTE(review): fullKey is a slice header passed by value, so the
// `fullKey = cur.Key()` assignment below does not propagate to the caller —
// the resolved key is only observable via the trace output; verify intended.
func (d *DomainCommitted) lookupShortenedKey(shortKey, fullKey []byte, typAS string, list []*filesItem) bool {
	fileStep, offset := shortenedKey(shortKey)
	expected := uint64(fileStep) * d.aggregationStep

	var found bool
	for _, item := range list {
		if item.startTxNum > expected || item.endTxNum < expected {
			continue
		}

		cur := item.bindex.OrdinalLookup(offset)
		//nolint
		fullKey = cur.Key()
		if d.trace {
			fmt.Printf("offsetToKey %s [%x]=>{%x} step=%d offset=%d, file=%s.%d-%d.kv\n", typAS, fullKey, shortKey, fileStep, offset, typAS, item.startTxNum, item.endTxNum)
		}
		found = true
		break
	}
	return found
}
   303  
   304  // commitmentValTransform parses the value of the commitment record to extract references
   305  // to accounts and storage items, then looks them up in the new, merged files, and replaces them with
   306  // the updated references
   307  func (d *DomainCommitted) commitmentValTransform(files *SelectedStaticFiles, merged *MergedFiles, val commitment.BranchData) ([]byte, error) {
   308  	if len(val) == 0 {
   309  		return nil, nil
   310  	}
   311  	accountPlainKeys, storagePlainKeys, err := val.ExtractPlainKeys()
   312  	if err != nil {
   313  		return nil, err
   314  	}
   315  
   316  	transAccountPks := make([][]byte, 0, len(accountPlainKeys))
   317  	var apkBuf, spkBuf []byte
   318  	for _, accountPlainKey := range accountPlainKeys {
   319  		if len(accountPlainKey) == length.Addr {
   320  			// Non-optimised key originating from a database record
   321  			apkBuf = append(apkBuf[:0], accountPlainKey...)
   322  		} else {
   323  			f := d.lookupShortenedKey(accountPlainKey, apkBuf, "account", files.accounts)
   324  			if !f {
   325  				fmt.Printf("lost key %x\n", accountPlainKeys)
   326  			}
   327  		}
   328  		d.replaceKeyWithReference(apkBuf, accountPlainKey, "account", merged.accounts)
   329  		transAccountPks = append(transAccountPks, accountPlainKey)
   330  	}
   331  
   332  	transStoragePks := make([][]byte, 0, len(storagePlainKeys))
   333  	for _, storagePlainKey := range storagePlainKeys {
   334  		if len(storagePlainKey) == length.Addr+length.Hash {
   335  			// Non-optimised key originating from a database record
   336  			spkBuf = append(spkBuf[:0], storagePlainKey...)
   337  		} else {
   338  			// Optimised key referencing a state file record (file number and offset within the file)
   339  			f := d.lookupShortenedKey(storagePlainKey, spkBuf, "storage", files.storage)
   340  			if !f {
   341  				fmt.Printf("lost skey %x\n", storagePlainKey)
   342  			}
   343  		}
   344  
   345  		d.replaceKeyWithReference(spkBuf, storagePlainKey, "storage", merged.storage)
   346  		transStoragePks = append(transStoragePks, storagePlainKey)
   347  	}
   348  
   349  	transValBuf, err := val.ReplacePlainKeys(transAccountPks, transStoragePks, nil)
   350  	if err != nil {
   351  		return nil, err
   352  	}
   353  	return transValBuf, nil
   354  }
   355  
// mergeFiles merges the commitment domain files selected in oldFiles for the
// ranges described by r into new values/index/history files, using up to
// `workers` compression workers. History and inverted-index parts are
// delegated to d.History.mergeFiles; the values (.kv) file is rebuilt here by
// a multi-way merge over the source files. On any error path the deferred
// cleanup closes every partially-built output.
func (d *DomainCommitted) mergeFiles(ctx context.Context, oldFiles SelectedStaticFiles, mergedFiles MergedFiles, r DomainRanges, workers int, ps *background.ProgressSet) (valuesIn, indexIn, historyIn *filesItem, err error) {
	if !r.any() {
		return
	}

	domainFiles := oldFiles.commitment
	indexFiles := oldFiles.commitmentIdx
	historyFiles := oldFiles.commitmentHist

	var comp *compress.Compressor
	// closeItem stays true until the merge fully succeeds, so any early return
	// triggers the cleanup of partially-built outputs below.
	var closeItem bool = true
	defer func() {
		if closeItem {
			if comp != nil {
				comp.Close()
			}
			if indexIn != nil {
				if indexIn.decompressor != nil {
					indexIn.decompressor.Close()
				}
				if indexIn.index != nil {
					indexIn.index.Close()
				}
				if indexIn.bindex != nil {
					indexIn.bindex.Close()
				}
			}
			if historyIn != nil {
				if historyIn.decompressor != nil {
					historyIn.decompressor.Close()
				}
				if historyIn.index != nil {
					historyIn.index.Close()
				}
				if historyIn.bindex != nil {
					historyIn.bindex.Close()
				}
			}
			if valuesIn != nil {
				if valuesIn.decompressor != nil {
					valuesIn.decompressor.Close()
				}
				if valuesIn.index != nil {
					valuesIn.index.Close()
				}
				if valuesIn.bindex != nil {
					valuesIn.bindex.Close()
				}
			}
		}
	}()
	// Merge the history and inverted-index files via the shared History logic.
	if indexIn, historyIn, err = d.History.mergeFiles(ctx, indexFiles, historyFiles,
		HistoryRanges{
			historyStartTxNum: r.historyStartTxNum,
			historyEndTxNum:   r.historyEndTxNum,
			history:           r.history,
			indexStartTxNum:   r.indexStartTxNum,
			indexEndTxNum:     r.indexEndTxNum,
			index:             r.index}, workers, ps); err != nil {
		return nil, nil, nil, err
	}

	if r.values {
		datFileName := fmt.Sprintf("%s.%d-%d.kv", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep)
		datPath := filepath.Join(d.dir, datFileName)
		p := ps.AddNew(datFileName, 1)
		defer ps.Delete(p)

		if comp, err = compress.NewCompressor(ctx, "merge", datPath, d.dir, compress.MinPatternScore, workers, log.LvlTrace, d.logger); err != nil {
			return nil, nil, nil, fmt.Errorf("merge %s compressor: %w", d.filenameBase, err)
		}
		// Seed the multi-way merge: one heap cursor per source file, positioned
		// at its first key/value pair.
		var cp CursorHeap
		heap.Init(&cp)
		for _, item := range domainFiles {
			g := item.decompressor.MakeGetter()
			g.Reset(0)
			if g.HasNext() {
				key, _ := g.NextUncompressed()
				var val []byte
				if d.compressVals {
					val, _ = g.Next(nil)
				} else {
					val, _ = g.NextUncompressed()
				}
				if d.trace {
					fmt.Printf("merge: read value '%x'\n", key)
				}
				heap.Push(&cp, &CursorItem{
					t:        FILE_CURSOR,
					dg:       g,
					key:      key,
					val:      val,
					endTxNum: item.endTxNum,
					reverse:  true,
				})
			}
		}
		keyCount := 0
		// In the loop below, the pair `keyBuf=>valBuf` is always 1 item behind `lastKey=>lastVal`.
		// `lastKey` and `lastVal` are taken from the top of the multi-way merge (assisted by the CursorHeap cp), but not processed right away
		// instead, the pair from the previous iteration is processed first - `keyBuf=>valBuf`. After that, `keyBuf` and `valBuf` are assigned
		// to `lastKey` and `lastVal` correspondingly, and the next step of multi-way merge happens. Therefore, after the multi-way merge loop
		// (when CursorHeap cp is empty), there is a need to process the last pair `keyBuf=>valBuf`, because it was one step behind
		var keyBuf, valBuf []byte
		for cp.Len() > 0 {
			lastKey := common.Copy(cp[0].key)
			lastVal := common.Copy(cp[0].val)
			// Advance all the items that have this key (including the top)
			for cp.Len() > 0 && bytes.Equal(cp[0].key, lastKey) {
				ci1 := cp[0]
				if ci1.dg.HasNext() {
					ci1.key, _ = ci1.dg.NextUncompressed()
					if d.compressVals {
						ci1.val, _ = ci1.dg.Next(ci1.val[:0])
					} else {
						ci1.val, _ = ci1.dg.NextUncompressed()
					}
					heap.Fix(&cp, 0)
				} else {
					heap.Pop(&cp)
				}
			}
			// For the rest of types, empty value means deletion
			skip := r.valuesStartTxNum == 0 && len(lastVal) == 0
			if !skip {
				if keyBuf != nil {
					// NOTE(review): pairs flushed here are written without
					// commitmentValTransform; only the final pair after the
					// loop goes through it — confirm this asymmetry is intended.
					if err = comp.AddUncompressedWord(keyBuf); err != nil {
						return nil, nil, nil, err
					}
					keyCount++ // Only counting keys, not values
					switch d.compressVals {
					case true:
						if err = comp.AddWord(valBuf); err != nil {
							return nil, nil, nil, err
						}
					default:
						if err = comp.AddUncompressedWord(valBuf); err != nil {
							return nil, nil, nil, err
						}
					}
				}
				keyBuf = append(keyBuf[:0], lastKey...)
				valBuf = append(valBuf[:0], lastVal...)
			}
		}
		// Flush the final deferred pair, rewriting file references in its
		// value to point into the freshly merged account/storage files.
		if keyBuf != nil {
			if err = comp.AddUncompressedWord(keyBuf); err != nil {
				return nil, nil, nil, err
			}
			keyCount++ // Only counting keys, not values
			//fmt.Printf("last heap key %x\n", keyBuf)
			valBuf, err = d.commitmentValTransform(&oldFiles, &mergedFiles, valBuf)
			if err != nil {
				return nil, nil, nil, fmt.Errorf("merge: 2valTransform [%x] %w", valBuf, err)
			}
			if d.compressVals {
				if err = comp.AddWord(valBuf); err != nil {
					return nil, nil, nil, err
				}
			} else {
				if err = comp.AddUncompressedWord(valBuf); err != nil {
					return nil, nil, nil, err
				}
			}
		}
		if err = comp.Compress(); err != nil {
			return nil, nil, nil, err
		}
		comp.Close()
		comp = nil
		valuesIn = newFilesItem(r.valuesStartTxNum, r.valuesEndTxNum, d.aggregationStep)
		if valuesIn.decompressor, err = compress.NewDecompressor(datPath); err != nil {
			return nil, nil, nil, fmt.Errorf("merge %s decompressor [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err)
		}
		ps.Delete(p)

		idxFileName := fmt.Sprintf("%s.%d-%d.kvi", d.filenameBase, r.valuesStartTxNum/d.aggregationStep, r.valuesEndTxNum/d.aggregationStep)
		idxPath := filepath.Join(d.dir, idxFileName)

		// NOTE(review): the progress item is registered under datFileName even
		// though it tracks the .kvi build — possibly idxFileName was intended.
		p = ps.AddNew(datFileName, uint64(keyCount))
		defer ps.Delete(p)
		if valuesIn.index, err = buildIndexThenOpen(ctx, valuesIn.decompressor, idxPath, d.dir, keyCount, false /* values */, p, d.logger, d.noFsync); err != nil {
			return nil, nil, nil, fmt.Errorf("merge %s buildIndex [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err)
		}

		btPath := strings.TrimSuffix(idxPath, "kvi") + "bt"
		valuesIn.bindex, err = CreateBtreeIndexWithDecompressor(btPath, 2048, valuesIn.decompressor, p, d.tmpdir, d.logger)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("create btindex %s [%d-%d]: %w", d.filenameBase, r.valuesStartTxNum, r.valuesEndTxNum, err)
		}
	}
	closeItem = false
	d.stats.MergesCount++
	d.mergesCount++
	return
}
   552  
   553  // Evaluates commitment for processed state. Commit=true - store trie state after evaluation
   554  func (d *DomainCommitted) ComputeCommitment(trace bool) (rootHash []byte, branchNodeUpdates map[string]commitment.BranchData, err error) {
   555  	defer func(s time.Time) { d.comTook = time.Since(s) }(time.Now())
   556  
   557  	touchedKeys, hashedKeys, updates := d.TouchedKeyList()
   558  	d.comKeys = uint64(len(touchedKeys))
   559  
   560  	if len(touchedKeys) == 0 {
   561  		rootHash, err = d.patriciaTrie.RootHash()
   562  		return rootHash, nil, err
   563  	}
   564  
   565  	// data accessing functions should be set once before
   566  	d.patriciaTrie.Reset()
   567  	d.patriciaTrie.SetTrace(trace)
   568  
   569  	switch d.mode {
   570  	case CommitmentModeDirect:
   571  		rootHash, branchNodeUpdates, err = d.patriciaTrie.ReviewKeys(touchedKeys, hashedKeys)
   572  		if err != nil {
   573  			return nil, nil, err
   574  		}
   575  	case CommitmentModeUpdate:
   576  		rootHash, branchNodeUpdates, err = d.patriciaTrie.ProcessUpdates(touchedKeys, hashedKeys, updates)
   577  		if err != nil {
   578  			return nil, nil, err
   579  		}
   580  	case CommitmentModeDisabled:
   581  		return nil, nil, nil
   582  	default:
   583  		return nil, nil, fmt.Errorf("invalid commitment mode: %d", d.mode)
   584  	}
   585  	return rootHash, branchNodeUpdates, err
   586  }
   587  
   588  var keyCommitmentState = []byte("state")
   589  
   590  // SeekCommitment searches for last encoded state from DomainCommitted
   591  // and if state found, sets it up to current domain
   592  func (d *DomainCommitted) SeekCommitment(aggStep, sinceTx uint64) (blockNum, txNum uint64, err error) {
   593  	if d.patriciaTrie.Variant() != commitment.VariantHexPatriciaTrie {
   594  		return 0, 0, fmt.Errorf("state storing is only supported hex patricia trie")
   595  	}
   596  	// todo add support of bin state dumping
   597  
   598  	var (
   599  		latestState []byte
   600  		stepbuf     [2]byte
   601  		step               = uint16(sinceTx/aggStep) - 1
   602  		latestTxNum uint64 = sinceTx - 1
   603  	)
   604  
   605  	d.SetTxNum(latestTxNum)
   606  	ctx := d.MakeContext()
   607  	defer ctx.Close()
   608  
   609  	for {
   610  		binary.BigEndian.PutUint16(stepbuf[:], step)
   611  
   612  		s, err := ctx.Get(keyCommitmentState, stepbuf[:], d.tx)
   613  		if err != nil {
   614  			return 0, 0, err
   615  		}
   616  		if len(s) < 8 {
   617  			break
   618  		}
   619  		v := binary.BigEndian.Uint64(s)
   620  		if v == latestTxNum && len(latestState) != 0 {
   621  			break
   622  		}
   623  		latestTxNum, latestState = v, s
   624  		lookupTxN := latestTxNum + aggStep
   625  		step = uint16(latestTxNum/aggStep) + 1
   626  		d.SetTxNum(lookupTxN)
   627  	}
   628  
   629  	var latest commitmentState
   630  	if err := latest.Decode(latestState); err != nil {
   631  		return 0, 0, nil
   632  	}
   633  
   634  	if hext, ok := d.patriciaTrie.(*commitment.HexPatriciaHashed); ok {
   635  		if err := hext.SetState(latest.trieState); err != nil {
   636  			return 0, 0, err
   637  		}
   638  	} else {
   639  		return 0, 0, fmt.Errorf("state storing is only supported hex patricia trie")
   640  	}
   641  
   642  	return latest.blockNum, latest.txNum, nil
   643  }
   644  
   645  type commitmentState struct {
   646  	txNum     uint64
   647  	blockNum  uint64
   648  	trieState []byte
   649  }
   650  
   651  func (cs *commitmentState) Decode(buf []byte) error {
   652  	if len(buf) < 10 {
   653  		return fmt.Errorf("ivalid commitment state buffer size")
   654  	}
   655  	pos := 0
   656  	cs.txNum = binary.BigEndian.Uint64(buf[pos : pos+8])
   657  	pos += 8
   658  	cs.blockNum = binary.BigEndian.Uint64(buf[pos : pos+8])
   659  	pos += 8
   660  	cs.trieState = make([]byte, binary.BigEndian.Uint16(buf[pos:pos+2]))
   661  	pos += 2
   662  	if len(cs.trieState) == 0 && len(buf) == 10 {
   663  		return nil
   664  	}
   665  	copy(cs.trieState, buf[pos:pos+len(cs.trieState)])
   666  	return nil
   667  }
   668  
   669  func (cs *commitmentState) Encode() ([]byte, error) {
   670  	buf := bytes.NewBuffer(nil)
   671  	var v [18]byte
   672  	binary.BigEndian.PutUint64(v[:], cs.txNum)
   673  	binary.BigEndian.PutUint64(v[8:16], cs.blockNum)
   674  	binary.BigEndian.PutUint16(v[16:18], uint16(len(cs.trieState)))
   675  	if _, err := buf.Write(v[:]); err != nil {
   676  		return nil, err
   677  	}
   678  	if _, err := buf.Write(cs.trieState); err != nil {
   679  		return nil, err
   680  	}
   681  	return buf.Bytes(), nil
   682  }
   683  
   684  func decodeU64(from []byte) uint64 {
   685  	var i uint64
   686  	for _, b := range from {
   687  		i = (i << 8) | uint64(b)
   688  	}
   689  	return i
   690  }
   691  
   692  func encodeU64(i uint64, to []byte) []byte {
   693  	// writes i to b in big endian byte order, using the least number of bytes needed to represent i.
   694  	switch {
   695  	case i < (1 << 8):
   696  		return append(to, byte(i))
   697  	case i < (1 << 16):
   698  		return append(to, byte(i>>8), byte(i))
   699  	case i < (1 << 24):
   700  		return append(to, byte(i>>16), byte(i>>8), byte(i))
   701  	case i < (1 << 32):
   702  		return append(to, byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
   703  	case i < (1 << 40):
   704  		return append(to, byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
   705  	case i < (1 << 48):
   706  		return append(to, byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
   707  	case i < (1 << 56):
   708  		return append(to, byte(i>>48), byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
   709  	default:
   710  		return append(to, byte(i>>56), byte(i>>48), byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
   711  	}
   712  }
   713  
   714  // Optimised key referencing a state file record (file number and offset within the file)
   715  func shortenedKey(apk []byte) (step uint16, offset uint64) {
   716  	step = binary.BigEndian.Uint16(apk[:2])
   717  	return step, decodeU64(apk[1:])
   718  }