github.com/theQRL/go-zond@v0.1.1/trie/triedb/pathdb/history.go (about)

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
    16  
    17  package pathdb
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"errors"
    23  	"fmt"
    24  	"time"
    25  
    26  	"github.com/theQRL/go-zond/common"
    27  	"github.com/theQRL/go-zond/core/rawdb"
    28  	"github.com/theQRL/go-zond/zonddb"
    29  	"github.com/theQRL/go-zond/log"
    30  	"github.com/theQRL/go-zond/trie/triestate"
    31  	"golang.org/x/exp/slices"
    32  )
    33  
    34  // State history records the state changes involved in executing a block. The
    35  // state can be reverted to the previous version by applying the associated
    36  // history object (state reverse diff). State history objects are kept to
    37  // guarantee that the system can perform state rollbacks in case of deep reorg.
    38  //
    39  // Each state transition will generate a state history object. Note that not
    40  // every block has a corresponding state history object. If a block performs
// no state changes whatsoever, no state history object is created for it. Each
    42  // will have a sequentially increasing number acting as its unique identifier.
    43  //
    44  // The state history is written to disk (ancient store) when the corresponding
    45  // diff layer is merged into the disk layer. At the same time, system can prune
    46  // the oldest histories according to config.
    47  //
    48  //                                                        Disk State
    49  //                                                            ^
    50  //                                                            |
    51  //   +------------+     +---------+     +---------+     +---------+
    52  //   | Init State |---->| State 1 |---->|   ...   |---->| State n |
    53  //   +------------+     +---------+     +---------+     +---------+
    54  //
    55  //                     +-----------+      +------+     +-----------+
    56  //                     | History 1 |----> | ...  |---->| History n |
    57  //                     +-----------+      +------+     +-----------+
    58  //
    59  // # Rollback
    60  //
    61  // If the system wants to roll back to a previous state n, it needs to ensure
    62  // all history objects from n+1 up to the current disk layer are existent. The
    63  // history objects are applied to the state in reverse order, starting from the
    64  // current disk layer.
    65  
const (
	// The length of an encoded account index entry: the account address,
	// a 1-byte data length, and three 4-byte big-endian fields (data
	// offset, storage-index offset, storage-slot count). See
	// accountIndex.encode for the exact layout.
	accountIndexSize = common.AddressLength + 13

	// The length of an encoded slot index entry: the slot-key hash, a
	// 1-byte data length and a 4-byte big-endian data offset. See
	// slotIndex.encode for the exact layout.
	slotIndexSize = common.HashLength + 5

	// The length of the fixed-size part of an encoded meta object: a
	// 1-byte version, two 32-byte roots (parent and post-state) and an
	// 8-byte block number. A variable-length address list may follow.
	historyMetaSize = 9 + 2*common.HashLength

	stateHistoryVersion = uint8(0) // initial version of state history structure.
)
    73  
// Each state history entry consists of five elements:
    75  //
    76  // # metadata
    77  //  This object contains a few meta fields, such as the associated state root,
    78  //  block number, version tag and so on. This object may contain an extra
    79  //  accountHash list which means the storage changes belong to these accounts
    80  //  are not complete due to large contract destruction. The incomplete history
    81  //  can not be used for rollback and serving archive state request.
    82  //
    83  // # account index
    84  //  This object contains some index information of account. For example, offset
    85  //  and length indicate the location of the data belonging to the account. Besides,
    86  //  storageOffset and storageSlots indicate the storage modification location
    87  //  belonging to the account.
    88  //
    89  //  The size of each account index is *fixed*, and all indexes are sorted
    90  //  lexicographically. Thus binary search can be performed to quickly locate a
    91  //  specific account.
    92  //
    93  // # account data
    94  //  Account data is a concatenated byte stream composed of all account data.
    95  //  The account data can be solved by the offset and length info indicated
    96  //  by corresponding account index.
    97  //
    98  //            fixed size
    99  //         ^             ^
   100  //        /               \
   101  //        +-----------------+-----------------+----------------+-----------------+
   102  //        | Account index 1 | Account index 2 |       ...      | Account index N |
   103  //        +-----------------+-----------------+----------------+-----------------+
   104  //        |
   105  //        |     length
   106  // offset |----------------+
   107  //        v                v
   108  //        +----------------+----------------+----------------+----------------+
   109  //        | Account data 1 | Account data 2 |       ...      | Account data N |
   110  //        +----------------+----------------+----------------+----------------+
   111  //
   112  // # storage index
   113  //  This object is similar with account index. It's also fixed size and contains
   114  //  the location info of storage slot data.
   115  //
   116  // # storage data
   117  //  Storage data is a concatenated byte stream composed of all storage slot data.
   118  //  The storage slot data can be solved by the location info indicated by
   119  //  corresponding account index and storage slot index.
   120  //
   121  //                    fixed size
   122  //                 ^             ^
   123  //                /               \
   124  //                +-----------------+-----------------+----------------+-----------------+
   125  //                | Account index 1 | Account index 2 |       ...      | Account index N |
   126  //                +-----------------+-----------------+----------------+-----------------+
   127  //                |
   128  //                |                    storage slots
   129  // storage offset |-----------------------------------------------------+
   130  //                v                                                     v
   131  //                +-----------------+-----------------+-----------------+
   132  //                | storage index 1 | storage index 2 | storage index 3 |
   133  //                +-----------------+-----------------+-----------------+
   134  //                |     length
   135  //         offset |-------------+
   136  //                v             v
   137  //                +-------------+
   138  //                | slot data 1 |
   139  //                +-------------+
   140  
// accountIndex describes the metadata belonging to an account.
//
// The encoded form is fixed size (accountIndexSize bytes) and the entries
// are sorted by address, which allows binary search over the concatenated
// index stream.
type accountIndex struct {
	address       common.Address // The address of account
	length        uint8          // The length of account data, size limited by 255
	offset        uint32         // The offset of item in account data table
	storageOffset uint32         // The offset of storage index in storage index table
	storageSlots  uint32         // The number of mutated storage slots belonging to the account
}
   149  
   150  // encode packs account index into byte stream.
   151  func (i *accountIndex) encode() []byte {
   152  	var buf [accountIndexSize]byte
   153  	copy(buf[:], i.address.Bytes())
   154  	buf[common.AddressLength] = i.length
   155  	binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
   156  	binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
   157  	binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
   158  	return buf[:]
   159  }
   160  
   161  // decode unpacks account index from byte stream.
   162  func (i *accountIndex) decode(blob []byte) {
   163  	i.address = common.BytesToAddress(blob[:common.AddressLength])
   164  	i.length = blob[common.AddressLength]
   165  	i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
   166  	i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
   167  	i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
   168  }
   169  
// slotIndex describes the metadata belonging to a storage slot.
//
// The encoded form is fixed size (slotIndexSize bytes) and the entries of
// one account are sorted by slot hash.
type slotIndex struct {
	hash   common.Hash // The hash of slot key
	length uint8       // The length of storage slot, up to 32 bytes defined in protocol
	offset uint32      // The offset of item in storage slot data table
}
   176  
   177  // encode packs slot index into byte stream.
   178  func (i *slotIndex) encode() []byte {
   179  	var buf [slotIndexSize]byte
   180  	copy(buf[:common.HashLength], i.hash.Bytes())
   181  	buf[common.HashLength] = i.length
   182  	binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
   183  	return buf[:]
   184  }
   185  
   186  // decode unpack slot index from the byte stream.
   187  func (i *slotIndex) decode(blob []byte) {
   188  	i.hash = common.BytesToHash(blob[:common.HashLength])
   189  	i.length = blob[common.HashLength]
   190  	i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
   191  }
   192  
// meta describes the meta data of state history object.
type meta struct {
	version    uint8            // version tag of history object
	parent     common.Hash      // prev-state root before the state transition
	root       common.Hash      // post-state root after the state transition
	block      uint64           // associated block number
	incomplete []common.Address // addresses whose storage change set is incomplete (large contract destruction)
}
   201  
   202  // encode packs the meta object into byte stream.
   203  func (m *meta) encode() []byte {
   204  	buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength)
   205  	buf[0] = m.version
   206  	copy(buf[1:1+common.HashLength], m.parent.Bytes())
   207  	copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
   208  	binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
   209  	for i, h := range m.incomplete {
   210  		copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
   211  	}
   212  	return buf[:]
   213  }
   214  
   215  // decode unpacks the meta object from byte stream.
   216  func (m *meta) decode(blob []byte) error {
   217  	if len(blob) < 1 {
   218  		return fmt.Errorf("no version tag")
   219  	}
   220  	switch blob[0] {
   221  	case stateHistoryVersion:
   222  		if len(blob) < historyMetaSize {
   223  			return fmt.Errorf("invalid state history meta, len: %d", len(blob))
   224  		}
   225  		if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
   226  			return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
   227  		}
   228  		m.version = blob[0]
   229  		m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
   230  		m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
   231  		m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
   232  		for pos := historyMetaSize; pos < len(blob); {
   233  			m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
   234  			pos += common.AddressLength
   235  		}
   236  		return nil
   237  	default:
   238  		return fmt.Errorf("unknown version %d", blob[0])
   239  	}
   240  }
   241  
// history represents a set of state changes belong to a block along with
// the metadata including the state roots involved in the state transition.
// State history objects in disk are linked with each other by a unique id
// (8-bytes integer), the oldest state history object can be pruned on demand
// in order to control the storage size.
type history struct {
	meta        *meta                                     // Meta data of history
	accounts    map[common.Address][]byte                 // Account data keyed by its address
	accountList []common.Address                          // Sorted list of mutated account addresses
	storages    map[common.Address]map[common.Hash][]byte // Storage data keyed by account address and slot hash
	storageList map[common.Address][]common.Hash          // Sorted slot hash list per account
}
   254  
   255  // newHistory constructs the state history object with provided state change set.
   256  func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history {
   257  	var (
   258  		accountList []common.Address
   259  		storageList = make(map[common.Address][]common.Hash)
   260  		incomplete  []common.Address
   261  	)
   262  	for addr := range states.Accounts {
   263  		accountList = append(accountList, addr)
   264  	}
   265  	slices.SortFunc(accountList, common.Address.Cmp)
   266  
   267  	for addr, slots := range states.Storages {
   268  		slist := make([]common.Hash, 0, len(slots))
   269  		for slotHash := range slots {
   270  			slist = append(slist, slotHash)
   271  		}
   272  		slices.SortFunc(slist, common.Hash.Cmp)
   273  		storageList[addr] = slist
   274  	}
   275  	for addr := range states.Incomplete {
   276  		incomplete = append(incomplete, addr)
   277  	}
   278  	slices.SortFunc(incomplete, common.Address.Cmp)
   279  
   280  	return &history{
   281  		meta: &meta{
   282  			version:    stateHistoryVersion,
   283  			parent:     parent,
   284  			root:       root,
   285  			block:      block,
   286  			incomplete: incomplete,
   287  		},
   288  		accounts:    states.Accounts,
   289  		accountList: accountList,
   290  		storages:    states.Storages,
   291  		storageList: storageList,
   292  	}
   293  }
   294  
   295  // encode serializes the state history and returns four byte streams represent
   296  // concatenated account/storage data, account/storage indexes respectively.
   297  func (h *history) encode() ([]byte, []byte, []byte, []byte) {
   298  	var (
   299  		slotNumber     uint32 // the number of processed slots
   300  		accountData    []byte // the buffer for concatenated account data
   301  		storageData    []byte // the buffer for concatenated storage data
   302  		accountIndexes []byte // the buffer for concatenated account index
   303  		storageIndexes []byte // the buffer for concatenated storage index
   304  	)
   305  	for _, addr := range h.accountList {
   306  		accIndex := accountIndex{
   307  			address: addr,
   308  			length:  uint8(len(h.accounts[addr])),
   309  			offset:  uint32(len(accountData)),
   310  		}
   311  		slots, exist := h.storages[addr]
   312  		if exist {
   313  			// Encode storage slots in order
   314  			for _, slotHash := range h.storageList[addr] {
   315  				sIndex := slotIndex{
   316  					hash:   slotHash,
   317  					length: uint8(len(slots[slotHash])),
   318  					offset: uint32(len(storageData)),
   319  				}
   320  				storageData = append(storageData, slots[slotHash]...)
   321  				storageIndexes = append(storageIndexes, sIndex.encode()...)
   322  			}
   323  			// Fill up the storage meta in account index
   324  			accIndex.storageOffset = slotNumber
   325  			accIndex.storageSlots = uint32(len(slots))
   326  			slotNumber += uint32(len(slots))
   327  		}
   328  		accountData = append(accountData, h.accounts[addr]...)
   329  		accountIndexes = append(accountIndexes, accIndex.encode()...)
   330  	}
   331  	return accountData, storageData, accountIndexes, storageIndexes
   332  }
   333  
// decoder wraps the byte streams for decoding with extra meta fields.
//
// The read cursors track how far each stream has been consumed; readAccount
// and readStorage use them to enforce that the data is consumed strictly in
// order with no gaps, detecting corruption early.
type decoder struct {
	accountData    []byte // the buffer for concatenated account data
	storageData    []byte // the buffer for concatenated storage data
	accountIndexes []byte // the buffer for concatenated account index
	storageIndexes []byte // the buffer for concatenated storage index

	lastAccount       *common.Address // the address of last resolved account, nil before the first read
	lastAccountRead   uint32          // the read-cursor position of account data
	lastSlotIndexRead uint32          // the read-cursor position of storage slot index
	lastSlotDataRead  uint32          // the read-cursor position of storage slot data
}
   346  
   347  // verify validates the provided byte streams for decoding state history. A few
   348  // checks will be performed to quickly detect data corruption. The byte stream
   349  // is regarded as corrupted if:
   350  //
   351  // - account indexes buffer is empty(empty state set is invalid)
   352  // - account indexes/storage indexer buffer is not aligned
   353  //
   354  // note, these situations are allowed:
   355  //
   356  // - empty account data: all accounts were not present
   357  // - empty storage set: no slots are modified
   358  func (r *decoder) verify() error {
   359  	if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
   360  		return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
   361  	}
   362  	if len(r.storageIndexes)%slotIndexSize != 0 {
   363  		return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
   364  	}
   365  	return nil
   366  }
   367  
   368  // readAccount parses the account from the byte stream with specified position.
   369  func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
   370  	// Decode account index from the index byte stream.
   371  	var index accountIndex
   372  	if (pos+1)*accountIndexSize > len(r.accountIndexes) {
   373  		return accountIndex{}, nil, errors.New("account data buffer is corrupted")
   374  	}
   375  	index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
   376  
   377  	// Perform validation before parsing account data, ensure
   378  	// - account is sorted in order in byte stream
   379  	// - account data is strictly encoded with no gap inside
   380  	// - account data is not out-of-slice
   381  	if r.lastAccount != nil { // zero address is possible
   382  		if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
   383  			return accountIndex{}, nil, errors.New("account is not in order")
   384  		}
   385  	}
   386  	if index.offset != r.lastAccountRead {
   387  		return accountIndex{}, nil, errors.New("account data buffer is gaped")
   388  	}
   389  	last := index.offset + uint32(index.length)
   390  	if uint32(len(r.accountData)) < last {
   391  		return accountIndex{}, nil, errors.New("account data buffer is corrupted")
   392  	}
   393  	data := r.accountData[index.offset:last]
   394  
   395  	r.lastAccount = &index.address
   396  	r.lastAccountRead = last
   397  
   398  	return index, data, nil
   399  }
   400  
   401  // readStorage parses the storage slots from the byte stream with specified account.
   402  func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
   403  	var (
   404  		last    common.Hash
   405  		list    []common.Hash
   406  		storage = make(map[common.Hash][]byte)
   407  	)
   408  	for j := 0; j < int(accIndex.storageSlots); j++ {
   409  		var (
   410  			index slotIndex
   411  			start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
   412  			end   = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
   413  		)
   414  		// Perform validation before parsing storage slot data, ensure
   415  		// - slot index is not out-of-slice
   416  		// - slot data is not out-of-slice
   417  		// - slot is sorted in order in byte stream
   418  		// - slot indexes is strictly encoded with no gap inside
   419  		// - slot data is strictly encoded with no gap inside
   420  		if start != r.lastSlotIndexRead {
   421  			return nil, nil, errors.New("storage index buffer is gapped")
   422  		}
   423  		if uint32(len(r.storageIndexes)) < end {
   424  			return nil, nil, errors.New("storage index buffer is corrupted")
   425  		}
   426  		index.decode(r.storageIndexes[start:end])
   427  
   428  		if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
   429  			return nil, nil, errors.New("storage slot is not in order")
   430  		}
   431  		if index.offset != r.lastSlotDataRead {
   432  			return nil, nil, errors.New("storage data buffer is gapped")
   433  		}
   434  		sEnd := index.offset + uint32(index.length)
   435  		if uint32(len(r.storageData)) < sEnd {
   436  			return nil, nil, errors.New("storage data buffer is corrupted")
   437  		}
   438  		storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
   439  		list = append(list, index.hash)
   440  
   441  		last = index.hash
   442  		r.lastSlotIndexRead = end
   443  		r.lastSlotDataRead = sEnd
   444  	}
   445  	return list, storage, nil
   446  }
   447  
   448  // decode deserializes the account and storage data from the provided byte stream.
   449  func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
   450  	var (
   451  		accounts    = make(map[common.Address][]byte)
   452  		storages    = make(map[common.Address]map[common.Hash][]byte)
   453  		accountList []common.Address
   454  		storageList = make(map[common.Address][]common.Hash)
   455  
   456  		r = &decoder{
   457  			accountData:    accountData,
   458  			storageData:    storageData,
   459  			accountIndexes: accountIndexes,
   460  			storageIndexes: storageIndexes,
   461  		}
   462  	)
   463  	if err := r.verify(); err != nil {
   464  		return err
   465  	}
   466  	for i := 0; i < len(accountIndexes)/accountIndexSize; i++ {
   467  		// Resolve account first
   468  		accIndex, accData, err := r.readAccount(i)
   469  		if err != nil {
   470  			return err
   471  		}
   472  		accounts[accIndex.address] = accData
   473  		accountList = append(accountList, accIndex.address)
   474  
   475  		// Resolve storage slots
   476  		slotList, slotData, err := r.readStorage(accIndex)
   477  		if err != nil {
   478  			return err
   479  		}
   480  		if len(slotList) > 0 {
   481  			storageList[accIndex.address] = slotList
   482  			storages[accIndex.address] = slotData
   483  		}
   484  	}
   485  	h.accounts = accounts
   486  	h.accountList = accountList
   487  	h.storages = storages
   488  	h.storageList = storageList
   489  	return nil
   490  }
   491  
   492  // readHistory reads and decodes the state history object by the given id.
   493  func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) {
   494  	blob := rawdb.ReadStateHistoryMeta(freezer, id)
   495  	if len(blob) == 0 {
   496  		return nil, fmt.Errorf("state history not found %d", id)
   497  	}
   498  	var m meta
   499  	if err := m.decode(blob); err != nil {
   500  		return nil, err
   501  	}
   502  	var (
   503  		dec            = history{meta: &m}
   504  		accountData    = rawdb.ReadStateAccountHistory(freezer, id)
   505  		storageData    = rawdb.ReadStateStorageHistory(freezer, id)
   506  		accountIndexes = rawdb.ReadStateAccountIndex(freezer, id)
   507  		storageIndexes = rawdb.ReadStateStorageIndex(freezer, id)
   508  	)
   509  	if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
   510  		return nil, err
   511  	}
   512  	return &dec, nil
   513  }
   514  
   515  // writeHistory writes the state history with provided state set. After
   516  // storing the corresponding state history, it will also prune the stale
   517  // histories from the disk with the given threshold.
   518  func writeHistory(db zonddb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error {
   519  	// Short circuit if state set is not available.
   520  	if dl.states == nil {
   521  		return errors.New("state change set is not available")
   522  	}
   523  	var (
   524  		err   error
   525  		n     int
   526  		start = time.Now()
   527  		h     = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states)
   528  	)
   529  	accountData, storageData, accountIndex, storageIndex := h.encode()
   530  	dataSize := common.StorageSize(len(accountData) + len(storageData))
   531  	indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))
   532  
   533  	// Write history data into five freezer table respectively.
   534  	rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
   535  
   536  	// Prune stale state histories based on the config.
   537  	if limit != 0 && dl.stateID() > limit {
   538  		n, err = truncateFromTail(db, freezer, dl.stateID()-limit)
   539  		if err != nil {
   540  			return err
   541  		}
   542  	}
   543  	historyDataBytesMeter.Mark(int64(dataSize))
   544  	historyIndexBytesMeter.Mark(int64(indexSize))
   545  	historyBuildTimeMeter.UpdateSince(start)
   546  	log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start)))
   547  	return nil
   548  }
   549  
   550  // checkHistories retrieves a batch of meta objects with the specified range
   551  // and performs the callback on each item.
   552  func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error {
   553  	for count > 0 {
   554  		number := count
   555  		if number > 10000 {
   556  			number = 10000 // split the big read into small chunks
   557  		}
   558  		blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number)
   559  		if err != nil {
   560  			return err
   561  		}
   562  		for _, blob := range blobs {
   563  			var dec meta
   564  			if err := dec.decode(blob); err != nil {
   565  				return err
   566  			}
   567  			if err := check(&dec); err != nil {
   568  				return err
   569  			}
   570  		}
   571  		count -= uint64(len(blobs))
   572  		start += uint64(len(blobs))
   573  	}
   574  	return nil
   575  }
   576  
   577  // truncateFromHead removes the extra state histories from the head with the given
   578  // parameters. It returns the number of items removed from the head.
   579  func truncateFromHead(db zonddb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) {
   580  	ohead, err := freezer.Ancients()
   581  	if err != nil {
   582  		return 0, err
   583  	}
   584  	if ohead <= nhead {
   585  		return 0, nil
   586  	}
   587  	// Load the meta objects in range [nhead+1, ohead]
   588  	blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead)
   589  	if err != nil {
   590  		return 0, err
   591  	}
   592  	batch := db.NewBatch()
   593  	for _, blob := range blobs {
   594  		var m meta
   595  		if err := m.decode(blob); err != nil {
   596  			return 0, err
   597  		}
   598  		rawdb.DeleteStateID(batch, m.root)
   599  	}
   600  	if err := batch.Write(); err != nil {
   601  		return 0, err
   602  	}
   603  	ohead, err = freezer.TruncateHead(nhead)
   604  	if err != nil {
   605  		return 0, err
   606  	}
   607  	return int(ohead - nhead), nil
   608  }
   609  
   610  // truncateFromTail removes the extra state histories from the tail with the given
   611  // parameters. It returns the number of items removed from the tail.
   612  func truncateFromTail(db zonddb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) {
   613  	otail, err := freezer.Tail()
   614  	if err != nil {
   615  		return 0, err
   616  	}
   617  	if otail >= ntail {
   618  		return 0, nil
   619  	}
   620  	// Load the meta objects in range [otail+1, ntail]
   621  	blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail)
   622  	if err != nil {
   623  		return 0, err
   624  	}
   625  	batch := db.NewBatch()
   626  	for _, blob := range blobs {
   627  		var m meta
   628  		if err := m.decode(blob); err != nil {
   629  			return 0, err
   630  		}
   631  		rawdb.DeleteStateID(batch, m.root)
   632  	}
   633  	if err := batch.Write(); err != nil {
   634  		return 0, err
   635  	}
   636  	otail, err = freezer.TruncateTail(ntail)
   637  	if err != nil {
   638  		return 0, err
   639  	}
   640  	return int(ntail - otail), nil
   641  }