github.com/theQRL/go-zond@v0.1.1/light/postprocess.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/common/bitutil"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/params"
	"github.com/theQRL/go-zond/rlp"
	"github.com/theQRL/go-zond/trie"
	"github.com/theQRL/go-zond/trie/trienode"
	"github.com/theQRL/go-zond/zonddb"
)

// IndexerConfig includes a set of configs for chain indexers.
type IndexerConfig struct {
	// The block frequency for creating CHTs.
	ChtSize uint64

	// The number of confirmations needed to generate/accept a canonical hash trie.
	ChtConfirms uint64

	// The block frequency for creating new bloom bits.
	BloomSize uint64

	// The number of confirmations needed before a bloom section is considered probably final and its rotated bits
	// are calculated.
	BloomConfirms uint64

	// The block frequency for creating a new BloomTrie.
	BloomTrieSize uint64

	// The number of confirmations needed to generate/accept a bloom trie.
	BloomTrieConfirms uint64
}

var (
	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
	DefaultServerIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequency,
		ChtConfirms:       params.HelperTrieProcessConfirmations,
		BloomSize:         params.BloomBitsBlocks,
		BloomConfirms:     params.BloomConfirms,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
	}
	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
	DefaultClientIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequency,
		ChtConfirms:       params.HelperTrieConfirmations,
		BloomSize:         params.BloomBitsBlocksClient,
		BloomConfirms:     params.HelperTrieConfirmations,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieConfirmations,
	}
	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
	TestServerIndexerConfig = &IndexerConfig{
		ChtSize:           128,
		ChtConfirms:       1,
		BloomSize:         16,
		BloomConfirms:     1,
		BloomTrieSize:     128,
		BloomTrieConfirms: 1,
	}
	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
	TestClientIndexerConfig = &IndexerConfig{
		ChtSize:           128,
		ChtConfirms:       8,
		BloomSize:         128,
		BloomConfirms:     8,
		BloomTrieSize:     128,
		BloomTrieConfirms: 8,
	}
)
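
// The sizes above partition the chain into fixed-length sections: block N
// falls into section N/ChtSize, and a section is only committed once the head
// has ChtConfirms additional blocks on top of the section's last block. The
// two helpers below are an illustrative sketch of that arithmetic only (they
// are not part of the upstream indexer code and are not used elsewhere in
// this package); the real bookkeeping lives in core.ChainIndexer.
func chtSectionOfBlock(cfg *IndexerConfig, blockNum uint64) uint64 {
	// Sections are zero-based and ChtSize blocks long.
	return blockNum / cfg.ChtSize
}

func chtSectionReady(cfg *IndexerConfig, section, head uint64) bool {
	// The last block of the section is (section+1)*ChtSize-1; it needs
	// ChtConfirms blocks on top of it before the section is processed.
	return head+1 >= (section+1)*cfg.ChtSize+cfg.ChtConfirms
}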

var (
	errNoTrustedCht       = errors.New("no trusted canonical hash trie")
	errNoTrustedBloomTrie = errors.New("no trusted bloom trie")
	errNoHeader           = errors.New("header not found")
)

// ChtNode structures are stored in the Canonical Hash Trie in an RLP-encoded format.
type ChtNode struct {
	Hash common.Hash
	Td   *big.Int
}

// GetChtRoot reads the CHT root associated with the given section from the database.
func GetChtRoot(db zonddb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(rawdb.ChtPrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreChtRoot writes the CHT root associated with the given section into the database.
func StoreChtRoot(db zonddb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(rawdb.ChtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
	disablePruning       bool
	diskdb, trieTable    zonddb.Database
	odr                  OdrBackend
	triedb               *trie.Database
	section, sectionSize uint64
	lastHash             common.Hash
	trie                 *trie.Trie
	originRoot           common.Hash
}

// NewChtIndexer creates a CHT chain indexer.
func NewChtIndexer(db zonddb.Database, odr OdrBackend, size, confirms uint64, disablePruning bool) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, string(rawdb.ChtTablePrefix))
	backend := &ChtIndexerBackend{
		diskdb:         db,
		odr:            odr,
		trieTable:      trieTable,
		triedb:         trie.NewDatabase(trieTable, trie.HashDefaults),
		sectionSize:    size,
		disablePruning: disablePruning,
	}
	return core.NewChainIndexer(db, rawdb.NewTable(db, string(rawdb.ChtIndexTablePrefix)), backend, size, confirms, time.Millisecond*100, "cht")
}
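
// A typical server-side wiring of the CHT indexer, shown only as a hedged
// sketch (the surrounding node setup code, not this file, does this for real,
// and it assumes core.ChainIndexer keeps the upstream go-ethereum API):
//
//	indexer := NewChtIndexer(db, nil, params.CHTFrequency, params.HelperTrieProcessConfirmations, false)
//	indexer.Start(chain) // chain must satisfy core.ChainIndexerChain
//
// A light client passes its OdrBackend instead of nil and uses the client-side
// confirmation counts from DefaultClientIndexerConfig, so missing trie nodes
// can be fetched on demand via fetchMissingNodes.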

// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	batch := c.trieTable.NewBatch()
	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
	for {
		err := c.odr.Retrieve(ctx, r)
		switch err {
		case nil:
			r.Proof.Store(batch)
			return batch.Write()
		case ErrNoPeers:
			// if there are no peers to serve, retry later
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second * 10):
				// stay in the loop and try again
			}
		default:
			return err
		}
	}
}

// Reset implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	root := types.EmptyRootHash
	if section > 0 {
		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
	}
	var err error
	c.trie, err = trie.New(trie.TrieID(root), c.triedb)

	if err != nil && c.odr != nil {
		err = c.fetchMissingNodes(ctx, section, root)
		if err == nil {
			c.trie, err = trie.New(trie.TrieID(root), c.triedb)
		}
	}
	c.section = section
	c.originRoot = root
	return err
}

// Process implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	hash, num := header.Hash(), header.Number.Uint64()
	c.lastHash = hash

	td := rawdb.ReadTd(c.diskdb, hash, num)
	if td == nil {
		// The total difficulty must be present for every canonical header;
		// a missing entry indicates a corrupted database.
		panic(fmt.Sprintf("total difficulty not found for block #%d [%x..]", num, hash[:4]))
	}
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], num)
	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
	return c.trie.Update(encNumber[:], data)
}
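
// Each CHT leaf written by Process is keyed by the 8-byte big-endian block
// number and stores the RLP encoding of ChtNode{Hash, Td}. As a sketch of the
// consumer side (hypothetical snippet, `value` being a leaf value proven
// against a trusted CHT root):
//
//	var node ChtNode
//	if err := rlp.DecodeBytes(value, &node); err != nil {
//		return err // malformed leaf
//	}
//	canonicalHash, td := node.Hash, node.Td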

// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
	root, nodes, err := c.trie.Commit(false)
	if err != nil {
		return err
	}
	// Commit the trie changes into the trie database if there are any.
	if nodes != nil {
		if err := c.triedb.Update(root, c.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
			return err
		}
		if err := c.triedb.Commit(root, false); err != nil {
			return err
		}
	}
	// Re-create the trie with the newly generated root and updated database.
	c.trie, err = trie.New(trie.TrieID(root), c.triedb)
	if err != nil {
		return err
	}
	// Prune historical trie nodes if necessary.
	if !c.disablePruning {
		it := c.trieTable.NewIterator(nil, nil)
		defer it.Release()

		var (
			deleted int
			batch   = c.trieTable.NewBatch()
			t       = time.Now()
		)
		hashes := make(map[common.Hash]struct{})
		if nodes != nil {
			for _, hash := range nodes.Hashes() {
				hashes[hash] = struct{}{}
			}
		}
		for it.Next() {
			trimmed := bytes.TrimPrefix(it.Key(), rawdb.ChtTablePrefix)
			if len(trimmed) == common.HashLength {
				if _, ok := hashes[common.BytesToHash(trimmed)]; !ok {
					batch.Delete(trimmed)
					deleted += 1
				}
			}
		}
		if err := batch.Write(); err != nil {
			return err
		}
		log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", len(hashes), "elapsed", common.PrettyDuration(time.Since(t)))
	}
	log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
	return nil
}

// Prune implements core.ChainIndexerBackend which deletes all chain data
// (except hash<->number mappings) older than the specified threshold.
func (c *ChtIndexerBackend) Prune(threshold uint64) error {
	// Short circuit if light pruning is disabled.
	if c.disablePruning {
		return nil
	}
	t := time.Now()
	// Always keep the genesis header in the database.
	start, end := uint64(1), (threshold+1)*c.sectionSize

	var batch = c.diskdb.NewBatch()
	for {
		numbers, hashes := rawdb.ReadAllCanonicalHashes(c.diskdb, start, end, 10240)
		if len(numbers) == 0 {
			break
		}
		for i := 0; i < len(numbers); i++ {
			// Keep the hash<->number mapping in the database, otherwise the
			// hash-based APIs (e.g. GetReceipt, GetLogs) would be broken.
			//
			// Storage-wise, a single mapping is ~41 bytes, so one section
			// amounts to roughly 1.3MB, which is acceptable.
			//
			// In order to get rid of this index entirely, an additional flag
			// would be needed to specify how much historical data the light
			// client can serve.
			rawdb.DeleteCanonicalHash(batch, numbers[i])
			rawdb.DeleteBlockWithoutNumber(batch, hashes[i], numbers[i])
		}
		if batch.ValueSize() > zonddb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		start = numbers[len(numbers)-1] + 1
	}
	if err := batch.Write(); err != nil {
		return err
	}
	log.Debug("Prune history headers", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(t)))
	return nil
}
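
// As a worked example (assuming the default server ChtSize of 32768 blocks):
// calling Prune(1) sets end to (1+1)*32768 = 65536, so the loop above walks
// the canonical hashes from block 1 up to that bound in batches of at most
// 10240 entries per iteration and deletes their headers and block data, while
// the genesis header and the hash<->number mappings are retained.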

// GetBloomTrieRoot reads the BloomTrie root associated with the given section from the database.
func GetBloomTrieRoot(db zonddb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(rawdb.BloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreBloomTrieRoot writes the BloomTrie root associated with the given section into the database.
func StoreBloomTrieRoot(db zonddb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(rawdb.BloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// BloomTrieIndexerBackend implements core.ChainIndexerBackend.
type BloomTrieIndexerBackend struct {
	disablePruning    bool
	diskdb, trieTable zonddb.Database
	triedb            *trie.Database
	odr               OdrBackend
	section           uint64
	parentSize        uint64
	size              uint64
	bloomTrieRatio    uint64
	trie              *trie.Trie
	originRoot        common.Hash
	sectionHeads      []common.Hash
}

// NewBloomTrieIndexer creates a BloomTrie chain indexer.
func NewBloomTrieIndexer(db zonddb.Database, odr OdrBackend, parentSize, size uint64, disablePruning bool) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, string(rawdb.BloomTrieTablePrefix))
	backend := &BloomTrieIndexerBackend{
		diskdb:         db,
		odr:            odr,
		trieTable:      trieTable,
		triedb:         trie.NewDatabase(trieTable, trie.HashDefaults),
		parentSize:     parentSize,
		size:           size,
		disablePruning: disablePruning,
	}
	backend.bloomTrieRatio = size / parentSize
	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
	return core.NewChainIndexer(db, rawdb.NewTable(db, string(rawdb.BloomTrieIndexPrefix)), backend, size, 0, time.Millisecond*100, "bloomtrie")
}
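
// bloomTrieRatio is the number of bloom-bit sections folded into a single
// bloom trie section. Assuming the upstream go-ethereum parameter values are
// carried over here (BloomBitsBlocks = 4096, BloomTrieFrequency = 32768,
// BloomBitsBlocksClient = 32768), a server folds 8 bloom-bit sections into
// each bloom trie section, while a light client works with a ratio of 1.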

// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	indexCh := make(chan uint, types.BloomBitLength)
	type res struct {
		nodes *NodeSet
		err   error
	}
	resCh := make(chan res, types.BloomBitLength)
	for i := 0; i < 20; i++ {
		go func() {
			for bitIndex := range indexCh {
				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
				for {
					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
						// if there are no peers to serve, retry later
						select {
						case <-ctx.Done():
							resCh <- res{nil, ctx.Err()}
							return
						case <-time.After(time.Second * 10):
							// stay in the loop and try again
						}
					} else {
						resCh <- res{r.Proofs, err}
						break
					}
				}
			}
		}()
	}
	for i := uint(0); i < types.BloomBitLength; i++ {
		indexCh <- i
	}
	close(indexCh)
	batch := b.trieTable.NewBatch()
	for i := uint(0); i < types.BloomBitLength; i++ {
		res := <-resCh
		if res.err != nil {
			return res.err
		}
		res.nodes.Store(batch)
	}
	return batch.Write()
}

// Reset implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	root := types.EmptyRootHash
	if section > 0 {
		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
	}
	var err error
	b.trie, err = trie.New(trie.TrieID(root), b.triedb)
	if err != nil && b.odr != nil {
		err = b.fetchMissingNodes(ctx, section, root)
		if err == nil {
			b.trie, err = trie.New(trie.TrieID(root), b.triedb)
		}
	}
	b.section = section
	b.originRoot = root
	return err
}

// Process implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	num := header.Number.Uint64() - b.section*b.size
	if (num+1)%b.parentSize == 0 {
		b.sectionHeads[num/b.parentSize] = header.Hash()
	}
	return nil
}
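
// With the server defaults assumed above (parentSize 4096, ratio 8), Process
// records the hash of every 4096th block of the bloom trie section into
// sectionHeads[0..7]; Commit then uses exactly those heads to look up the
// stored bloom bits of each child section.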

// Commit implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Commit() error {
	var compSize, decompSize uint64

	for i := uint(0); i < types.BloomBitLength; i++ {
		var encKey [10]byte
		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
		binary.BigEndian.PutUint64(encKey[2:10], b.section)
		var decomp []byte
		for j := uint64(0); j < b.bloomTrieRatio; j++ {
			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
			if err != nil {
				return err
			}
			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
			if err2 != nil {
				return err2
			}
			decomp = append(decomp, decompData...)
		}
		comp := bitutil.CompressBytes(decomp)

		decompSize += uint64(len(decomp))
		compSize += uint64(len(comp))

		var terr error
		if len(comp) > 0 {
			terr = b.trie.Update(encKey[:], comp)
		} else {
			terr = b.trie.Delete(encKey[:])
		}
		if terr != nil {
			return terr
		}
	}
	root, nodes, err := b.trie.Commit(false)
	if err != nil {
		return err
	}
	// Commit the trie changes into the trie database if there are any.
	if nodes != nil {
		if err := b.triedb.Update(root, b.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
			return err
		}
		if err := b.triedb.Commit(root, false); err != nil {
			return err
		}
	}
	// Re-create the trie with the newly generated root and updated database.
	b.trie, err = trie.New(trie.TrieID(root), b.triedb)
	if err != nil {
		return err
	}
	// Prune historical trie nodes if necessary.
	if !b.disablePruning {
		it := b.trieTable.NewIterator(nil, nil)
		defer it.Release()

		var (
			deleted int
			batch   = b.trieTable.NewBatch()
			t       = time.Now()
		)
		hashes := make(map[common.Hash]struct{})
		if nodes != nil {
			for _, hash := range nodes.Hashes() {
				hashes[hash] = struct{}{}
			}
		}
		for it.Next() {
			trimmed := bytes.TrimPrefix(it.Key(), rawdb.BloomTrieTablePrefix)
			if len(trimmed) == common.HashLength {
				if _, ok := hashes[common.BytesToHash(trimmed)]; !ok {
					batch.Delete(trimmed)
					deleted += 1
				}
			}
		}
		if err := batch.Write(); err != nil {
			return err
		}
		log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", len(hashes), "elapsed", common.PrettyDuration(time.Since(t)))
	}
	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))

	return nil
}
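
// Every bloom trie leaf is keyed by the 2-byte big-endian bit index followed
// by the 8-byte big-endian bloom trie section number, and holds the compressed
// bit vector of that bit over the whole section (all-zero vectors are simply
// absent from the trie). A hypothetical helper, mirroring the key construction
// in Commit above, that a consumer could use when verifying a served proof:
//
//	func bloomTrieKey(bit uint16, section uint64) []byte {
//		var key [10]byte
//		binary.BigEndian.PutUint16(key[0:2], bit)
//		binary.BigEndian.PutUint64(key[2:10], section)
//		return key[:]
//	}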

// Prune implements core.ChainIndexerBackend which deletes all
// bloom bits older than the specified threshold.
func (b *BloomTrieIndexerBackend) Prune(threshold uint64) error {
	// Short circuit if light pruning is disabled.
	if b.disablePruning {
		return nil
	}
	start := time.Now()
	for i := uint(0); i < types.BloomBitLength; i++ {
		rawdb.DeleteBloombits(b.diskdb, i, 0, threshold*b.bloomTrieRatio+b.bloomTrieRatio)
	}
	log.Debug("Prune history bloombits", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}