github.com/core-coin/go-core/v2@v2.1.9/light/postprocess.go

     1  // Copyright 2017 by the Authors
     2  // This file is part of the go-core library.
     3  //
     4  // The go-core library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-core library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package light
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"encoding/binary"
    23  	"errors"
    24  	"fmt"
    25  	"math/big"
    26  	"time"
    27  
    28  	mapset "github.com/deckarep/golang-set"
    29  
    30  	"github.com/core-coin/go-core/v2/xcbdb"
    31  
    32  	"github.com/core-coin/go-core/v2/common"
    33  	"github.com/core-coin/go-core/v2/common/bitutil"
    34  	"github.com/core-coin/go-core/v2/core"
    35  	"github.com/core-coin/go-core/v2/core/rawdb"
    36  	"github.com/core-coin/go-core/v2/core/types"
    37  	"github.com/core-coin/go-core/v2/log"
    38  	"github.com/core-coin/go-core/v2/params"
    39  	"github.com/core-coin/go-core/v2/rlp"
    40  	"github.com/core-coin/go-core/v2/trie"
    41  )
    42  
    43  // IndexerConfig includes a set of configs for chain indexers.
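        // BloomTrieSize is expected to be a multiple of BloomSize, since every bloom
        // trie section aggregates BloomTrieSize/BloomSize bloombits sections.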
    44  type IndexerConfig struct {
    45  	// The block frequency for creating CHTs.
    46  	ChtSize uint64
    47  
    48  	// The number of confirmations needed to generate/accept a canonical hash trie.
    49  	ChtConfirms uint64
    50  
    51  	// The block frequency for creating new bloom bits.
    52  	BloomSize uint64
    53  
    54  	// The number of confirmations needed before a bloom section is considered probably final and its rotated bits
    55  	// are calculated.
    56  	BloomConfirms uint64
    57  
    58  	// The block frequency for creating BloomTrie.
    59  	BloomTrieSize uint64
    60  
    61  	// The number of confirmations needed to generate/accept a bloom trie.
    62  	BloomTrieConfirms uint64
    63  }
    64  
    65  var (
    66  	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
    67  	DefaultServerIndexerConfig = &IndexerConfig{
    68  		ChtSize:           params.CHTFrequency,
    69  		ChtConfirms:       params.HelperTrieProcessConfirmations,
    70  		BloomSize:         params.BloomBitsBlocks,
    71  		BloomConfirms:     params.BloomConfirms,
    72  		BloomTrieSize:     params.BloomTrieFrequency,
    73  		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
    74  	}
    75  	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
    76  	DefaultClientIndexerConfig = &IndexerConfig{
    77  		ChtSize:           params.CHTFrequency,
    78  		ChtConfirms:       params.HelperTrieConfirmations,
    79  		BloomSize:         params.BloomBitsBlocksClient,
    80  		BloomConfirms:     params.HelperTrieConfirmations,
    81  		BloomTrieSize:     params.BloomTrieFrequency,
    82  		BloomTrieConfirms: params.HelperTrieConfirmations,
    83  	}
    84  	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
    85  	TestServerIndexerConfig = &IndexerConfig{
    86  		ChtSize:           128,
    87  		ChtConfirms:       1,
    88  		BloomSize:         16,
    89  		BloomConfirms:     1,
    90  		BloomTrieSize:     128,
    91  		BloomTrieConfirms: 1,
    92  	}
    93  	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
    94  	TestClientIndexerConfig = &IndexerConfig{
    95  		ChtSize:           128,
    96  		ChtConfirms:       8,
    97  		BloomSize:         128,
    98  		BloomConfirms:     8,
    99  		BloomTrieSize:     128,
   100  		BloomTrieConfirms: 8,
   101  	}
   102  )
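        // As an illustrative sketch only (not code taken from this package's callers), a
        // server-side setup could wire the presets above into the indexers defined in this
        // file roughly as:
        //
        //	cfg := DefaultServerIndexerConfig
        //	chtIndexer := NewChtIndexer(db, nil, cfg.ChtSize, cfg.ChtConfirms, false)
        //	bloomTrieIndexer := NewBloomTrieIndexer(db, nil, cfg.BloomSize, cfg.BloomTrieSize, false)
        //
        // where db is any xcbdb.Database and a nil OdrBackend simply disables remote
        // retrieval of missing trie nodes in Reset.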
   103  
   104  var (
   105  	errNoTrustedCht       = errors.New("no trusted canonical hash trie")
   106  	errNoTrustedBloomTrie = errors.New("no trusted bloom trie")
   107  	errNoHeader           = errors.New("header not found")
   108  	chtPrefix             = []byte("chtRootV2-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
   109  	ChtTablePrefix        = "cht-"
   110  )
   111  
   112  // ChtNode structures are stored in the Canonical Hash Trie in an RLP encoded format
   113  type ChtNode struct {
   114  	Hash common.Hash
   115  	Td   *big.Int
   116  }
   117  
   118  // GetChtRoot reads the CHT root associated to the given section from the database
   119  func GetChtRoot(db xcbdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
   120  	var encNumber [8]byte
   121  	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
   122  	data, _ := db.Get(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...))
   123  	return common.BytesToHash(data)
   124  }
   125  
   126  // StoreChtRoot writes the CHT root associated to the given section into the database
   127  func StoreChtRoot(db xcbdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
   128  	var encNumber [8]byte
   129  	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
   130  	db.Put(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
   131  }
   132  
   133  // ChtIndexerBackend implements core.ChainIndexerBackend.
   134  type ChtIndexerBackend struct {
   135  	disablePruning       bool
   136  	diskdb, trieTable    xcbdb.Database
   137  	odr                  OdrBackend
   138  	triedb               *trie.Database
   139  	trieset              mapset.Set
   140  	section, sectionSize uint64
   141  	lastHash             common.Hash
   142  	trie                 *trie.Trie
   143  }
   144  
   145  // NewChtIndexer creates a Cht chain indexer
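        // The returned indexer processes the chain in sections of size blocks and waits for
        // confirms confirmations before a section is committed; section progress is stored in
        // the "chtIndexV2-" table while the CHT nodes themselves live under the "cht-" prefix.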
   146  func NewChtIndexer(db xcbdb.Database, odr OdrBackend, size, confirms uint64, disablePruning bool) *core.ChainIndexer {
   147  	trieTable := rawdb.NewTable(db, ChtTablePrefix)
   148  	backend := &ChtIndexerBackend{
   149  		diskdb:         db,
   150  		odr:            odr,
   151  		trieTable:      trieTable,
   152  		triedb:         trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
   153  		trieset:        mapset.NewSet(),
   154  		sectionSize:    size,
   155  		disablePruning: disablePruning,
   156  	}
   157  	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
   158  }
   159  
   160  // fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
   161  // ODR backend in order to be able to add new entries and calculate subsequent root hashes
   162  func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
   163  	batch := c.trieTable.NewBatch()
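        	// Request the last entry of the previous, trusted section: its Merkle proof
        	// contains the trie nodes needed to extend the CHT past the trusted root.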
   164  	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
   165  	for {
   166  		err := c.odr.Retrieve(ctx, r)
   167  		switch err {
   168  		case nil:
   169  			r.Proof.Store(batch)
   170  			return batch.Write()
   171  		case ErrNoPeers:
   172  			// if there are no peers to serve, retry later
   173  			select {
   174  			case <-ctx.Done():
   175  				return ctx.Err()
   176  			case <-time.After(time.Second * 10):
   177  				// stay in the loop and try again
   178  			}
   179  		default:
   180  			return err
   181  		}
   182  	}
   183  }
   184  
   185  // Reset implements core.ChainIndexerBackend
   186  func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
   187  	var root common.Hash
   188  	if section > 0 {
   189  		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
   190  	}
   191  	var err error
   192  	c.trie, err = trie.New(root, c.triedb)
   193  
   194  	if err != nil && c.odr != nil {
   195  		err = c.fetchMissingNodes(ctx, section, root)
   196  		if err == nil {
   197  			c.trie, err = trie.New(root, c.triedb)
   198  		}
   199  	}
   200  	c.section = section
   201  	return err
   202  }
   203  
   204  // Process implements core.ChainIndexerBackend
   205  func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
   206  	hash, num := header.Hash(), header.Number.Uint64()
   207  	c.lastHash = hash
   208  
   209  	td := rawdb.ReadTd(c.diskdb, hash, num)
   210  	if td == nil {
   211  		panic(fmt.Sprintf("total difficulty missing for block %d %064x", num, hash))
   212  	}
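        	// CHT entries are keyed by the 8-byte big-endian block number and hold the
        	// RLP encoding of ChtNode{Hash, Td}.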
   213  	var encNumber [8]byte
   214  	binary.BigEndian.PutUint64(encNumber[:], num)
   215  	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
   216  	c.trie.Update(encNumber[:], data)
   217  	return nil
   218  }
   219  
   220  // Commit implements core.ChainIndexerBackend
   221  func (c *ChtIndexerBackend) Commit() error {
   222  	root, err := c.trie.Commit(nil)
   223  	if err != nil {
   224  		return err
   225  	}
   226  	// Pruning historical trie nodes if necessary.
   227  	if !c.disablePruning {
   228  		// Flush the triedb and track the latest trie nodes.
   229  		c.trieset.Clear()
   230  		c.triedb.Commit(root, false, func(hash common.Hash) { c.trieset.Add(hash) })
   231  
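        		// Drop every node in the table that is not referenced by the freshly
        		// committed trie.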
   232  		it := c.trieTable.NewIterator(nil, nil)
   233  		defer it.Release()
   234  
   235  		var (
   236  			deleted   int
   237  			remaining int
   238  			t         = time.Now()
   239  		)
   240  		for it.Next() {
   241  			trimmed := bytes.TrimPrefix(it.Key(), []byte(ChtTablePrefix))
   242  			if !c.trieset.Contains(common.BytesToHash(trimmed)) {
   243  				c.trieTable.Delete(trimmed)
   244  				deleted += 1
   245  			} else {
   246  				remaining += 1
   247  			}
   248  		}
   249  		log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
   250  	} else {
   251  		c.triedb.Commit(root, false, nil)
   252  	}
   253  	log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
   254  	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
   255  	return nil
   256  }
   257  
   258  // Prune implements core.ChainIndexerBackend, deleting all chain
   259  // data (except hash<->number mappings) older than the specified
   260  // threshold.
   261  func (c *ChtIndexerBackend) Prune(threshold uint64) error {
   262  	// Short circuit if the light pruning is disabled.
   263  	if c.disablePruning {
   264  		return nil
   265  	}
   266  	t := time.Now()
   267  	// Always keep genesis header in database.
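        	// Sections up to and including the threshold are dropped, i.e. every block
        	// below (threshold+1)*sectionSize except the genesis block.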
   268  	start, end := uint64(1), (threshold+1)*c.sectionSize
   269  
   270  	var batch = c.diskdb.NewBatch()
   271  	for {
   272  		numbers, hashes := rawdb.ReadAllCanonicalHashes(c.diskdb, start, end, 10240)
   273  		if len(numbers) == 0 {
   274  			break
   275  		}
   276  		for i := 0; i < len(numbers); i++ {
   277  			// Keep the hash<->number mapping in the database, otherwise the hash-based
   278  			// API (e.g. GetReceipt, GetLogs) will be broken.
   279  			//
   280  			// Storage size wise, the size of a mapping is ~41 bytes. For one
   281  			// section it is about 1.3MB, which is acceptable.
   282  			//
   283  			// In order to totally get rid of this index, we need an additional
   284  			// flag to specify how much historical data the light client can serve.
   285  			rawdb.DeleteCanonicalHash(batch, numbers[i])
   286  			rawdb.DeleteBlockWithoutNumber(batch, hashes[i], numbers[i])
   287  		}
   288  		if batch.ValueSize() > xcbdb.IdealBatchSize {
   289  			if err := batch.Write(); err != nil {
   290  				return err
   291  			}
   292  			batch.Reset()
   293  		}
   294  		start = numbers[len(numbers)-1] + 1
   295  	}
   296  	if err := batch.Write(); err != nil {
   297  		return err
   298  	}
   299  	log.Debug("Prune history headers", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(t)))
   300  	return nil
   301  }
   302  
   303  var (
   304  	bloomTriePrefix      = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
   305  	BloomTrieTablePrefix = "blt-"
   306  )
   307  
   308  // GetBloomTrieRoot reads the BloomTrie root associated to the given section from the database
   309  func GetBloomTrieRoot(db xcbdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
   310  	var encNumber [8]byte
   311  	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
   312  	data, _ := db.Get(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...))
   313  	return common.BytesToHash(data)
   314  }
   315  
   316  // StoreBloomTrieRoot writes the BloomTrie root associated to the given section into the database
   317  func StoreBloomTrieRoot(db xcbdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
   318  	var encNumber [8]byte
   319  	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
   320  	db.Put(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
   321  }
   322  
   323  // BloomTrieIndexerBackend implements core.ChainIndexerBackend
   324  type BloomTrieIndexerBackend struct {
   325  	disablePruning    bool
   326  	diskdb, trieTable xcbdb.Database
   327  	triedb            *trie.Database
   328  	trieset           mapset.Set
   329  	odr               OdrBackend
   330  	section           uint64
   331  	parentSize        uint64
   332  	size              uint64
   333  	bloomTrieRatio    uint64
   334  	trie              *trie.Trie
   335  	sectionHeads      []common.Hash
   336  }
   337  
   338  // NewBloomTrieIndexer creates a BloomTrie chain indexer
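        // parentSize is the bloombits section size and size the bloom trie section size;
        // size is expected to be a multiple of parentSize.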
   339  func NewBloomTrieIndexer(db xcbdb.Database, odr OdrBackend, parentSize, size uint64, disablePruning bool) *core.ChainIndexer {
   340  	trieTable := rawdb.NewTable(db, BloomTrieTablePrefix)
   341  	backend := &BloomTrieIndexerBackend{
   342  		diskdb:         db,
   343  		odr:            odr,
   344  		trieTable:      trieTable,
   345  		triedb:         trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
   346  		trieset:        mapset.NewSet(),
   347  		parentSize:     parentSize,
   348  		size:           size,
   349  		disablePruning: disablePruning,
   350  	}
   351  	backend.bloomTrieRatio = size / parentSize
   352  	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
   353  	return core.NewChainIndexer(db, rawdb.NewTable(db, "bltIndex-"), backend, size, 0, time.Millisecond*100, "bloomtrie")
   354  }
   355  
   356  // fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
   357  // ODR backend in order to be able to add new entries and calculate subsequent root hashes
   358  func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
   359  	indexCh := make(chan uint, types.BloomBitLength)
   360  	type res struct {
   361  		nodes *NodeSet
   362  		err   error
   363  	}
   364  	resCh := make(chan res, types.BloomBitLength)
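        	// Start 20 worker goroutines; each pulls bloom bit indices from indexCh and
        	// retrieves the matching bloom trie proof, retrying while no peers are available.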
   365  	for i := 0; i < 20; i++ {
   366  		go func() {
   367  			for bitIndex := range indexCh {
   368  				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
   369  				for {
   370  					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
   371  						// if there are no peers to serve, retry later
   372  						select {
   373  						case <-ctx.Done():
   374  							resCh <- res{nil, ctx.Err()}
   375  							return
   376  						case <-time.After(time.Second * 10):
   377  							// stay in the loop and try again
   378  						}
   379  					} else {
   380  						resCh <- res{r.Proofs, err}
   381  						break
   382  					}
   383  				}
   384  			}
   385  		}()
   386  	}
   387  	for i := uint(0); i < types.BloomBitLength; i++ {
   388  		indexCh <- i
   389  	}
   390  	close(indexCh)
   391  	batch := b.trieTable.NewBatch()
   392  	for i := uint(0); i < types.BloomBitLength; i++ {
   393  		res := <-resCh
   394  		if res.err != nil {
   395  			return res.err
   396  		}
   397  		res.nodes.Store(batch)
   398  	}
   399  	return batch.Write()
   400  }
   401  
   402  // Reset implements core.ChainIndexerBackend
   403  func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
   404  	var root common.Hash
   405  	if section > 0 {
   406  		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
   407  	}
   408  	var err error
   409  	b.trie, err = trie.New(root, b.triedb)
   410  	if err != nil && b.odr != nil {
   411  		err = b.fetchMissingNodes(ctx, section, root)
   412  		if err == nil {
   413  			b.trie, err = trie.New(root, b.triedb)
   414  		}
   415  	}
   416  	b.section = section
   417  	return err
   418  }
   419  
   420  // Process implements core.ChainIndexerBackend
   421  func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
   422  	num := header.Number.Uint64() - b.section*b.size
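        	// Whenever a bloombits (parent) section completes within this bloom trie
        	// section, remember its head hash; Commit reads the bloombits keyed by it.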
   423  	if (num+1)%b.parentSize == 0 {
   424  		b.sectionHeads[num/b.parentSize] = header.Hash()
   425  	}
   426  	return nil
   427  }
   428  
   429  // Commit implements core.ChainIndexerBackend
   430  func (b *BloomTrieIndexerBackend) Commit() error {
   431  	var compSize, decompSize uint64
   432  
   433  	for i := uint(0); i < types.BloomBitLength; i++ {
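        		// Bloom trie keys are the 2-byte bit index followed by the 8-byte section
        		// number; the value is the compressed concatenation of this bit's vectors
        		// over all bloombits sections covered by the bloom trie section.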
   434  		var encKey [10]byte
   435  		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
   436  		binary.BigEndian.PutUint64(encKey[2:10], b.section)
   437  		var decomp []byte
   438  		for j := uint64(0); j < b.bloomTrieRatio; j++ {
   439  			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
   440  			if err != nil {
   441  				return err
   442  			}
   443  			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
   444  			if err2 != nil {
   445  				return err2
   446  			}
   447  			decomp = append(decomp, decompData...)
   448  		}
   449  		comp := bitutil.CompressBytes(decomp)
   450  
   451  		decompSize += uint64(len(decomp))
   452  		compSize += uint64(len(comp))
   453  		if len(comp) > 0 {
   454  			b.trie.Update(encKey[:], comp)
   455  		} else {
   456  			b.trie.Delete(encKey[:])
   457  		}
   458  	}
   459  	root, err := b.trie.Commit(nil)
   460  	if err != nil {
   461  		return err
   462  	}
   463  	// Pruning historical trie nodes if necessary.
   464  	if !b.disablePruning {
   465  		// Flush the triedb and track the latest trie nodes.
   466  		b.trieset.Clear()
   467  		b.triedb.Commit(root, false, func(hash common.Hash) { b.trieset.Add(hash) })
   468  
   469  		it := b.trieTable.NewIterator(nil, nil)
   470  		defer it.Release()
   471  
   472  		var (
   473  			deleted   int
   474  			remaining int
   475  			t         = time.Now()
   476  		)
   477  		for it.Next() {
   478  			trimmed := bytes.TrimPrefix(it.Key(), []byte(BloomTrieTablePrefix))
   479  			if !b.trieset.Contains(common.BytesToHash(trimmed)) {
   480  				b.trieTable.Delete(trimmed)
   481  				deleted += 1
   482  			} else {
   483  				remaining += 1
   484  			}
   485  		}
   486  		log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
   487  	} else {
   488  		b.triedb.Commit(root, false, nil)
   489  	}
   490  	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
   491  	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
   492  	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
   493  
   494  	return nil
   495  }
   496  
   497  // Prune implements core.ChainIndexerBackend, deleting all
   498  // bloombits older than the specified threshold.
   499  func (b *BloomTrieIndexerBackend) Prune(threshold uint64) error {
   500  	// Short circuit if the light pruning is disabled.
   501  	if b.disablePruning {
   502  		return nil
   503  	}
   504  	start := time.Now()
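        	// Remove the stored bloombits of every bit index for all bloombits sections
        	// below (threshold+1)*bloomTrieRatio.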
   505  	for i := uint(0); i < types.BloomBitLength; i++ {
   506  		rawdb.DeleteBloombits(b.diskdb, i, 0, threshold*b.bloomTrieRatio+b.bloomTrieRatio)
   507  	}
   508  	log.Debug("Prune history bloombits", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(start)))
   509  	return nil
   510  }