github.com/FusionFoundation/efsn/v4@v4.2.0/light/postprocess.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/FusionFoundation/efsn/v4/common"
	"github.com/FusionFoundation/efsn/v4/common/bitutil"
	"github.com/FusionFoundation/efsn/v4/core"
	"github.com/FusionFoundation/efsn/v4/core/rawdb"
	"github.com/FusionFoundation/efsn/v4/core/types"
	"github.com/FusionFoundation/efsn/v4/ethdb"
	"github.com/FusionFoundation/efsn/v4/log"
	"github.com/FusionFoundation/efsn/v4/params"
	"github.com/FusionFoundation/efsn/v4/rlp"
	"github.com/FusionFoundation/efsn/v4/trie"
)

// IndexerConfig includes a set of configs for chain indexers.
type IndexerConfig struct {
	// The block frequency for creating CHTs.
	ChtSize uint64

	// The CHT size of the counterpart indexer: for a server config this is the
	// client's CHT size, and for a client config it is the server's CHT size.
	PairChtSize uint64

	// The number of confirmations needed to generate/accept a canonical hash trie (CHT).
	ChtConfirms uint64

	// The block frequency for creating new bloom bits.
	BloomSize uint64

	// The number of confirmations needed before a bloom section is considered probably final and its rotated bits
	// are calculated.
	BloomConfirms uint64

	// The block frequency for creating BloomTrie.
	BloomTrieSize uint64

	// The number of confirmations needed to generate/accept a bloom trie.
	BloomTrieConfirms uint64
}
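
// Illustrative sketch (not part of the original file): the section arithmetic implied
// by ChtSize and ChtConfirms, mirroring how core.ChainIndexer decides which sections
// are complete. Both helpers below are assumptions for exposition, not APIs of this package.
func chtSectionOf(cfg *IndexerConfig, blockNum uint64) uint64 {
	// Section s covers blocks [s*ChtSize, (s+1)*ChtSize-1].
	return blockNum / cfg.ChtSize
}

func confirmedChtSections(cfg *IndexerConfig, head uint64) uint64 {
	// A section is only processed once its last block has ChtConfirms confirmations.
	if head < cfg.ChtSize+cfg.ChtConfirms-1 {
		return 0
	}
	return (head + 1 - cfg.ChtConfirms) / cfg.ChtSize
}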

var (
	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
	DefaultServerIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequencyServer,
		PairChtSize:       params.CHTFrequencyClient,
		ChtConfirms:       params.HelperTrieProcessConfirmations,
		BloomSize:         params.BloomBitsBlocks,
		BloomConfirms:     params.BloomConfirms,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
	}
	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
	DefaultClientIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequencyClient,
		PairChtSize:       params.CHTFrequencyServer,
		ChtConfirms:       params.HelperTrieConfirmations,
		BloomSize:         params.BloomBitsBlocksClient,
		BloomConfirms:     params.HelperTrieConfirmations,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieConfirmations,
	}
	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
	TestServerIndexerConfig = &IndexerConfig{
		ChtSize:           256,
		PairChtSize:       2048,
		ChtConfirms:       16,
		BloomSize:         256,
		BloomConfirms:     16,
		BloomTrieSize:     2048,
		BloomTrieConfirms: 16,
	}
	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
	TestClientIndexerConfig = &IndexerConfig{
		ChtSize:           2048,
		PairChtSize:       256,
		ChtConfirms:       128,
		BloomSize:         2048,
		BloomConfirms:     128,
		BloomTrieSize:     2048,
		BloomTrieConfirms: 128,
	}
)
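
// Note that the server and client defaults mirror each other: each side's PairChtSize
// equals the other side's ChtSize (and likewise for the test configs above). An
// illustrative helper (not part of the original file) making that invariant explicit:
func pairedIndexerConfigs(server, client *IndexerConfig) bool {
	return server.PairChtSize == client.ChtSize && client.PairChtSize == server.ChtSize
}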

// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{
	params.MainnetGenesisHash: params.MainnetTrustedCheckpoint,
	params.TestnetGenesisHash: params.TestnetTrustedCheckpoint,
	params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint,
}

var (
	ErrNoTrustedCht       = errors.New("no trusted canonical hash trie")
	ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
	ErrNoHeader           = errors.New("header not found")
	chtPrefix             = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
	ChtTablePrefix        = "cht-"
)

// ChtNode structures are stored in the Canonical Hash Trie in an RLP encoded format
type ChtNode struct {
	Hash common.Hash
	Td   *big.Int
}

// GetChtRoot reads the CHT root associated with the given section from the database.
// Note that sectionIdx is specified according to LES/1 CHT section size.
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreChtRoot writes the CHT root associated with the given section into the database.
// Note that sectionIdx is specified according to LES/1 CHT section size.
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}
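
// The database key used by GetChtRoot and StoreChtRoot is the concatenation
// chtPrefix ++ sectionIdx (uint64 big endian) ++ sectionHead. An illustrative helper
// (not part of the original file) that builds the same key:
func chtRootKey(sectionIdx uint64, sectionHead common.Hash) []byte {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	return append(append(append([]byte{}, chtPrefix...), encNumber[:]...), sectionHead.Bytes()...)
}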

// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
	disablePruning       bool
	diskdb, trieTable    ethdb.Database
	odr                  OdrBackend
	triedb               *trie.Database
	section, sectionSize uint64
	lastHash             common.Hash
	trie                 *trie.Trie
}

// NewChtIndexer creates a CHT chain indexer.
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, ChtTablePrefix)
	backend := &ChtIndexerBackend{
		diskdb:      db,
		odr:         odr,
		trieTable:   trieTable,
		triedb:      trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
		sectionSize: size,
	}
	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
}
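
// A minimal usage sketch (assumed wiring, not taken from this file): a light client
// would typically build the indexer from its client-side config and let the returned
// core.ChainIndexer drive Reset/Process/Commit as sections mature.
func newClientChtIndexer(db ethdb.Database, odr OdrBackend) *core.ChainIndexer {
	cfg := DefaultClientIndexerConfig
	return NewChtIndexer(db, odr, cfg.ChtSize, cfg.ChtConfirms)
}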

// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	batch := c.trieTable.NewBatch()
	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
	for {
		err := c.odr.Retrieve(ctx, r)
		switch err {
		case nil:
			r.Proof.Store(batch)
			return batch.Write()
		case ErrNoPeers:
			// if there are no peers to serve, retry later
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second * 10):
				// stay in the loop and try again
			}
		default:
			return err
		}
	}
}

// Reset implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
	}
	var err error
	c.trie, err = trie.New(root, c.triedb)

	if err != nil && c.odr != nil {
		err = c.fetchMissingNodes(ctx, section, root)
		if err == nil {
			c.trie, err = trie.New(root, c.triedb)
		}
	}

	c.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	hash, num := header.Hash(), header.Number.Uint64()
	c.lastHash = hash

	td := rawdb.ReadTd(c.diskdb, hash, num)
	if td == nil {
		panic(fmt.Sprintf("total difficulty not found for block #%d [%x]", num, hash))
	}
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], num)
	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
	c.trie.Update(encNumber[:], data)
	return nil
}
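
// Each CHT leaf written by Process maps the big-endian block number to the RLP
// encoding of a ChtNode{Hash, Td}. An illustrative helper (not part of the original
// file) producing the same key/value pair:
func chtLeaf(num uint64, hash common.Hash, td *big.Int) (key, value []byte, err error) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], num)
	value, err = rlp.EncodeToBytes(ChtNode{hash, td})
	return encNumber[:], value, err
}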

// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
	root, err := c.trie.Commit(nil)
	if err != nil {
		return err
	}
	c.triedb.Commit(root, false, nil)

	if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
		log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
	}
	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
	return nil
}

// Prune implements core.ChainIndexerBackend. It deletes all chain data
// (except the hash<->number mappings) older than the specified threshold.
func (c *ChtIndexerBackend) Prune(threshold uint64) error {
	// Short circuit if light pruning is disabled.
	if c.disablePruning {
		return nil
	}
	t := time.Now()
	// Always keep the genesis header in the database.
	start, end := uint64(1), (threshold+1)*c.sectionSize

	var batch = c.diskdb.NewBatch()
	for {
		numbers, hashes := rawdb.ReadAllCanonicalHashes(c.diskdb, start, end, 10240)
		if len(numbers) == 0 {
			break
		}
		for i := 0; i < len(numbers); i++ {
			// Keep the hash<->number mapping in the database, otherwise the hash-based
			// APIs (e.g. GetReceipt, GetLogs) would be broken.
			//
			// Storage-wise, a single mapping is ~41 bytes, which amounts to roughly
			// 1.3MB per section and is acceptable.
			//
			// In order to get rid of this index entirely, we would need an additional
			// flag specifying how much historical data the light client can serve.
			rawdb.DeleteCanonicalHash(batch, numbers[i])
			rawdb.DeleteBlockWithoutNumber(batch, hashes[i], numbers[i])
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		start = numbers[len(numbers)-1] + 1
	}
	if err := batch.Write(); err != nil {
		return err
	}
	log.Debug("Prune history headers", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(t)))
	return nil
}
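
// Prune walks canonical blocks from 1 up to (threshold+1)*sectionSize, i.e. the first
// block of section threshold+1. An illustrative helper (not part of the original file)
// mirroring the bounds passed to rawdb.ReadAllCanonicalHashes above:
func chtPruneBounds(threshold, sectionSize uint64) (start, end uint64) {
	// Block 0 (the genesis header) is always kept, so pruning starts at block 1.
	return 1, (threshold + 1) * sectionSize
}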

var (
	bloomTriePrefix      = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
	BloomTrieTablePrefix = "blt-"
)

// GetBloomTrieRoot reads the BloomTrie root associated with the given section from the database.
func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreBloomTrieRoot writes the BloomTrie root associated with the given section into the database.
func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// BloomTrieIndexerBackend implements core.ChainIndexerBackend
type BloomTrieIndexerBackend struct {
	disablePruning    bool
	diskdb, trieTable ethdb.Database
	triedb            *trie.Database
	odr               OdrBackend
	section           uint64
	parentSize        uint64
	size              uint64
	bloomTrieRatio    uint64
	trie              *trie.Trie
	sectionHeads      []common.Hash
}

// NewBloomTrieIndexer creates a BloomTrie chain indexer
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, BloomTrieTablePrefix)
	backend := &BloomTrieIndexerBackend{
		diskdb:     db,
		odr:        odr,
		trieTable:  trieTable,
		triedb:     trie.NewDatabase(trieTable),
		parentSize: parentSize,
		size:       size,
	}
	backend.bloomTrieRatio = size / parentSize
	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
	return core.NewChainIndexer(db, rawdb.NewTable(db, "bltIndex-"), backend, size, 0, time.Millisecond*100, "bloomtrie")
}
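
// NewBloomTrieIndexer implicitly assumes size is a non-zero multiple of parentSize:
// one bloom trie section aggregates bloomTrieRatio = size/parentSize bloom bit sections.
// An illustrative helper (not part of the original file) expressing the same ratio in
// terms of an IndexerConfig; e.g. 2048/256 = 8 for TestServerIndexerConfig and
// 2048/2048 = 1 for TestClientIndexerConfig above.
func bloomTrieRatioFor(cfg *IndexerConfig) uint64 {
	return cfg.BloomTrieSize / cfg.BloomSize
}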

// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	indexCh := make(chan uint, types.BloomBitLength)
	type res struct {
		nodes *NodeSet
		err   error
	}
	resCh := make(chan res, types.BloomBitLength)
	for i := 0; i < 20; i++ {
		go func() {
			for bitIndex := range indexCh {
				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
				for {
					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
						// if there are no peers to serve, retry later
						select {
						case <-ctx.Done():
							resCh <- res{nil, ctx.Err()}
							return
						case <-time.After(time.Second * 10):
							// stay in the loop and try again
						}
					} else {
						resCh <- res{r.Proofs, err}
						break
					}
				}
			}
		}()
	}

	for i := uint(0); i < types.BloomBitLength; i++ {
		indexCh <- i
	}
	close(indexCh)
	batch := b.trieTable.NewBatch()
	for i := uint(0); i < types.BloomBitLength; i++ {
		res := <-resCh
		if res.err != nil {
			return res.err
		}
		res.nodes.Store(batch)
	}
	return batch.Write()
}

// Reset implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
	}
	var err error
	b.trie, err = trie.New(root, b.triedb)
	if err != nil && b.odr != nil {
		err = b.fetchMissingNodes(ctx, section, root)
		if err == nil {
			b.trie, err = trie.New(root, b.triedb)
		}
	}
	b.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	num := header.Number.Uint64() - b.section*b.size
	if (num+1)%b.parentSize == 0 {
		b.sectionHeads[num/b.parentSize] = header.Hash()
	}
	return nil
}

// Commit implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Commit() error {
	var compSize, decompSize uint64

	for i := uint(0); i < types.BloomBitLength; i++ {
		var encKey [10]byte
		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
		binary.BigEndian.PutUint64(encKey[2:10], b.section)
		var decomp []byte
		for j := uint64(0); j < b.bloomTrieRatio; j++ {
			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
			if err != nil {
				return err
			}
			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
			if err2 != nil {
				return err2
			}
			decomp = append(decomp, decompData...)
		}
		comp := bitutil.CompressBytes(decomp)

		decompSize += uint64(len(decomp))
		compSize += uint64(len(comp))
		if len(comp) > 0 {
			b.trie.Update(encKey[:], comp)
		} else {
			b.trie.Delete(encKey[:])
		}
	}
	root, err := b.trie.Commit(nil)
	if err != nil {
		return err
	}
	b.triedb.Commit(root, false, nil)

	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
	return nil
}
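
// Each bloom trie key written by Commit is a fixed 10-byte value: a 2-byte big-endian
// bit index followed by the 8-byte big-endian bloom trie section number; the value is
// the compressed bit vector (or the key is deleted when the vector is empty). An
// illustrative helper (not part of the original file) that builds the same key:
func bloomTrieKey(bitIndex uint, section uint64) []byte {
	var encKey [10]byte
	binary.BigEndian.PutUint16(encKey[0:2], uint16(bitIndex))
	binary.BigEndian.PutUint64(encKey[2:10], section)
	return encKey[:]
}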

// Prune implements core.ChainIndexerBackend. It deletes all bloom bits
// that are older than the specified threshold.
func (b *BloomTrieIndexerBackend) Prune(threshold uint64) error {
	// Short circuit if light pruning is disabled.
	if b.disablePruning {
		return nil
	}
	start := time.Now()
	for i := uint(0); i < types.BloomBitLength; i++ {
		rawdb.DeleteBloombits(b.diskdb, i, 0, threshold*b.bloomTrieRatio+b.bloomTrieRatio)
	}
	log.Debug("Prune history bloombits", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}