github.com/alexdevranger/node-1.8.27@v0.0.0-20221128213301-aa5841e41d2d/light/postprocess.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-dubxcoin library.
//
// The go-dubxcoin library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-dubxcoin library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/alexdevranger/node-1.8.27/common"
	"github.com/alexdevranger/node-1.8.27/common/bitutil"
	"github.com/alexdevranger/node-1.8.27/core"
	"github.com/alexdevranger/node-1.8.27/core/rawdb"
	"github.com/alexdevranger/node-1.8.27/core/types"
	"github.com/alexdevranger/node-1.8.27/ethdb"
	"github.com/alexdevranger/node-1.8.27/log"
	"github.com/alexdevranger/node-1.8.27/params"
	"github.com/alexdevranger/node-1.8.27/rlp"
	"github.com/alexdevranger/node-1.8.27/trie"
)

// IndexerConfig includes a set of configs for chain indexers.
type IndexerConfig struct {
	// The block frequency for creating CHTs.
	ChtSize uint64

	// An auxiliary field: in a server config it holds the client's CHT section size,
	// in a client config the server's CHT section size.
	PairChtSize uint64

	// The number of confirmations needed to generate/accept a canonical hash trie.
	ChtConfirms uint64

	// The block frequency for creating new bloom bits.
	BloomSize uint64

	// The number of confirmations needed before a bloom section is considered probably final and its rotated bits
	// are calculated.
	BloomConfirms uint64

	// The block frequency for creating a BloomTrie.
	BloomTrieSize uint64

	// The number of confirmations needed to generate/accept a bloom trie.
	BloomTrieConfirms uint64
}

var (
	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
	DefaultServerIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequencyServer,
		PairChtSize:       params.CHTFrequencyClient,
		ChtConfirms:       params.HelperTrieProcessConfirmations,
		BloomSize:         params.BloomBitsBlocks,
		BloomConfirms:     params.BloomConfirms,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
	}
	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
	DefaultClientIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequencyClient,
		PairChtSize:       params.CHTFrequencyServer,
		ChtConfirms:       params.HelperTrieConfirmations,
		BloomSize:         params.BloomBitsBlocksClient,
		BloomConfirms:     params.HelperTrieConfirmations,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieConfirmations,
	}
	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
	TestServerIndexerConfig = &IndexerConfig{
		ChtSize:           64,
		PairChtSize:       512,
		ChtConfirms:       4,
		BloomSize:         64,
		BloomConfirms:     4,
		BloomTrieSize:     512,
		BloomTrieConfirms: 4,
	}
	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
	TestClientIndexerConfig = &IndexerConfig{
		ChtSize:           512,
		PairChtSize:       64,
		ChtConfirms:       32,
		BloomSize:         512,
		BloomConfirms:     32,
		BloomTrieSize:     512,
		BloomTrieConfirms: 32,
	}
)
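
// Illustrative note, not part of the original source: with the default
// frequencies defined in params (assuming this fork keeps the go-ethereum
// values it derives from, e.g. a server CHT every 4096 blocks and a client
// CHT every 32768 blocks), one client-side section spans a whole number of
// server-side sections (32768 / 4096 = 8). PairChtSize simply records the
// "other side's" section size so that either endpoint can translate between
// the two section indexing schemes when serving or verifying proofs.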

var (
	ErrNoTrustedCht       = errors.New("no trusted canonical hash trie")
	ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
	ErrNoHeader           = errors.New("header not found")
	chtPrefix             = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
	ChtTablePrefix        = "cht-"
)

// ChtNode structures are stored in the Canonical Hash Trie in an RLP-encoded format.
type ChtNode struct {
	Hash common.Hash
	Td   *big.Int
}

// GetChtRoot reads the CHT root associated with the given section from the database.
// Note that sectionIdx is specified according to LES/1 CHT section size.
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreChtRoot writes the CHT root associated with the given section into the database.
// Note that sectionIdx is specified according to LES/1 CHT section size.
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}
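
// Round-trip sketch for the two helpers above (illustrative only; db, head
// and root stand in for values supplied by the caller):
//
//	StoreChtRoot(db, 5, head, root) // key = "chtRoot-" ++ uint64(5) big endian ++ head bytes
//	got := GetChtRoot(db, 5, head)  // == root, or the zero hash if nothing was stored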

// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
	diskdb, trieTable    ethdb.Database
	odr                  OdrBackend
	triedb               *trie.Database
	section, sectionSize uint64
	lastHash             common.Hash
	trie                 *trie.Trie
}

// NewChtIndexer creates a Cht chain indexer
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *core.ChainIndexer {
	trieTable := ethdb.NewTable(db, ChtTablePrefix)
	backend := &ChtIndexerBackend{
		diskdb:      db,
		odr:         odr,
		trieTable:   trieTable,
		triedb:      trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
		sectionSize: size,
	}
	return core.NewChainIndexer(db, ethdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
}
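
// Minimal wiring sketch (illustrative only; chainDb, odr and chain are
// placeholders for the caller's database, ODR backend and header chain):
//
//	chtIndexer := light.NewChtIndexer(chainDb, odr, params.CHTFrequencyClient, params.HelperTrieConfirmations)
//	chtIndexer.Start(chain) // the returned core.ChainIndexer drives Reset/Process/Commit below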

// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	batch := c.trieTable.NewBatch()
	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
	for {
		err := c.odr.Retrieve(ctx, r)
		switch err {
		case nil:
			r.Proof.Store(batch)
			return batch.Write()
		case ErrNoPeers:
			// if there are no peers to serve, retry later
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second * 10):
				// stay in the loop and try again
			}
		default:
			return err
		}
	}
}
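
// Note on the request built above: BlockNum is section*sectionSize-1, the last
// block covered by the previous (already trusted) CHT, so a successful
// retrieval supplies the trie nodes along that key's path, which is what the
// indexer needs before it can append entries for the new section.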

// Reset implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
	}
	var err error
	c.trie, err = trie.New(root, c.triedb)

	if err != nil && c.odr != nil {
		err = c.fetchMissingNodes(ctx, section, root)
		if err == nil {
			c.trie, err = trie.New(root, c.triedb)
		}
	}

	c.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	hash, num := header.Hash(), header.Number.Uint64()
	c.lastHash = hash

	td := rawdb.ReadTd(c.diskdb, hash, num)
	if td == nil {
		panic(fmt.Sprintf("total difficulty not found for CHT block %d [%x]", num, hash))
	}
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], num)
	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
	c.trie.Update(encNumber[:], data)
	return nil
}
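
// Leaf format written by Process (as implemented above):
//
//	key   = 8-byte big-endian block number
//	value = RLP(ChtNode{Hash: header hash, Td: total difficulty})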

// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
	root, err := c.trie.Commit(nil)
	if err != nil {
		return err
	}
	c.triedb.Commit(root, false)

	if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
		log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
	}
	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
	return nil
}
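
// Note: a root is stored for every locally indexed section, but the
// "Storing CHT" log above only fires when the section boundary also lines up
// with params.CHTFrequencyClient, i.e. when a client-sized section (the unit
// light clients actually consume) has just been completed.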

var (
	bloomTriePrefix      = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
	BloomTrieTablePrefix = "blt-"
)

// GetBloomTrieRoot reads the BloomTrie root associated with the given section from the database.
func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreBloomTrieRoot writes the BloomTrie root associated with the given section into the database.
func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// BloomTrieIndexerBackend implements core.ChainIndexerBackend
type BloomTrieIndexerBackend struct {
	diskdb, trieTable ethdb.Database
	triedb            *trie.Database
	odr               OdrBackend
	section           uint64
	parentSize        uint64
	size              uint64
	bloomTrieRatio    uint64
	trie              *trie.Trie
	sectionHeads      []common.Hash
}

// NewBloomTrieIndexer creates a BloomTrie chain indexer
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64) *core.ChainIndexer {
	trieTable := ethdb.NewTable(db, BloomTrieTablePrefix)
	backend := &BloomTrieIndexerBackend{
		diskdb:     db,
		odr:        odr,
		trieTable:  trieTable,
		triedb:     trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
		parentSize: parentSize,
		size:       size,
	}
	backend.bloomTrieRatio = size / parentSize
	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
	return core.NewChainIndexer(db, ethdb.NewTable(db, "bltIndex-"), backend, size, 0, time.Millisecond*100, "bloomtrie")
}
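
// Wiring sketch (illustrative only; chainDb, odr and chain are placeholders,
// and the parameter choice mirrors the client defaults above). size is
// expected to be a whole multiple of parentSize, since bloomTrieRatio =
// size / parentSize bloom-bits sections are folded into one bloom trie section:
//
//	bltIndexer := light.NewBloomTrieIndexer(chainDb, odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
//	bltIndexer.Start(chain)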

// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	indexCh := make(chan uint, types.BloomBitLength)
	type res struct {
		nodes *NodeSet
		err   error
	}
	resCh := make(chan res, types.BloomBitLength)
	for i := 0; i < 20; i++ {
		go func() {
			for bitIndex := range indexCh {
				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
				for {
					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
						// if there are no peers to serve, retry later
						select {
						case <-ctx.Done():
							resCh <- res{nil, ctx.Err()}
							return
						case <-time.After(time.Second * 10):
							// stay in the loop and try again
						}
					} else {
						resCh <- res{r.Proofs, err}
						break
					}
				}
			}
		}()
	}

	for i := uint(0); i < types.BloomBitLength; i++ {
		indexCh <- i
	}
	close(indexCh)
	batch := b.trieTable.NewBatch()
	for i := uint(0); i < types.BloomBitLength; i++ {
		res := <-resCh
		if res.err != nil {
			return res.err
		}
		res.nodes.Store(batch)
	}
	return batch.Write()
}
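
// The function above fans the types.BloomBitLength bit indices out to a fixed
// pool of 20 worker goroutines; each worker retries its ODR request every 10
// seconds while no peers are available, and the collecting loop writes every
// returned proof into one batch, bailing out on the first error it receives.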

// Reset implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
	}
	var err error
	b.trie, err = trie.New(root, b.triedb)
	if err != nil && b.odr != nil {
		err = b.fetchMissingNodes(ctx, section, root)
		if err == nil {
			b.trie, err = trie.New(root, b.triedb)
		}
	}
	b.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	num := header.Number.Uint64() - b.section*b.size
	if (num+1)%b.parentSize == 0 {
		b.sectionHeads[num/b.parentSize] = header.Hash()
	}
	return nil
}

// Commit implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Commit() error {
	var compSize, decompSize uint64

	for i := uint(0); i < types.BloomBitLength; i++ {
		var encKey [10]byte
		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
		binary.BigEndian.PutUint64(encKey[2:10], b.section)
		var decomp []byte
		for j := uint64(0); j < b.bloomTrieRatio; j++ {
			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
			if err != nil {
				return err
			}
			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
			if err2 != nil {
				return err2
			}
			decomp = append(decomp, decompData...)
		}
		comp := bitutil.CompressBytes(decomp)

		decompSize += uint64(len(decomp))
		compSize += uint64(len(comp))
		if len(comp) > 0 {
			b.trie.Update(encKey[:], comp)
		} else {
			b.trie.Delete(encKey[:])
		}
	}
	root, err := b.trie.Commit(nil)
	if err != nil {
		return err
	}
	b.triedb.Commit(root, false)

	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
	return nil
}
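
// Leaf format written by Commit (as implemented above):
//
//	key   = 2-byte big-endian bit index ++ 8-byte big-endian bloom trie section index
//	value = bitutil-compressed concatenation of that bit's bloom bits across the
//	        bloomTrieRatio underlying bloom sections (the key is deleted instead
//	        when the compressed value would be empty)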