github.com/dim4egster/coreth@v0.10.2/sync/handlers/leafs_request.go

// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package handlers

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/dim4egster/qmallgo/codec"
	"github.com/dim4egster/qmallgo/ids"
	"github.com/dim4egster/qmallgo/utils/math"
	"github.com/dim4egster/qmallgo/utils/wrappers"
	"github.com/dim4egster/coreth/core/state/snapshot"
	"github.com/dim4egster/coreth/core/types"
	"github.com/dim4egster/coreth/ethdb"
	"github.com/dim4egster/coreth/ethdb/memorydb"
	"github.com/dim4egster/coreth/plugin/evm/message"
	"github.com/dim4egster/coreth/sync/handlers/stats"
	"github.com/dim4egster/coreth/sync/syncutils"
	"github.com/dim4egster/coreth/trie"
	"github.com/dim4egster/coreth/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

const (
	// Maximum number of leaves to return in a message.LeafsResponse.
	// This parameter overrides any Limit specified in a message.LeafsRequest
	// that is greater than this value.
	maxLeavesLimit = uint16(1024)

	segmentLen = 64 // divide data read from the snapshot into segments of this size
)

// LeafsRequestHandler is a peer.RequestHandler for message.LeafsRequest,
// serving requested trie data.
type LeafsRequestHandler struct {
	trieDB           *trie.Database
	snapshotProvider SnapshotProvider
	codec            codec.Manager
	stats            stats.LeafsRequestHandlerStats
	pool             sync.Pool
}

func NewLeafsRequestHandler(trieDB *trie.Database, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *LeafsRequestHandler {
	return &LeafsRequestHandler{
		trieDB:           trieDB,
		snapshotProvider: snapshotProvider,
		codec:            codec,
		stats:            syncerStats,
		pool: sync.Pool{
			New: func() interface{} { return make([][]byte, 0, maxLeavesLimit) },
		},
	}
}

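// Constructing the handler wires together the trie database, an optional
// snapshot provider, the network codec, and a stats sink. A minimal sketch
// (the `provider`, `networkCodec`, and `handlerStats` values are assumed to
// be supplied by the caller; they are not defined in this file):
//
//	handler := NewLeafsRequestHandler(trieDB, provider, networkCodec, handlerStats)
//	responseBytes, err := handler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest)
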
// OnLeafsRequest returns an encoded message.LeafsResponse for a given message.LeafsRequest.
// Returns leaves with proofs for the specified (Start-End) range (both bounds inclusive).
// The returned message.LeafsResponse may contain partial leaves within the requested range if:
// - ctx expired while fetching leafs
// - the number of leaves read exceeds Limit (message.LeafsRequest)
// The Limit specified in message.LeafsRequest is overridden to maxLeavesLimit if it is greater.
// Any error returned from this handler would be treated as FATAL by the caller, so failures
// are logged and (nil, nil) is returned instead; this method never returns errors.
// Expects NodeType to be one of message.AtomicTrieNode or message.StateTrieNode.
// Returns nothing if NodeType is invalid or the requested trie root is not found.
// Assumes ctx is active.
func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) {
	startTime := time.Now()
	lrh.stats.IncLeafsRequest()

	if (len(leafsRequest.End) > 0 && bytes.Compare(leafsRequest.Start, leafsRequest.End) > 0) ||
		leafsRequest.Root == (common.Hash{}) ||
		leafsRequest.Root == types.EmptyRootHash ||
		leafsRequest.Limit == 0 {
		log.Debug("invalid leafs request, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest)
		lrh.stats.IncInvalidLeafsRequest()
		return nil, nil
	}
	keyLength, err := getKeyLength(leafsRequest.NodeType)
	if err != nil {
		// Note: LeafsRequest.Handle checks NodeType's validity, so clients cannot cause the server to spam this error
		log.Error("Failed to get key length for leafs request", "err", err)
		lrh.stats.IncInvalidLeafsRequest()
		return nil, nil
	}
	if len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != keyLength ||
		len(leafsRequest.End) != 0 && len(leafsRequest.End) != keyLength {
		log.Debug("invalid length for leafs request range, dropping request", "startLen", len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", keyLength)
		lrh.stats.IncInvalidLeafsRequest()
		return nil, nil
	}
	t, err := trie.New(leafsRequest.Account, leafsRequest.Root, lrh.trieDB)
	if err != nil {
		log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err)
		lrh.stats.IncMissingRoot()
		return nil, nil
	}
	// override limit if it is greater than the configured maxLeavesLimit
	limit := leafsRequest.Limit
	if limit > maxLeavesLimit {
		limit = maxLeavesLimit
	}

	var leafsResponse message.LeafsResponse
	// reuse pooled allocations for the response's key/val slices
	leafsResponse.Keys = lrh.pool.Get().([][]byte)
	leafsResponse.Vals = lrh.pool.Get().([][]byte)
	defer func() {
		for i := range leafsResponse.Keys {
			// clear out the slices before returning them to the pool
			// to avoid a memory leak
			leafsResponse.Keys[i] = nil
			leafsResponse.Vals[i] = nil
		}
		lrh.pool.Put(leafsResponse.Keys[:0])
		lrh.pool.Put(leafsResponse.Vals[:0])
	}()

	responseBuilder := &responseBuilder{
		request:   &leafsRequest,
		response:  &leafsResponse,
		t:         t,
		keyLength: keyLength,
		limit:     limit,
		stats:     lrh.stats,
	}
	// pass snapshot to responseBuilder if non-nil snapshot getter provided
	if lrh.snapshotProvider != nil {
		responseBuilder.snap = lrh.snapshotProvider.Snapshots()
	}
	err = responseBuilder.handleRequest(ctx)

	// ensure metrics are captured properly on all return paths
	defer func() {
		lrh.stats.UpdateLeafsRequestProcessingTime(time.Since(startTime))
		lrh.stats.UpdateLeafsReturned(uint16(len(leafsResponse.Keys)))
		lrh.stats.UpdateRangeProofValsReturned(int64(len(leafsResponse.ProofVals)))
		lrh.stats.UpdateGenerateRangeProofTime(responseBuilder.proofTime)
		lrh.stats.UpdateReadLeafsTime(responseBuilder.trieReadTime)
	}()
	if err != nil {
		log.Debug("failed to serve leafs request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err)
		return nil, nil
	}
	if len(leafsResponse.Keys) == 0 && ctx.Err() != nil {
		log.Debug("context err set before any leafs were iterated", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "ctxErr", ctx.Err())
		return nil, nil
	}

	responseBytes, err := lrh.codec.Marshal(message.Version, leafsResponse)
	if err != nil {
		log.Debug("failed to marshal LeafsResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err)
		return nil, nil
	}

	log.Debug("handled leafsRequest", "time", time.Since(startTime), "leafs", len(leafsResponse.Keys), "proofLen", len(leafsResponse.ProofVals))
	return responseBytes, nil
}

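// For reference, a request for the first window of leaves in the main account
// trie could be built as below (an illustrative sketch; `root` is assumed to
// be a state root known to the server's trieDB):
//
//	req := message.LeafsRequest{
//		Root:     root,
//		Account:  common.Hash{},          // zero hash selects the main account trie
//		Start:    nil,                    // empty Start means the beginning of the key space
//		End:      nil,                    // empty End means no upper bound
//		Limit:    1024,                   // capped server-side at maxLeavesLimit
//		NodeType: message.StateTrieNode,
//	}
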
type responseBuilder struct {
	request   *message.LeafsRequest
	response  *message.LeafsResponse
	t         *trie.Trie
	snap      *snapshot.Tree
	keyLength int
	limit     uint16

	// stats
	trieReadTime time.Duration
	proofTime    time.Duration
	stats        stats.LeafsRequestHandlerStats
}

func (rb *responseBuilder) handleRequest(ctx context.Context) error {
	// Read from the snapshot if a [snapshot.Tree] was provided in initialization
	if rb.snap != nil {
		if done, err := rb.fillFromSnapshot(ctx); err != nil {
			return err
		} else if done {
			return nil
		}
		// reset the proof if we will iterate the trie further
		rb.response.ProofVals = nil
	}

	if len(rb.response.Keys) < int(rb.limit) {
		// more indicates whether there are more leaves in the trie
		more, err := rb.fillFromTrie(ctx, rb.request.End)
		if err != nil {
			rb.stats.IncTrieError()
			return err
		}
		if len(rb.request.Start) == 0 && !more {
			// omit the proof via early return
			return nil
		}
	}

	// Generate the proof and add it to the response.
	proof, err := rb.generateRangeProof(rb.request.Start, rb.response.Keys)
	if err != nil {
		rb.stats.IncProofError()
		return err
	}
	defer proof.Close() // closing memdb does not error

	rb.response.ProofVals, err = iterateVals(proof)
	if err != nil {
		rb.stats.IncProofError()
		return err
	}
	return nil
}

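// Note on the early returns above: when the request starts at the beginning of
// the key space (empty Start) and iteration exhausted the trie (!more), the
// response covers the entire trie. The client can then verify the key/value
// pairs against the root hash directly, so no range proof is attached.
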
// fillFromSnapshot reads data from the snapshot and returns true if the response is complete.
// Otherwise, the caller should attempt to iterate the trie and determine if a range proof
// should be added to the response.
func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) {
	snapshotReadStart := time.Now()
	rb.stats.IncSnapshotReadAttempt()

	// Optimistically read leafs from the snapshot, assuming they have not been
	// modified since the requested root. If this assumption can be verified with
	// range proofs and data from the trie, we can skip iterating the trie as
	// an optimization.
	snapKeys, snapVals, err := rb.readLeafsFromSnapshot(ctx)
	// Update the snapshot read time here, so that the case where an error occurred is included.
	rb.stats.UpdateSnapshotReadTime(time.Since(snapshotReadStart))
	if err != nil {
		rb.stats.IncSnapshotReadError()
		return false, err
	}

	// Check if the entire range read from the snapshot is valid according to the trie.
	proof, ok, more, err := rb.isRangeValid(snapKeys, snapVals, false)
	if err != nil {
		rb.stats.IncProofError()
		return false, err
	}
	defer proof.Close() // closing memdb does not error
	if ok {
		rb.response.Keys, rb.response.Vals = snapKeys, snapVals
		if len(rb.request.Start) == 0 && !more {
			// omit the proof via early return
			rb.stats.IncSnapshotReadSuccess()
			return true, nil
		}
		rb.response.ProofVals, err = iterateVals(proof)
		if err != nil {
			rb.stats.IncProofError()
			return false, err
		}
		rb.stats.IncSnapshotReadSuccess()
		return !more, nil
	}
	// The data from the snapshot could not be validated as a whole. It is still likely
	// that most of the data from the snapshot is usable, so we try to validate smaller
	// segments of the data and use them in the response.
	hasGap := false
	for i := 0; i < len(snapKeys); i += segmentLen {
		segmentEnd := math.Min(i+segmentLen, len(snapKeys))
		proof, ok, _, err := rb.isRangeValid(snapKeys[i:segmentEnd], snapVals[i:segmentEnd], hasGap)
		if err != nil {
			rb.stats.IncProofError()
			return false, err
		}
		_ = proof.Close() // we don't need this proof
		if !ok {
			// segment is not valid
			rb.stats.IncSnapshotSegmentInvalid()
			hasGap = true
			continue
		}

		// segment is valid
		rb.stats.IncSnapshotSegmentValid()
		if hasGap {
			// if there is a gap between valid segments, fill the gap with data from the trie
			_, err := rb.fillFromTrie(ctx, snapKeys[i])
			if err != nil {
				rb.stats.IncTrieError()
				return false, err
			}
			if len(rb.response.Keys) >= int(rb.limit) || ctx.Err() != nil {
				break
			}
			// remove the last key added since it is snapKeys[i] and will be added back
			// Note: this is safe because we were able to verify the range proof that
			// shows snapKeys[i] is part of the trie.
			rb.response.Keys = rb.response.Keys[:len(rb.response.Keys)-1]
			rb.response.Vals = rb.response.Vals[:len(rb.response.Vals)-1]
		}
		hasGap = false
		// all the key/vals in the segment are valid, but segmentEnd may need to be
		// shortened here to respect the limit. this is necessary in case filling a
		// gap from the trie added more leaves to the response than the invalid
		// segments contained.
		segmentEnd = math.Min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys))
		rb.response.Keys = append(rb.response.Keys, snapKeys[i:segmentEnd]...)
		rb.response.Vals = append(rb.response.Vals, snapVals[i:segmentEnd]...)

		if len(rb.response.Keys) >= int(rb.limit) {
			break
		}
	}
	return false, nil
}

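// A worked example of the segment loop above: with segmentLen = 64 and 150
// snapshot keys, the segments checked are snapKeys[0:64], snapKeys[64:128],
// and snapKeys[128:150]. If only the middle segment fails validation, the
// first segment is appended as-is, the gap is filled from the trie up to and
// including snapKeys[128], and the last segment is appended after the
// duplicated boundary key snapKeys[128] is dropped from the trie results.
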
// generateRangeProof returns a range proof for the range specified by [start] and [keys] using [t].
func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*memorydb.Database, error) {
	proof := memorydb.New()
	startTime := time.Now()
	defer func() { rb.proofTime += time.Since(startTime) }()

	// If [start] is empty, populate it with the appropriate length key starting at 0.
	if len(start) == 0 {
		start = bytes.Repeat([]byte{0x00}, rb.keyLength)
	}

	if err := rb.t.Prove(start, 0, proof); err != nil {
		_ = proof.Close() // closing memdb does not error
		return nil, err
	}
	if len(keys) > 0 {
		// If there is a non-zero number of keys, set [end] for the range proof to the last key.
		end := keys[len(keys)-1]
		if err := rb.t.Prove(end, 0, proof); err != nil {
			_ = proof.Close() // closing memdb does not error
			return nil, err
		}
	}
	return proof, nil
}

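// The memorydb returned above holds the trie nodes proving both edges of the
// range: the path from the root to [start] (or to the all-zero key when the
// request has no Start) and, for a non-empty response, the path to the last
// returned key. These edge proofs are exactly what trie.VerifyRangeProof
// consumes in verifyRangeProof below.
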
// verifyRangeProof verifies the provided range proof against [keys] and [vals], starting at [start].
// Returns a boolean indicating whether there are more leaves to the right of the last key in the trie,
// and a nil error if the range proof is successfully verified.
func (rb *responseBuilder) verifyRangeProof(keys, vals [][]byte, start []byte, proof *memorydb.Database) (bool, error) {
	startTime := time.Now()
	defer func() { rb.proofTime += time.Since(startTime) }()

	// If [start] is empty, populate it with the appropriate length key starting at 0.
	if len(start) == 0 {
		start = bytes.Repeat([]byte{0x00}, rb.keyLength)
	}
	var end []byte
	if len(keys) > 0 {
		end = keys[len(keys)-1]
	}
	return trie.VerifyRangeProof(rb.request.Root, start, end, keys, vals, proof)
}

// iterateVals returns the values contained in [db]
func iterateVals(db *memorydb.Database) ([][]byte, error) {
	if db == nil {
		return nil, nil
	}
	// iterate db into [][]byte and return
	it := db.NewIterator(nil, nil)
	defer it.Release()

	vals := make([][]byte, 0, db.Len())
	for it.Next() {
		vals = append(vals, it.Value())
	}

	return vals, it.Error()
}

// isRangeValid generates and verifies a range proof, returning true if keys/vals are
// part of the trie. If [hasGap] is true, the range is validated independent of the
// existing response. If [hasGap] is false, the range proof begins at a key which
// guarantees the range can be appended to the response.
// Additionally returns a boolean indicating if there are more leaves in the trie.
func (rb *responseBuilder) isRangeValid(keys, vals [][]byte, hasGap bool) (*memorydb.Database, bool, bool, error) {
	var startKey []byte
	if hasGap {
		startKey = keys[0]
	} else {
		startKey = rb.nextKey()
	}

	proof, err := rb.generateRangeProof(startKey, keys)
	if err != nil {
		return nil, false, false, err
	}
	more, proofErr := rb.verifyRangeProof(keys, vals, startKey, proof)
	return proof, proofErr == nil, more, nil
}

// nextKey returns the next key that could potentially be part of the response.
func (rb *responseBuilder) nextKey() []byte {
	if len(rb.response.Keys) == 0 {
		return rb.request.Start
	}
	nextKey := common.CopyBytes(rb.response.Keys[len(rb.response.Keys)-1])
	utils.IncrOne(nextKey)
	return nextKey
}

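// For example, assuming utils.IncrOne increments the key as a big-endian
// integer with carry, a last response key ending in 0x01ff yields a next key
// ending in 0x0200: the smallest key that can follow the response without
// overlapping it, which keeps consecutive range proofs contiguous.
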
// fillFromTrie iterates key/values from the response builder's trie and appends
// them to the response. Iteration begins from the last key already in the response,
// or the request start if the response is empty. Iteration ends at [end] or when
// the number of leafs reaches the builder's limit.
// Returns true if there are more keys in the trie.
func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, error) {
	startTime := time.Now()
	defer func() { rb.trieReadTime += time.Since(startTime) }()

	// create iterator to iterate the trie
	it := trie.NewIterator(rb.t.NodeIterator(rb.nextKey()))
	more := false
	for it.Next() {
		// if we're past the end of the requested range, break this loop
		if len(end) > 0 && bytes.Compare(it.Key, end) > 0 {
			more = true
			break
		}

		// If we've returned enough data or run out of time, set the more flag and exit.
		// This flag determines whether a range proof is generated for the response.
		if len(rb.response.Keys) >= int(rb.limit) || ctx.Err() != nil {
			more = true
			break
		}

		// append key/vals to the response
		rb.response.Keys = append(rb.response.Keys, it.Key)
		rb.response.Vals = append(rb.response.Vals, it.Value)
	}
	return more, it.Err
}

// getKeyLength returns the trie key length for the given nodeType.
// Expects nodeType to be one of message.AtomicTrieNode or message.StateTrieNode.
func getKeyLength(nodeType message.NodeType) (int, error) {
	switch nodeType {
	case message.AtomicTrieNode:
		return wrappers.LongLen + common.HashLength, nil
	case message.StateTrieNode:
		return common.HashLength, nil
	}
	return 0, fmt.Errorf("cannot get key length for unknown node type: %s", nodeType)
}

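// Concretely: atomic trie keys are wrappers.LongLen (8) + common.HashLength
// (32) = 40 bytes, an 8-byte prefix (a block height in the atomic trie)
// followed by a 32-byte hash, while state trie keys are 32-byte hashes
// identifying accounts or storage slots.
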
// readLeafsFromSnapshot iterates the storage snapshot of the requested account
// (or the main account trie if account is empty). Returns up to [rb.limit] key/value
// pairs for keys that are in the request's range (inclusive).
func (rb *responseBuilder) readLeafsFromSnapshot(ctx context.Context) ([][]byte, [][]byte, error) {
	var (
		snapIt    ethdb.Iterator
		startHash = common.BytesToHash(rb.request.Start)
		keys      = make([][]byte, 0, rb.limit)
		vals      = make([][]byte, 0, rb.limit)
	)

	// Get an iterator into the storage or the main account snapshot.
	if rb.request.Account == (common.Hash{}) {
		snapIt = &syncutils.AccountIterator{AccountIterator: rb.snap.DiskAccountIterator(startHash)}
	} else {
		snapIt = &syncutils.StorageIterator{StorageIterator: rb.snap.DiskStorageIterator(rb.request.Account, startHash)}
	}
	defer snapIt.Release()
	for snapIt.Next() {
		// if we're past the end of the requested range, break this loop
		if len(rb.request.End) > 0 && bytes.Compare(snapIt.Key(), rb.request.End) > 0 {
			break
		}
		// If we've returned enough data or run out of time, stop iterating;
		// the results will be validated (and topped up from the trie) by the caller.
		if len(keys) >= int(rb.limit) || ctx.Err() != nil {
			break
		}

		keys = append(keys, snapIt.Key())
		vals = append(vals, snapIt.Value())
	}
	return keys, vals, snapIt.Error()
}
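
// An empty Account hash selects the account snapshot (DiskAccountIterator),
// while a non-empty Account selects that account's storage snapshot
// (DiskStorageIterator). Both iterate the on-disk snapshot layer, which may
// lag the requested root; this is why fillFromSnapshot re-validates the
// results against the trie before using them in a response.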