github.com/MetalBlockchain/subnet-evm@v0.4.9/sync/handlers/leafs_request.go

// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package handlers

import (
	"bytes"
	"context"
	"sync"
	"time"

	"github.com/MetalBlockchain/metalgo/codec"
	"github.com/MetalBlockchain/metalgo/ids"
	"github.com/MetalBlockchain/metalgo/utils/math"
	"github.com/MetalBlockchain/subnet-evm/core/state/snapshot"
	"github.com/MetalBlockchain/subnet-evm/core/types"
	"github.com/MetalBlockchain/subnet-evm/ethdb"
	"github.com/MetalBlockchain/subnet-evm/ethdb/memorydb"
	"github.com/MetalBlockchain/subnet-evm/plugin/evm/message"
	"github.com/MetalBlockchain/subnet-evm/sync/handlers/stats"
	"github.com/MetalBlockchain/subnet-evm/sync/syncutils"
	"github.com/MetalBlockchain/subnet-evm/trie"
	"github.com/MetalBlockchain/subnet-evm/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

const (
	// Maximum number of leaves to return in a message.LeafsResponse
	// This parameter overrides any other Limit specified
	// in message.LeafsRequest if it is greater than this value
	maxLeavesLimit = uint16(1024)

	segmentLen = 64                // divide data from the snapshot into segments of this size
	keyLength  = common.HashLength // length of the keys of the trie to sync
)

// LeafsRequestHandler is a peer.RequestHandler for message.LeafsRequest
// serving requested trie data
type LeafsRequestHandler struct {
	trieDB           *trie.Database
	snapshotProvider SnapshotProvider
	codec            codec.Manager
	stats            stats.LeafsRequestHandlerStats
	pool             sync.Pool
}

func NewLeafsRequestHandler(trieDB *trie.Database, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *LeafsRequestHandler {
	return &LeafsRequestHandler{
		trieDB:           trieDB,
		snapshotProvider: snapshotProvider,
		codec:            codec,
		stats:            syncerStats,
		pool: sync.Pool{
			New: func() interface{} { return make([][]byte, 0, maxLeavesLimit) },
		},
	}
}
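
// An illustrative wiring sketch (not part of the original file): constructing the
// handler and serving a single request. The trieDB, snapshotProvider, networkCodec,
// and handlerStats values are assumptions supplied by the node's setup.
//
//	handler := NewLeafsRequestHandler(trieDB, snapshotProvider, networkCodec, handlerStats)
//	responseBytes, err := handler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest)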

// OnLeafsRequest returns an encoded message.LeafsResponse for a given message.LeafsRequest
// Returns leaves with proofs for the specified (Start, End) range, both bounds inclusive
// The returned message.LeafsResponse may contain partial leaves within the requested range if:
// - ctx expired while fetching leaves
// - the number of leaves read reached the Limit specified in message.LeafsRequest
// The specified Limit is overridden to maxLeavesLimit if it is greater than maxLeavesLimit
// Because the caller treats any returned error as FATAL, this handler never returns
// errors; requests that cannot be served are dropped and a nil response is returned
// Returns nothing if the requested trie root is not found
// Assumes ctx is active
func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) {
	startTime := time.Now()
	lrh.stats.IncLeafsRequest()

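	// Reject requests that can never be served: an inverted key range, a missing
	// or empty-trie root, or a zero leaf limit.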
	if (len(leafsRequest.End) > 0 && bytes.Compare(leafsRequest.Start, leafsRequest.End) > 0) ||
		leafsRequest.Root == (common.Hash{}) ||
		leafsRequest.Root == types.EmptyRootHash ||
		leafsRequest.Limit == 0 {
		log.Debug("invalid leafs request, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest)
		lrh.stats.IncInvalidLeafsRequest()
		return nil, nil
	}
	if len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != keyLength ||
		len(leafsRequest.End) != 0 && len(leafsRequest.End) != keyLength {
		log.Debug("invalid length for leafs request range, dropping request", "startLen", len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", keyLength)
		lrh.stats.IncInvalidLeafsRequest()
		return nil, nil
	}

	t, err := trie.New(leafsRequest.Account, leafsRequest.Root, lrh.trieDB)
	if err != nil {
		log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err)
		lrh.stats.IncMissingRoot()
		return nil, nil
	}
	// override limit if it is greater than the configured maxLeavesLimit
	limit := leafsRequest.Limit
	if limit > maxLeavesLimit {
		limit = maxLeavesLimit
	}

	var leafsResponse message.LeafsResponse
	// reuse pooled key/val slice allocations for the response
	leafsResponse.Keys = lrh.pool.Get().([][]byte)
	leafsResponse.Vals = lrh.pool.Get().([][]byte)
	defer func() {
		for i := range leafsResponse.Keys {
			// clear out the slices before returning them to the pool
			// to avoid a memory leak.
			leafsResponse.Keys[i] = nil
			leafsResponse.Vals[i] = nil
		}
		lrh.pool.Put(leafsResponse.Keys[:0])
		lrh.pool.Put(leafsResponse.Vals[:0])
	}()

	responseBuilder := &responseBuilder{
		request:   &leafsRequest,
		response:  &leafsResponse,
		t:         t,
		keyLength: keyLength,
		limit:     limit,
		stats:     lrh.stats,
	}
	// pass the snapshot to the responseBuilder if a snapshot provider was given
	if lrh.snapshotProvider != nil {
		responseBuilder.snap = lrh.snapshotProvider.Snapshots()
	}
	err = responseBuilder.handleRequest(ctx)

	// ensure metrics are captured properly on all return paths
	defer func() {
		lrh.stats.UpdateLeafsRequestProcessingTime(time.Since(startTime))
		lrh.stats.UpdateLeafsReturned(uint16(len(leafsResponse.Keys)))
		lrh.stats.UpdateRangeProofValsReturned(int64(len(leafsResponse.ProofVals)))
		lrh.stats.UpdateGenerateRangeProofTime(responseBuilder.proofTime)
		lrh.stats.UpdateReadLeafsTime(responseBuilder.trieReadTime)
	}()
	if err != nil {
		log.Debug("failed to serve leafs request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err)
		return nil, nil
	}
	if len(leafsResponse.Keys) == 0 && ctx.Err() != nil {
		log.Debug("context err set before any leafs were iterated", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "ctxErr", ctx.Err())
		return nil, nil
	}

	responseBytes, err := lrh.codec.Marshal(message.Version, leafsResponse)
	if err != nil {
		log.Debug("failed to marshal LeafsResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err)
		return nil, nil
	}

	log.Debug("handled leafsRequest", "time", time.Since(startTime), "leafs", len(leafsResponse.Keys), "proofLen", len(leafsResponse.ProofVals))
	return responseBytes, nil
}
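
// An illustrative request (field values are assumptions): asking for the first
// batch of account-trie leaves. A zero Account addresses the main account trie,
// and empty Start/End bounds leave the range unbounded.
//
//	req := message.LeafsRequest{
//		Root:  stateRoot, // root of the trie to serve; assumed from context
//		Start: nil,       // empty: begin at the zero key
//		End:   nil,       // empty: no upper bound
//		Limit: maxLeavesLimit,
//	}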

type responseBuilder struct {
	request   *message.LeafsRequest
	response  *message.LeafsResponse
	t         *trie.Trie
	snap      *snapshot.Tree
	keyLength int
	limit     uint16

	// stats
	trieReadTime time.Duration
	proofTime    time.Duration
	stats        stats.LeafsRequestHandlerStats
}

func (rb *responseBuilder) handleRequest(ctx context.Context) error {
	// Read from the snapshot if a [snapshot.Tree] was provided during initialization
	if rb.snap != nil {
		if done, err := rb.fillFromSnapshot(ctx); err != nil {
			return err
		} else if done {
			return nil
		}
		// reset the proof if we will iterate the trie further
		rb.response.ProofVals = nil
	}

	if len(rb.response.Keys) < int(rb.limit) {
		// more indicates whether there are more leaves in the trie
		more, err := rb.fillFromTrie(ctx, rb.request.End)
		if err != nil {
			rb.stats.IncTrieError()
			return err
		}
		if len(rb.request.Start) == 0 && !more {
			// the response covers the entire trie, so no proof is needed;
			// omit it via early return
			return nil
		}
	}

	// Generate the proof and add it to the response.
	proof, err := rb.generateRangeProof(rb.request.Start, rb.response.Keys)
	if err != nil {
		rb.stats.IncProofError()
		return err
	}
	defer proof.Close() // closing memdb does not error

	rb.response.ProofVals, err = iterateVals(proof)
	if err != nil {
		rb.stats.IncProofError()
		return err
	}
	return nil
}
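
// Sketch of the client-side check this proof enables (illustrative; assumes the
// client rebuilds a proof db from resp.ProofVals and mirrors verifyRangeProof below):
//
//	more, err := trie.VerifyRangeProof(req.Root, startKey, lastKey, resp.Keys, resp.Vals, proofDB)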

// fillFromSnapshot reads data from the snapshot and returns true if the response is complete.
// Otherwise, the caller should attempt to iterate the trie and determine if a range proof
// should be added to the response.
func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) {
	snapshotReadStart := time.Now()
	rb.stats.IncSnapshotReadAttempt()

	// Optimistically read leaves from the snapshot, assuming they have not been
	// modified since the requested root. If this assumption can be verified with
	// range proofs and data from the trie, we can skip iterating the trie as
	// an optimization.
	snapKeys, snapVals, err := rb.readLeafsFromSnapshot(ctx)
	// Update the snapshot read time here, so the measurement includes the error case.
	rb.stats.UpdateSnapshotReadTime(time.Since(snapshotReadStart))
	if err != nil {
		rb.stats.IncSnapshotReadError()
		return false, err
	}

	// Check if the entire range read from the snapshot is valid according to the trie.
	proof, ok, more, err := rb.isRangeValid(snapKeys, snapVals, false)
	if err != nil {
		rb.stats.IncProofError()
		return false, err
	}
	defer proof.Close() // closing memdb does not error
	if ok {
		rb.response.Keys, rb.response.Vals = snapKeys, snapVals
		if len(rb.request.Start) == 0 && !more {
			// omit proof via early return
			rb.stats.IncSnapshotReadSuccess()
			return true, nil
		}
		rb.response.ProofVals, err = iterateVals(proof)
		if err != nil {
			rb.stats.IncProofError()
			return false, err
		}
		rb.stats.IncSnapshotReadSuccess()
		return !more, nil
	}
	// The data from the snapshot could not be validated as a whole. Most of it is
	// still likely usable, so we try to validate smaller segments of the data and
	// use them in the response.
	hasGap := false
	for i := 0; i < len(snapKeys); i += segmentLen {
		segmentEnd := math.Min(i+segmentLen, len(snapKeys))
		proof, ok, _, err := rb.isRangeValid(snapKeys[i:segmentEnd], snapVals[i:segmentEnd], hasGap)
		if err != nil {
			rb.stats.IncProofError()
			return false, err
		}
		_ = proof.Close() // we don't need this proof
		if !ok {
			// segment is not valid
			rb.stats.IncSnapshotSegmentInvalid()
			hasGap = true
			continue
		}

		// segment is valid
		rb.stats.IncSnapshotSegmentValid()
		if hasGap {
			// if there is a gap between valid segments, fill the gap with data from the trie
			_, err := rb.fillFromTrie(ctx, snapKeys[i])
			if err != nil {
				rb.stats.IncTrieError()
				return false, err
			}
			if len(rb.response.Keys) >= int(rb.limit) || ctx.Err() != nil {
				break
			}
			// remove the last key added since it is snapKeys[i] and will be added back
			// Note: this is safe because we were able to verify the range proof that
			// shows snapKeys[i] is part of the trie.
			rb.response.Keys = rb.response.Keys[:len(rb.response.Keys)-1]
			rb.response.Vals = rb.response.Vals[:len(rb.response.Vals)-1]
		}
		hasGap = false
		// all the key/vals in the segment are valid, but possibly shorten segmentEnd
		// here to respect the limit. this is necessary in case the number of leaves
		// read from the trie exceeds the length of a segment that could not be validated.
		segmentEnd = math.Min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys))
		rb.response.Keys = append(rb.response.Keys, snapKeys[i:segmentEnd]...)
		rb.response.Vals = append(rb.response.Vals, snapVals[i:segmentEnd]...)

		if len(rb.response.Keys) >= int(rb.limit) {
			break
		}
	}
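	// The response was assembled from validated snapshot segments and trie fill-ins.
	// A range proof must still be attached, so report the response as incomplete.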
	return false, nil
}

// generateRangeProof returns a range proof for the range specified by [start] and [keys] using [t].
func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*memorydb.Database, error) {
	proof := memorydb.New()
	startTime := time.Now()
	defer func() { rb.proofTime += time.Since(startTime) }()

	// If [start] is empty, populate it with the appropriate length key starting at 0.
	if len(start) == 0 {
		start = bytes.Repeat([]byte{0x00}, rb.keyLength)
	}

	if err := rb.t.Prove(start, 0, proof); err != nil {
		_ = proof.Close() // closing memdb does not error
		return nil, err
	}
	if len(keys) > 0 {
		// If there is a non-zero number of keys, set [end] for the range proof to the last key.
		end := keys[len(keys)-1]
		if err := rb.t.Prove(end, 0, proof); err != nil {
			_ = proof.Close() // closing memdb does not error
			return nil, err
		}
	}
	return proof, nil
}
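
// When [keys] is empty, the proof of [start] alone acts as a non-existence proof:
// trie.VerifyRangeProof can use it to confirm that the range contains no leaves.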

// verifyRangeProof verifies the provided range proof with [keys/vals], starting at [start].
// Returns a boolean indicating if there are more leaves to the right of the last key in the
// trie and a nil error if the range proof is successfully verified.
func (rb *responseBuilder) verifyRangeProof(keys, vals [][]byte, start []byte, proof *memorydb.Database) (bool, error) {
	startTime := time.Now()
	defer func() { rb.proofTime += time.Since(startTime) }()

	// If [start] is empty, populate it with the appropriate length key starting at 0.
	if len(start) == 0 {
		start = bytes.Repeat([]byte{0x00}, rb.keyLength)
	}
	var end []byte
	if len(keys) > 0 {
		end = keys[len(keys)-1]
	}
	return trie.VerifyRangeProof(rb.request.Root, start, end, keys, vals, proof)
}

// iterateVals returns the values contained in [db]
func iterateVals(db *memorydb.Database) ([][]byte, error) {
	if db == nil {
		return nil, nil
	}
	// iterate db into [][]byte and return
	it := db.NewIterator(nil, nil)
	defer it.Release()

	vals := make([][]byte, 0, db.Len())
	for it.Next() {
		vals = append(vals, it.Value())
	}

	return vals, it.Error()
}
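
// Note: memorydb iterates its keys in sorted order, so the returned values are
// ordered by node hash (the proof db's keys) rather than by position in the trie;
// range proof verification does not depend on this ordering.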

// isRangeValid generates and verifies a range proof, returning true if keys/vals are
// part of the trie. If [hasGap] is true, the range is validated independently of the
// existing response. If [hasGap] is false, the range proof begins at a key which
// guarantees the range can be appended to the response.
// Additionally returns a boolean indicating if there are more leaves in the trie.
func (rb *responseBuilder) isRangeValid(keys, vals [][]byte, hasGap bool) (*memorydb.Database, bool, bool, error) {
	var startKey []byte
	if hasGap {
		startKey = keys[0]
	} else {
		startKey = rb.nextKey()
	}

	proof, err := rb.generateRangeProof(startKey, keys)
	if err != nil {
		return nil, false, false, err
	}
	more, proofErr := rb.verifyRangeProof(keys, vals, startKey, proof)
	return proof, proofErr == nil, more, nil
}

// nextKey returns the next key that could potentially be part of the response.
func (rb *responseBuilder) nextKey() []byte {
	if len(rb.response.Keys) == 0 {
		return rb.request.Start
	}
	nextKey := common.CopyBytes(rb.response.Keys[len(rb.response.Keys)-1])
	utils.IncrOne(nextKey)
	return nextKey
}
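
// utils.IncrOne increments the key in place as a big-endian integer, carrying
// across bytes. For example (illustrative values):
//
//	k := []byte{0x01, 0xff}
//	utils.IncrOne(k) // k is now []byte{0x02, 0x00}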

// fillFromTrie iterates key/values from the response builder's trie and appends
// them to the response. Iteration begins from the last key already in the response,
// or the request start if the response is empty. Iteration ends at [end] or when
// the number of leaves reaches the builder's limit.
// Returns true if there are more keys in the trie.
func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, error) {
	startTime := time.Now()
	defer func() { rb.trieReadTime += time.Since(startTime) }()

	// create an iterator to iterate the trie
	it := trie.NewIterator(rb.t.NodeIterator(rb.nextKey()))
	more := false
	for it.Next() {
		// if we're past the end of the range, stop
		if len(end) > 0 && bytes.Compare(it.Key, end) > 0 {
			more = true
			break
		}

		// If we've returned enough data or run out of time, set the more flag and exit;
		// this flag determines whether a proof is generated
		if len(rb.response.Keys) >= int(rb.limit) || ctx.Err() != nil {
			more = true
			break
		}

		// append key/vals to the response
		rb.response.Keys = append(rb.response.Keys, it.Key)
		rb.response.Vals = append(rb.response.Vals, it.Value)
	}
	return more, it.Err
}

// readLeafsFromSnapshot iterates the storage snapshot of the requested account
// (or the main account trie if the account is empty). Returns up to [rb.limit]
// key/value pairs for keys that are in the request's range (inclusive).
func (rb *responseBuilder) readLeafsFromSnapshot(ctx context.Context) ([][]byte, [][]byte, error) {
	var (
		snapIt    ethdb.Iterator
		startHash = common.BytesToHash(rb.request.Start)
		keys      = make([][]byte, 0, rb.limit)
		vals      = make([][]byte, 0, rb.limit)
	)

	// Get an iterator into the storage or the main account snapshot.
	if rb.request.Account == (common.Hash{}) {
		snapIt = &syncutils.AccountIterator{AccountIterator: rb.snap.DiskAccountIterator(startHash)}
	} else {
		snapIt = &syncutils.StorageIterator{StorageIterator: rb.snap.DiskStorageIterator(rb.request.Account, startHash)}
	}
	defer snapIt.Release()
	for snapIt.Next() {
		// if we're past the end of the range, stop
		if len(rb.request.End) > 0 && bytes.Compare(snapIt.Key(), rb.request.End) > 0 {
			break
		}
		// If we've read enough data or the context is done, stop iterating
		if len(keys) >= int(rb.limit) || ctx.Err() != nil {
			break
		}

		keys = append(keys, snapIt.Key())
		vals = append(vals, snapIt.Value())
	}
	return keys, vals, snapIt.Error()
}
   462  }