github.com/theQRL/go-zond@v0.1.1/core/forkid/forkid.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
package forkid

import (
	"encoding/binary"
	"errors"
	"hash/crc32"
	"math"
	"math/big"
	"reflect"
	"strings"

	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/params"
	"golang.org/x/exp/slices"
)

var (
	// ErrRemoteStale is returned by the validator if a remote fork checksum is a
	// subset of our already applied forks, but the announced next fork block is
	// not on our already passed chain.
	ErrRemoteStale = errors.New("remote needs update")

	// ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
	// checksum does not match any local checksum variation, signalling that the
	// two chains have diverged in the past at some point (possibly at genesis).
	ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
)

// timestampThreshold is the Ethereum mainnet genesis timestamp. It is used to
// differentiate whether a forkid.next field is a block number or a timestamp.
// Whilst very hacky, something's needed to split the validation during the
// transition period (block forks -> time forks).
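//
// For example, a forkid.next value of 1_150_000 is below the threshold and is
// interpreted as a block number, whereas a value such as 1_681_338_455 exceeds
// it and is additionally checked against the head timestamp (see the rule #1a
// check in the validator below).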
const timestampThreshold = 1438269973

// Blockchain defines all necessary methods to build a forkID.
type Blockchain interface {
	// Config retrieves the chain's fork configuration.
	Config() *params.ChainConfig

	// Genesis retrieves the chain's genesis block.
	Genesis() *types.Block

	// CurrentHeader retrieves the current head header of the canonical chain.
	CurrentHeader() *types.Header
}

// ID is a fork identifier as defined by EIP-2124.
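//
// To give a rough sense of the shape of the value (numbers taken from the
// EIP-2124 examples for Ethereum mainnet, so purely illustrative here): an
// unsynced Frontier node would advertise Hash 0xfc64ec04 (the CRC32 of the
// genesis hash) with Next 1150000 (the Homestead block).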
type ID struct {
	Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers or timestamps
	Next uint64  // Block number or timestamp of the next upcoming fork, or 0 if no forks are known
}

// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error

// NewID calculates the Ethereum fork ID from the chain config, genesis block, head and time.
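//
// A minimal usage sketch (chainConfig, genesisBlock, headNumber and headTime
// are assumed to exist in the caller; they are not part of this package):
//
//	id := forkid.NewID(chainConfig, genesisBlock, headNumber, headTime)
//	fmt.Printf("fork id: hash=%x next=%d\n", id.Hash, id.Next)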
func NewID(config *params.ChainConfig, genesis *types.Block, head, time uint64) ID {
	// Calculate the starting checksum from the genesis hash
	hash := crc32.ChecksumIEEE(genesis.Hash().Bytes())

	// Calculate the current fork checksum and the next fork block
	forksByBlock, forksByTime := gatherForks(config, genesis.Time())
	for _, fork := range forksByBlock {
		if fork <= head {
			// Fork already passed, checksum the previous hash and the fork number
			hash = checksumUpdate(hash, fork)
			continue
		}
		return ID{Hash: checksumToBytes(hash), Next: fork}
	}
	for _, fork := range forksByTime {
		if fork <= time {
			// Fork already passed, checksum the previous hash and fork timestamp
			hash = checksumUpdate(hash, fork)
			continue
		}
		return ID{Hash: checksumToBytes(hash), Next: fork}
	}
	return ID{Hash: checksumToBytes(hash), Next: 0}
}

// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
func NewIDWithChain(chain Blockchain) ID {
	head := chain.CurrentHeader()

	return NewID(
		chain.Config(),
		chain.Genesis(),
		head.Number.Uint64(),
		head.Time,
	)
}

// NewFilter creates a filter that returns whether a fork ID should be rejected
// based on the local chain's status.
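//
// A minimal usage sketch (remoteID stands for a fork ID received from a peer
// during the handshake; it is not defined in this package):
//
//	filter := forkid.NewFilter(chain)
//	if err := filter(remoteID); err != nil {
//		// the remote chain is incompatible or one side needs an update
//	}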
func NewFilter(chain Blockchain) Filter {
	return newFilter(
		chain.Config(),
		chain.Genesis(),
		func() (uint64, uint64) {
			head := chain.CurrentHeader()
			return head.Number.Uint64(), head.Time
		},
	)
}

// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis *types.Block) Filter {
	head := func() (uint64, uint64) { return 0, 0 }
	return newFilter(config, genesis, head)
}

// newFilter is the internal version of NewFilter, taking the chain config, the
// genesis block and a head closure as arguments instead of a chain. The reason
// is to allow testing it without having to simulate an entire blockchain.
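//
// The headfn closure is expected to return the current head's block number and
// timestamp, in that order (mirroring how NewFilter wires it up above).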
func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() (uint64, uint64)) Filter {
	// Calculate all the valid fork hash and fork next combos
	var (
		forksByBlock, forksByTime = gatherForks(config, genesis.Time())
		forks                     = append(append([]uint64{}, forksByBlock...), forksByTime...)
		sums                      = make([][4]byte, len(forks)+1) // 0th is the genesis
	)
	hash := crc32.ChecksumIEEE(genesis.Hash().Bytes())
	sums[0] = checksumToBytes(hash)
	for i, fork := range forks {
		hash = checksumUpdate(hash, fork)
		sums[i+1] = checksumToBytes(hash)
	}
	// Add two sentries to simplify the fork checks so the last one doesn't
	// require special casing.
	forks = append(forks, math.MaxUint64) // Last fork will never be passed
	if len(forksByTime) == 0 {
		// In purely block based forks, avoid the sentry spilling into timestamp territory
		forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed
	}
	// Create a validator that will filter out incompatible chains
	return func(id ID) error {
		// Run the fork checksum validation ruleset:
		//   1. If local and remote FORK_CSUM match, compare local head to FORK_NEXT.
		//        The two nodes are in the same fork state currently. They might know
		//        of differing future forks, but that's not relevant until the fork
		//        triggers (might be postponed, nodes might be updated to match).
		//      1a. A remotely announced but not yet passed block has already passed
		//          locally, disconnect, since the chains are incompatible.
		//      1b. No remotely announced fork, or it has not yet passed locally, connect.
		//   2. If the remote FORK_CSUM is a subset of the local past forks and the
		//      remote FORK_NEXT matches with the locally following fork block number,
		//      connect.
		//        Remote node is currently syncing. It might eventually diverge from
		//        us, but at this current point in time we don't have enough information.
		//   3. If the remote FORK_CSUM is a superset of the local past forks and can
		//      be completed with locally known future forks, connect.
		//        Local node is currently syncing. It might eventually diverge from
		//        the remote, but at this current point in time we don't have enough
		//        information.
		//   4. Reject in all other cases.
		block, time := headfn()
		for i, fork := range forks {
			// Pick the head comparison based on fork progression
			head := block
			if i >= len(forksByBlock) {
				head = time
			}
			// If our head is beyond this fork, continue to the next (we have a dummy
			// fork of maxuint64 as the last item to always fail this check eventually).
			if head >= fork {
				continue
			}
			// Found the first unpassed fork block, check if our current state matches
			// the remote checksum (rule #1).
			if sums[i] == id.Hash {
				// Fork checksum matched, check if a remote future fork block already passed
				// locally without the local node being aware of it (rule #1a).
				if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) {
					return ErrLocalIncompatibleOrStale
				}
				// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
				return nil
			}
			// The local and remote nodes are in different forks currently, check if the
			// remote checksum is a subset of our local forks (rule #2).
			for j := 0; j < i; j++ {
				if sums[j] == id.Hash {
					// Remote checksum is a subset, validate based on the announced next fork
					if forks[j] != id.Next {
						return ErrRemoteStale
					}
					return nil
				}
			}
			// Remote chain is not a subset of our local one, check if it's a superset by
			// any chance, signalling that we're simply out of sync (rule #3).
			for j := i + 1; j < len(sums); j++ {
				if sums[j] == id.Hash {
					// Yay, remote checksum is a superset, ignore upcoming forks
					return nil
				}
			}
			// No exact, subset or superset match. We are on differing chains, reject.
			return ErrLocalIncompatibleOrStale
		}
		log.Error("Impossible fork ID validation", "id", id)
		return nil // Something's very wrong, accept rather than reject
	}
}

// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
// one and a fork block number or timestamp (equivalent to CRC32(original-blob || fork)).
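//
// For example, applying it twice starting from CRC32(genesis-hash) yields
// CRC32(genesis-hash || uint64be(fork1) || uint64be(fork2)), which is exactly
// how NewID and newFilter build up their running checksums.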
func checksumUpdate(hash uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(hash, crc32.IEEETable, blob[:])
}

// checksumToBytes converts a uint32 checksum into a [4]byte array.
func checksumToBytes(hash uint32) [4]byte {
	var blob [4]byte
	binary.BigEndian.PutUint32(blob[:], hash)
	return blob
}

// gatherForks gathers all the known forks and creates two sorted lists out of
// them, one for the block number based forks and the second for the timestamps.
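//
// The discovery is reflection based: any *big.Int field of params.ChainConfig
// whose name ends in "Block" is treated as a block number fork, and any
// *uint64 field whose name ends in "Time" is treated as a timestamp fork.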
func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64) {
	// Gather all the fork block numbers and timestamps via reflection
	kind := reflect.TypeOf(params.ChainConfig{})
	conf := reflect.ValueOf(config).Elem()
	x := uint64(0)
	var (
		forksByBlock []uint64
		forksByTime  []uint64
	)
	for i := 0; i < kind.NumField(); i++ {
		// Fetch the next field and skip non-fork rules
		field := kind.Field(i)

		time := strings.HasSuffix(field.Name, "Time")
		if !time && !strings.HasSuffix(field.Name, "Block") {
			continue
		}

		// Extract the fork rule block number or timestamp and aggregate it
		if field.Type == reflect.TypeOf(&x) {
			if rule := conf.Field(i).Interface().(*uint64); rule != nil {
				forksByTime = append(forksByTime, *rule)
			}
		}
		if field.Type == reflect.TypeOf(new(big.Int)) {
			if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
				forksByBlock = append(forksByBlock, rule.Uint64())
			}
		}
	}
	slices.Sort(forksByBlock)
	slices.Sort(forksByTime)

	// Deduplicate any fork points where multiple forks activate at once
	for i := 1; i < len(forksByBlock); i++ {
		if forksByBlock[i] == forksByBlock[i-1] {
			forksByBlock = append(forksByBlock[:i], forksByBlock[i+1:]...)
			i--
		}
	}
	for i := 1; i < len(forksByTime); i++ {
		if forksByTime[i] == forksByTime[i-1] {
			forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
			i--
		}
	}
	// Skip any forks in block 0, as that's the genesis ruleset
	if len(forksByBlock) > 0 && forksByBlock[0] == 0 {
		forksByBlock = forksByBlock[1:]
	}
	// Skip any time based forks activated at or before genesis, as those are
	// also part of the genesis ruleset.
	for len(forksByTime) > 0 && forksByTime[0] <= genesis {
		forksByTime = forksByTime[1:]
	}
	return forksByBlock, forksByTime
}