github.com/jimmyx0x/go-ethereum@v1.10.28/core/forkid/forkid.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
package forkid

import (
    "encoding/binary"
    "errors"
    "hash/crc32"
    "math"
    "math/big"
    "reflect"
    "sort"
    "strings"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
)

var (
    // ErrRemoteStale is returned by the validator if a remote fork checksum is a
    // subset of our already applied forks, but the announced next fork block is
    // not on our already passed chain.
    ErrRemoteStale = errors.New("remote needs update")

    // ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
    // checksum does not match any local checksum variation, signalling that the
    // two chains have diverged in the past at some point (possibly at genesis).
    ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
)

// timestampThreshold is the Ethereum mainnet genesis timestamp. It is used to
// differentiate whether a forkid.next field is a block number or a timestamp. Whilst
// very hacky, something's needed to split the validation during the transition
// period (block forks -> time forks).
const timestampThreshold = 1438269973

// Blockchain defines all the methods necessary to build a forkID.
type Blockchain interface {
    // Config retrieves the chain's fork configuration.
    Config() *params.ChainConfig

    // Genesis retrieves the chain's genesis block.
    Genesis() *types.Block

    // CurrentHeader retrieves the current head header of the canonical chain.
    CurrentHeader() *types.Header
}

// ID is a fork identifier as defined by EIP-2124.
type ID struct {
    Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
    Next uint64  // Block number of the next upcoming fork, or 0 if no forks are known
}

// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error
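// Illustrative sketch (not part of the original file): how an EIP-2124 fork ID
// could be derived by hand. The genesis hash seeds an IEEE CRC32 checksum,
// every already-passed fork point is folded in as an 8-byte big-endian value,
// and the first not-yet-passed fork becomes Next. The function name and
// parameters are hypothetical and serve only to make the encoding explicit;
// NewID below does the same thing against a real ChainConfig.
func exampleManualID(genesis common.Hash, passedForks []uint64, nextFork uint64) ID {
    sum := crc32.ChecksumIEEE(genesis[:])
    for _, fork := range passedForks {
        var blob [8]byte
        binary.BigEndian.PutUint64(blob[:], fork)
        sum = crc32.Update(sum, crc32.IEEETable, blob[:])
    }
    var hash [4]byte
    binary.BigEndian.PutUint32(hash[:], sum)
    return ID{Hash: hash, Next: nextFork}
}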
// NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time.
func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID {
    // Calculate the starting checksum from the genesis hash
    hash := crc32.ChecksumIEEE(genesis[:])

    // Calculate the current fork checksum and the next fork block
    forksByBlock, forksByTime := gatherForks(config)
    for _, fork := range forksByBlock {
        if fork <= head {
            // Fork already passed, checksum the previous hash and the fork number
            hash = checksumUpdate(hash, fork)
            continue
        }
        return ID{Hash: checksumToBytes(hash), Next: fork}
    }
    for _, fork := range forksByTime {
        if fork <= time {
            // Fork already passed, checksum the previous hash and fork timestamp
            hash = checksumUpdate(hash, fork)
            continue
        }
        return ID{Hash: checksumToBytes(hash), Next: fork}
    }
    return ID{Hash: checksumToBytes(hash), Next: 0}
}

// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
func NewIDWithChain(chain Blockchain) ID {
    head := chain.CurrentHeader()

    return NewID(
        chain.Config(),
        chain.Genesis().Hash(),
        head.Number.Uint64(),
        head.Time,
    )
}

// NewFilter creates a filter that returns whether a fork ID should be rejected
// or not based on the local chain's status.
func NewFilter(chain Blockchain) Filter {
    return newFilter(
        chain.Config(),
        chain.Genesis().Hash(),
        func() (uint64, uint64) {
            head := chain.CurrentHeader()
            return head.Number.Uint64(), head.Time
        },
    )
}

// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
    head := func() (uint64, uint64) { return 0, 0 }
    return newFilter(config, genesis, head)
}
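// Illustrative sketch (not part of the original file): how a Filter would
// typically be consumed during the eth handshake. The function name, the chain
// parameter and the remote ID are assumptions for illustration; the remote ID
// would normally arrive in the peer's Status message.
func exampleValidatePeer(chain Blockchain, remote ID) error {
    filter := NewFilter(chain)
    if err := filter(remote); err != nil {
        // ErrRemoteStale or ErrLocalIncompatibleOrStale: the peer should be dropped.
        return err
    }
    // Compatible, or indistinguishable for now: keep the connection.
    return nil
}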
// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter {
    // Calculate all the valid fork hash and fork next combos
    var (
        forksByBlock, forksByTime = gatherForks(config)
        forks                     = append(append([]uint64{}, forksByBlock...), forksByTime...)
        sums                      = make([][4]byte, len(forks)+1) // 0th is the genesis
    )
    hash := crc32.ChecksumIEEE(genesis[:])
    sums[0] = checksumToBytes(hash)
    for i, fork := range forks {
        hash = checksumUpdate(hash, fork)
        sums[i+1] = checksumToBytes(hash)
    }
    // Add two sentries to simplify the fork checks and avoid special casing
    // the last one.
    forks = append(forks, math.MaxUint64) // Last fork will never be passed
    if len(forksByTime) == 0 {
        // In purely block based forks, avoid the sentry spilling into timestamp territory
        forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed
    }
    // Create a validator that will filter out incompatible chains
    return func(id ID) error {
        // Run the fork checksum validation ruleset:
        //   1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
        //        The two nodes are in the same fork state currently. They might know
        //        of differing future forks, but that's not relevant until the fork
        //        triggers (might be postponed, nodes might be updated to match).
        //      1a. A remotely announced but remotely not passed block is already passed
        //          locally, disconnect, since the chains are incompatible.
        //      1b. No remotely announced fork; or not yet passed locally, connect.
        //   2. If the remote FORK_CSUM is a subset of the local past forks and the
        //      remote FORK_NEXT matches with the locally following fork block number,
        //      connect.
        //        Remote node is currently syncing. It might eventually diverge from
        //        us, but at this current point in time we don't have enough information.
        //   3. If the remote FORK_CSUM is a superset of the local past forks and can
        //      be completed with locally known future forks, connect.
        //        Local node is currently syncing. It might eventually diverge from
        //        the remote, but at this current point in time we don't have enough
        //        information.
        //   4. Reject in all other cases.
        block, time := headfn()
        for i, fork := range forks {
            // Pick the head comparison based on fork progression
            head := block
            if i >= len(forksByBlock) {
                head = time
            }
            // If our head is beyond this fork, continue to the next (we have a dummy
            // fork of maxuint64 as the last item to always fail this check eventually).
            if head >= fork {
                continue
            }
            // Found the first unpassed fork block, check if our current state matches
            // the remote checksum (rule #1).
            if sums[i] == id.Hash {
                // Fork checksum matched, check if a remote future fork block already passed
                // locally without the local node being aware of it (rule #1a).
                if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) {
                    return ErrLocalIncompatibleOrStale
                }
                // We haven't locally passed a remote-only fork, accept the connection (rule #1b).
                return nil
            }
            // The local and remote nodes are in different forks currently, check if the
            // remote checksum is a subset of our local forks (rule #2).
            for j := 0; j < i; j++ {
                if sums[j] == id.Hash {
                    // Remote checksum is a subset, validate based on the announced next fork
                    if forks[j] != id.Next {
                        return ErrRemoteStale
                    }
                    return nil
                }
            }
            // Remote chain is not a subset of our local one, check if it's a superset by
            // any chance, signalling that we're simply out of sync (rule #3).
            for j := i + 1; j < len(sums); j++ {
                if sums[j] == id.Hash {
                    // Yay, remote checksum is a superset, ignore upcoming forks
                    return nil
                }
            }
            // No exact, subset or superset match. We are on differing chains, reject.
            return ErrLocalIncompatibleOrStale
        }
        log.Error("Impossible fork ID validation", "id", id)
        return nil // Something's very wrong, accept rather than reject
    }
}

// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
// one and a fork block number (equivalent to CRC32(original-blob || fork)).
func checksumUpdate(hash uint32, fork uint64) uint32 {
    var blob [8]byte
    binary.BigEndian.PutUint64(blob[:], fork)
    return crc32.Update(hash, crc32.IEEETable, blob[:])
}

// checksumToBytes converts a uint32 checksum into a [4]byte array.
func checksumToBytes(hash uint32) [4]byte {
    var blob [4]byte
    binary.BigEndian.PutUint32(blob[:], hash)
    return blob
}
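// Illustrative sketch (not part of the original file): checksumUpdate is
// equivalent to checksumming the original blob concatenated with the
// big-endian encoded fork value in a single pass, which is how EIP-2124
// defines FORK_HASH. The function below is a hypothetical demonstration of
// that equivalence and always returns true.
func exampleChecksumEquivalence(genesis common.Hash, fork uint64) bool {
    // Incremental: seed with the genesis checksum, then fold in the fork point.
    incremental := checksumUpdate(crc32.ChecksumIEEE(genesis[:]), fork)

    // One-shot: checksum genesis-bytes || big-endian(fork) in a single call.
    var blob [8]byte
    binary.BigEndian.PutUint64(blob[:], fork)
    oneshot := crc32.ChecksumIEEE(append(append([]byte{}, genesis[:]...), blob[:]...))

    return incremental == oneshot
}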
// gatherForks gathers all the known forks and creates two sorted lists out of
// them, one for the block number based forks and the second for the timestamps.
func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) {
    // Gather all the fork block numbers via reflection
    kind := reflect.TypeOf(params.ChainConfig{})
    conf := reflect.ValueOf(config).Elem()

    var (
        forksByBlock []uint64
        forksByTime  []uint64
    )
    for i := 0; i < kind.NumField(); i++ {
        // Fetch the next field and skip non-fork rules
        field := kind.Field(i)

        time := strings.HasSuffix(field.Name, "Time")
        if !time && !strings.HasSuffix(field.Name, "Block") {
            continue
        }
        if field.Type != reflect.TypeOf(new(big.Int)) {
            continue
        }
        // Extract the fork rule block number or timestamp and aggregate it
        rule := conf.Field(i).Interface().(*big.Int)
        if rule != nil {
            if time {
                forksByTime = append(forksByTime, rule.Uint64())
            } else {
                forksByBlock = append(forksByBlock, rule.Uint64())
            }
        }
    }
    sort.Slice(forksByBlock, func(i, j int) bool { return forksByBlock[i] < forksByBlock[j] })
    sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })

    // Deduplicate fork points that activate multiple forks at the same block or time
    for i := 1; i < len(forksByBlock); i++ {
        if forksByBlock[i] == forksByBlock[i-1] {
            forksByBlock = append(forksByBlock[:i], forksByBlock[i+1:]...)
            i--
        }
    }
    for i := 1; i < len(forksByTime); i++ {
        if forksByTime[i] == forksByTime[i-1] {
            forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
            i--
        }
    }
    // Skip any forks in block 0, that's the genesis ruleset
    if len(forksByBlock) > 0 && forksByBlock[0] == 0 {
        forksByBlock = forksByBlock[1:]
    }
    if len(forksByTime) > 0 && forksByTime[0] == 0 {
        forksByTime = forksByTime[1:]
    }
    return forksByBlock, forksByTime
}
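// Illustrative sketch (not part of the original file): wiring the pieces
// together for a node that only knows its chain config and genesis hash and
// has not synced yet. It derives its own ID at block zero and uses a static
// filter to sanity-check a remotely advertised ID. The function name and
// parameters are hypothetical.
func exampleStaticHandshake(config *params.ChainConfig, genesis common.Hash, remote ID) (ID, error) {
    local := NewID(config, genesis, 0, 0)      // local fork ID before any sync
    filter := NewStaticFilter(config, genesis) // validator anchored at block zero
    if err := filter(remote); err != nil {
        return local, err
    }
    return local, nil
}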