github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/tests/bor/bor_sprint_length_change_test.go

package bor

import (
	"crypto/ecdsa"
	"encoding/csv"
	"encoding/json"
	"fmt"
	"io/ioutil" // nolint: staticcheck
	_log "log"
	"math/big"
	"os"
	"sync"
	"testing"
	"time"

	"gotest.tools/assert"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/fdlimit"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/miner"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
)

var (
	// Only this account is a validator for the 0th span
	keySprintLength, _ = crypto.HexToECDSA(privKeySprintLength)

	// This account is one of the validators for the 1st span (0-indexed)
	keySprintLength2, _ = crypto.HexToECDSA(privKeySprintLength2)

	keysSprintLength = []*ecdsa.PrivateKey{keySprintLength, keySprintLength2}
)

const (
	privKeySprintLength  = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
	privKeySprintLength2 = "9b28f36fbd67381120752d6172ecdcf10e06ab2d9a1367aac00cdcd6ac7855d3"
)

// Sprint length change tests
func TestValidatorsBlockProduction(t *testing.T) {
	t.Parallel()

	log.Root().SetHandler(log.LvlFilterHandler(3, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	_, err := fdlimit.Raise(2048)

	if err != nil {
		panic(err)
	}

	// Generate a batch of accounts to seal and fund with
	faucets := make([]*ecdsa.PrivateKey, 128)
	for i := 0; i < len(faucets); i++ {
		faucets[i], _ = crypto.GenerateKey()
	}

	// Create a two-validator Bor network from the sprint-length-change genesis
	genesis := InitGenesisSprintLength(t, faucets, "./testdata/genesis_sprint_length_change.json", 8)

	nodes := make([]*eth.Ethereum, 2)
	enodes := make([]*enode.Node, 2)

	for i := 0; i < 2; i++ {
		// Start the node and wait until it's up
		stack, ethBackend, err := InitMinerSprintLength(genesis, keysSprintLength[i], true)
		if err != nil {
			panic(err)
		}
		defer stack.Close()

		for stack.Server().NodeInfo().Ports.Listener == 0 {
			time.Sleep(250 * time.Millisecond)
		}
		// Connect the node to all the previous ones
		for j, n := range enodes {
			if j < i {
				stack.Server().AddPeer(n)
			}
		}
		// Start tracking the node and its enode
		nodes[i] = ethBackend
		enodes[i] = stack.Server().Self()
	}

	// Give the peers a moment to connect, then start mining on every node
	time.Sleep(3 * time.Second)

	for _, node := range nodes {
		if err := node.StartMining(1); err != nil {
			panic(err)
		}
	}

	// Wait until the chain reaches block 24
	for {
		// for blocks 0 to 7, the primary validator is node0
		// for blocks 8 to 15, the primary validator is node1
		// for blocks 16 to 19, the primary validator is node0
		// for blocks 20 to 23, the primary validator is node1
		blockHeaderVal0 := nodes[0].BlockChain().CurrentHeader()

		if blockHeaderVal0.Number.Uint64() == 24 {
			break
		}
	}
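	// The sprint length starts at 8 and (per the genesis file) drops to 4 at
	// block 16, so authorship should alternate between the two signers at the
	// sprint boundaries checked below: blocks 7, 15, 19 and 23.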
	// check block 7 miner; expected author is node0's signer
	blockHeaderVal0 := nodes[0].BlockChain().GetHeaderByNumber(7)
	blockHeaderVal1 := nodes[1].BlockChain().GetHeaderByNumber(7)
	authorVal0, err := nodes[0].Engine().Author(blockHeaderVal0)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	authorVal1, err := nodes[1].Engine().Author(blockHeaderVal1)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	// check both nodes have the same block 7
	assert.Equal(t, authorVal0, authorVal1)

	// check block mined by node0
	assert.Equal(t, authorVal0, nodes[0].AccountManager().Accounts()[0])

	// check block 15 miner; expected author is node1's signer
	blockHeaderVal0 = nodes[0].BlockChain().GetHeaderByNumber(15)
	blockHeaderVal1 = nodes[1].BlockChain().GetHeaderByNumber(15)
	authorVal0, err = nodes[0].Engine().Author(blockHeaderVal0)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	authorVal1, err = nodes[1].Engine().Author(blockHeaderVal1)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	// check both nodes have the same block 15
	assert.Equal(t, authorVal0, authorVal1)

	// check block mined by node1
	assert.Equal(t, authorVal0, nodes[1].AccountManager().Accounts()[0])

	// check block 19 miner; expected author is node0's signer
	blockHeaderVal0 = nodes[0].BlockChain().GetHeaderByNumber(19)
	blockHeaderVal1 = nodes[1].BlockChain().GetHeaderByNumber(19)
	authorVal0, err = nodes[0].Engine().Author(blockHeaderVal0)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	authorVal1, err = nodes[1].Engine().Author(blockHeaderVal1)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	// check both nodes have the same block 19
	assert.Equal(t, authorVal0, authorVal1)

	// check block mined by node0
	assert.Equal(t, authorVal0, nodes[0].AccountManager().Accounts()[0])

	// check block 23 miner; expected author is node1's signer
	blockHeaderVal0 = nodes[0].BlockChain().GetHeaderByNumber(23)
	blockHeaderVal1 = nodes[1].BlockChain().GetHeaderByNumber(23)
	authorVal0, err = nodes[0].Engine().Author(blockHeaderVal0)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	authorVal1, err = nodes[1].Engine().Author(blockHeaderVal1)

	if err != nil {
		log.Error("Error in getting author", "err", err)
	}

	// check both nodes have the same block 23
	assert.Equal(t, authorVal0, authorVal1)

	// check block mined by node1
	assert.Equal(t, authorVal0, nodes[1].AccountManager().Accounts()[0])
}

func TestSprintLengths(t *testing.T) {
	t.Parallel()

	// Sprint lengths are keyed by the block number they take effect at:
	// 16 from block 0, then 4 from block 8 onwards.
	testBorConfig := params.TestChainConfig.Bor
	testBorConfig.Sprint = map[string]uint64{
		"0": 16,
		"8": 4,
	}
	assert.Equal(t, testBorConfig.CalculateSprint(0), uint64(16))
	assert.Equal(t, testBorConfig.CalculateSprint(8), uint64(4))
	assert.Equal(t, testBorConfig.CalculateSprint(9), uint64(4))
}

func TestProducerDelay(t *testing.T) {
	t.Parallel()

	testBorConfig := params.TestChainConfig.Bor
	testBorConfig.ProducerDelay = map[string]uint64{
		"0": 16,
		"8": 4,
	}
	assert.Equal(t, testBorConfig.CalculateProducerDelay(0), uint64(16))
	assert.Equal(t, testBorConfig.CalculateProducerDelay(8), uint64(4))
	assert.Equal(t, testBorConfig.CalculateProducerDelay(9), uint64(4))
}
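// keys_21val holds the signer key pairs for the validators used by the
// sprint-length reorg tests below. Despite the name it contains seven
// entries, matching testdata/genesis_7val.json.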
var keys_21val = []map[string]string{
	{
		"address":  "0x5C3E1B893B9315a968fcC6bce9EB9F7d8E22edB3",
		"priv_key": "c19fac8e538447124ad2408d9fbaeda2bb686fee763dca7a6bab58ea12442413",
		"pub_key":  "0x0495421933eda03dcc37f9186c24e255b569513aefae71e96d55d0db3df17502e24e86297b01a167fab9ce1174f06ee3110510ac242e39218bd964de5b345edbd6",
	},
	{
		"address":  "0x73E033779C9030D4528d86FbceF5B02e97488921",
		"priv_key": "61eb51cf8936309151ab7b931841ea033b6a09931f6a100b464fbbd74f3e0bd7",
		"pub_key":  "0x04f9a5e9bf76b45ac58f1b018ccba4b83b3531010cdadf42174c18a9db9879ef1dcb5d1254ce834bc108b110cd8d0186ed69a0387528a142bdb5936faf58bf98c9",
	},
	{
		"address":  "0x751eC4877450B8a4D652d0D70197337FC38a42e6",
		"priv_key": "6e7f48d012c9c0baadbdc88af32521e2e477fd6898a9b65e6abe19fd6652cb2e",
		"pub_key":  "0x0479db4c0b757bf0e5d9b8954b078ab7c0e91d6c19697904d23d07ea4853c8584382de91174929ba5c598214b8a991471ae051458ea787cdc15a4e435a55ef8059",
	},
	{
		"address":  "0xA464DC4810Bc79B956810759e314d85BcE35cD1c",
		"priv_key": "3efcf3f7014a6257f4a443119851414111820c681b27525dab3f35e72e28e51e",
		"pub_key":  "0x040180920306bf598ea050e258f2c7e50804a77a64f5a11705e08d18ee71eb0a80fafc95d0a42b92371ded042edda16c1f0b5f2fef7c4113ad66c59a71c29d977e",
	},
	{
		"address":  "0xb005bc07015170266Bd430f3EC1322938603be20",
		"priv_key": "17cd9b38c2b3a639c7d97ccbf2bb6c7140ab8f625aec4c249bc8e4cfd3bf9a96",
		"pub_key":  "0x04435a70d343aa569e6f3386c73e39a1aa6f88c30e5943baedda9618b55cc944a2de1d114aff6d0e9fa002bebc780b04ef6c1b8a06bbf0d41c10d1efa55390f198",
	},
	{
		"address":  "0xE8d02Da3dFeeB3e755472D95D666BD6821D92129",
		"priv_key": "45c9ef66361a2283cef14184f128c41949103b791aa622ead3c0bc844648b835",
		"pub_key":  "0x04a14651ddc80467eb589d72d95153fa695e4cb2e4bb99edeb912e840d309d61313b6f4676081b099f29e6598ecf98cb7b44bb862d019920718b558f27ba94ca51",
	},
	{
		"address":  "0xF93B54Cf36E917f625B48e1e3C9F93BC2344Fb06",
		"priv_key": "93788a1305605808df1f9a96b5e1157da191680cf08bc15e077138f517563cd5",
		"pub_key":  "0x045eee11dceccd9cccc371ca3d96d74c848e785223f1e5df4d1a7f08efdfeb90bd8f0035342a9c26068cf6c7ab395ca3ceea555541325067fc187c375390efa57d",
	},
}

func getTestSprintLengthReorgCases2Nodes() []map[string]interface{} {
	sprintSizes := []uint64{64}
	faultyNodes := [][]uint64{{0, 1}, {1, 2}, {0, 2}}
	reorgsLengthTests := make([]map[string]interface{}, 0)

	for i := uint64(0); i < uint64(len(sprintSizes)); i++ {
		maxReorgLength := sprintSizes[i] * 4
		for j := uint64(20); j <= maxReorgLength; j = j + 8 {
			maxStartBlock := sprintSizes[i] - 1
			for k := sprintSizes[i] / 2; k <= maxStartBlock; k = k + 4 {
				for l := uint64(0); l < uint64(len(faultyNodes)); l++ {
					if j+k < sprintSizes[i] {
						continue
					}

					reorgsLengthTest := map[string]interface{}{
						"reorgLength": j,
						"startBlock":  k,
						"sprintSize":  sprintSizes[i],
						"faultyNodes": faultyNodes[l], // node 1 (index) is primary validator of the first sprint
					}
					reorgsLengthTests = append(reorgsLengthTests, reorgsLengthTest)
				}
			}
		}
	}
	// reorgsLengthTests := []map[string]uint64{
	// 	{
	// 		"reorgLength": 3,
	// 		"startBlock":  7,
	// 		"sprintSize":  8,
	// 		"faultyNode":  1,
	// 	},
	// }
	return reorgsLengthTests
}

func getTestSprintLengthReorgCases() []map[string]uint64 {
	sprintSizes := []uint64{64, 32, 16, 8}
	faultyNodes := []uint64{0, 1}
	reorgsLengthTests := make([]map[string]uint64, 0)
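	// Enumerate every (sprintSize, reorgLength, startBlock, faultyNode)
	// combination; combinations whose induced reorg would end inside the
	// first sprint are skipped.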
	for i := uint64(0); i < uint64(len(sprintSizes)); i++ {
		maxReorgLength := sprintSizes[i] * 4
		for j := uint64(3); j <= maxReorgLength; j = j + 4 {
			maxStartBlock := sprintSizes[i] - 1
			for k := sprintSizes[i] / 2; k <= maxStartBlock; k = k + 4 {
				for l := uint64(0); l < uint64(len(faultyNodes)); l++ {
					if j+k < sprintSizes[i] {
						continue
					}

					reorgsLengthTest := map[string]uint64{
						"reorgLength": j,
						"startBlock":  k,
						"sprintSize":  sprintSizes[i],
						"faultyNode":  faultyNodes[l], // node 1 (index) is primary validator of the first sprint
					}
					reorgsLengthTests = append(reorgsLengthTests, reorgsLengthTest)
				}
			}
		}
	}
	// reorgsLengthTests := []map[string]uint64{
	// 	{
	// 		"reorgLength": 3,
	// 		"startBlock":  7,
	// 		"sprintSize":  8,
	// 		"faultyNode":  1,
	// 	},
	// }
	return reorgsLengthTests
}

func SprintLengthReorgIndividual(t *testing.T, index int, tt map[string]uint64) (uint64, uint64, uint64, uint64, uint64, uint64) {
	t.Helper()

	log.Warn("Case ----- ", "Index", index, "InducedReorgLength", tt["reorgLength"], "BlockStart", tt["startBlock"], "SprintSize", tt["sprintSize"], "DisconnectedNode", tt["faultyNode"])
	observerOldChainLength, faultyOldChainLength := SetupValidatorsAndTest(t, tt)

	if observerOldChainLength > 0 {
		log.Warn("Observer", "Old Chain length", observerOldChainLength)
	}

	if faultyOldChainLength > 0 {
		log.Warn("Faulty", "Old Chain length", faultyOldChainLength)
	}

	return tt["reorgLength"], tt["startBlock"], tt["sprintSize"], tt["faultyNode"], faultyOldChainLength, observerOldChainLength
}

func SprintLengthReorgIndividual2Nodes(t *testing.T, index int, tt map[string]interface{}) (uint64, uint64, uint64, []uint64, uint64, uint64) {
	t.Helper()

	log.Warn("Case ----- ", "Index", index, "InducedReorgLength", tt["reorgLength"], "BlockStart", tt["startBlock"], "SprintSize", tt["sprintSize"], "DisconnectedNode", tt["faultyNodes"])
	observerOldChainLength, faultyOldChainLength := SetupValidatorsAndTest2Nodes(t, tt)

	if observerOldChainLength > 0 {
		log.Warn("Observer", "Old Chain length", observerOldChainLength)
	}

	if faultyOldChainLength > 0 {
		log.Warn("Faulty", "Old Chain length", faultyOldChainLength)
	}

	fNodes, _ := tt["faultyNodes"].([]uint64)

	return tt["reorgLength"].(uint64), tt["startBlock"].(uint64), tt["sprintSize"].(uint64), fNodes, faultyOldChainLength, observerOldChainLength
}

func TestSprintLengthReorg2Nodes(t *testing.T) {
	t.Skip()
	t.Parallel()

	log.Root().SetHandler(log.LvlFilterHandler(3, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	_, err := fdlimit.Raise(2048)

	if err != nil {
		panic(err)
	}

	reorgsLengthTests := getTestSprintLengthReorgCases2Nodes()
	f, err := os.Create("sprintReorg2Nodes.csv")

	if err != nil {
		_log.Fatalln("failed to create file", err)
	}

	defer func() {
		if err := f.Close(); err != nil {
			panic(err)
		}
	}()

	w := csv.NewWriter(f)
	err = w.Write([]string{"Induced Reorg Length", "Start Block", "Sprint Size", "Disconnected Node Ids", "Disconnected Node Id's Reorg Length", "Observer Node Id's Reorg Length"})

	if err != nil {
		panic(err)
	}

	w.Flush()

	var wg sync.WaitGroup

	for index, tt := range reorgsLengthTests {
		if index%4 == 0 {
			wg.Wait()
		}

		wg.Add(1)
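		// Cases run four at a time; the wg.Wait above holds the loop until
		// the previous batch has drained.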
		go SprintLengthReorgIndividual2NodesHelper(t, index, tt, w, &wg)
	}

	// Wait for the last batch of cases before the test returns
	wg.Wait()
}

func TestSprintLengthReorg(t *testing.T) {
	t.Skip()
	t.Parallel()

	reorgsLengthTests := getTestSprintLengthReorgCases()
	f, err := os.Create("sprintReorg.csv")

	if err != nil {
		_log.Fatalln("failed to create file", err)
	}

	defer func() {
		if err := f.Close(); err != nil {
			panic(err)
		}
	}()

	w := csv.NewWriter(f)
	err = w.Write([]string{"Induced Reorg Length", "Start Block", "Sprint Size", "Disconnected Node Id", "Disconnected Node Id's Reorg Length", "Observer Node Id's Reorg Length"})

	if err != nil {
		panic(err)
	}

	w.Flush()

	var wg sync.WaitGroup

	for index, tt := range reorgsLengthTests {
		if index%4 == 0 {
			wg.Wait()
		}

		wg.Add(1)

		go SprintLengthReorgIndividualHelper(t, index, tt, w, &wg)
	}

	// Wait for the last batch of cases before the test returns
	wg.Wait()
}

func SprintLengthReorgIndividualHelper(t *testing.T, index int, tt map[string]uint64, w *csv.Writer, wg *sync.WaitGroup) {
	t.Helper()

	r1, r2, r3, r4, r5, r6 := SprintLengthReorgIndividual(t, index, tt)
	err := w.Write([]string{fmt.Sprint(r1), fmt.Sprint(r2), fmt.Sprint(r3), fmt.Sprint(r4), fmt.Sprint(r5), fmt.Sprint(r6)})

	if err != nil {
		panic(err)
	}

	w.Flush()
	wg.Done()
}

func SprintLengthReorgIndividual2NodesHelper(t *testing.T, index int, tt map[string]interface{}, w *csv.Writer, wg *sync.WaitGroup) {
	t.Helper()

	r1, r2, r3, r4, r5, r6 := SprintLengthReorgIndividual2Nodes(t, index, tt)
	err := w.Write([]string{fmt.Sprint(r1), fmt.Sprint(r2), fmt.Sprint(r3), fmt.Sprint(r4), fmt.Sprint(r5), fmt.Sprint(r6)})

	if err != nil {
		panic(err)
	}

	w.Flush()
	wg.Done()
}

// nolint: gocognit
func SetupValidatorsAndTest(t *testing.T, tt map[string]uint64) (uint64, uint64) {
	t.Helper()

	log.Root().SetHandler(log.LvlFilterHandler(3, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	_, err := fdlimit.Raise(2048)

	if err != nil {
		panic(err)
	}

	// Generate a batch of accounts to seal and fund with
	faucets := make([]*ecdsa.PrivateKey, 128)
	for i := 0; i < len(faucets); i++ {
		faucets[i], _ = crypto.GenerateKey()
	}

	// Create a seven-validator Bor network from the 7val genesis
	genesis := InitGenesisSprintLength(t, faucets, "./testdata/genesis_7val.json", tt["sprintSize"])

	nodes := make([]*eth.Ethereum, len(keys_21val))
	enodes := make([]*enode.Node, len(keys_21val))
	stacks := make([]*node.Node, len(keys_21val))

	pkeys_21val := make([]*ecdsa.PrivateKey, len(keys_21val))

	for index, signerdata := range keys_21val {
		pkeys_21val[index], _ = crypto.HexToECDSA(signerdata["priv_key"])
	}

	for i := 0; i < len(keys_21val); i++ {
		// Start the node and wait until it's up
		stack, ethBackend, err := InitMinerSprintLength(genesis, pkeys_21val[i], true)
		if err != nil {
			panic(err)
		}
		defer stack.Close()

		for stack.Server().NodeInfo().Ports.Listener == 0 {
			time.Sleep(250 * time.Millisecond)
		}
		// Connect the node to all the previous ones
		for j, n := range enodes {
			if j < i {
				stack.Server().AddPeer(n)
			}
		}
		// Start tracking the node and its enode
		stacks[i] = stack
		nodes[i] = ethBackend
		enodes[i] = stack.Server().Self()
	}
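	// Give the peers a moment to finish connecting before mining starts.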
	time.Sleep(3 * time.Second)

	// Iterate over all the nodes and start mining
	for _, node := range nodes {
		if err := node.StartMining(1); err != nil {
			panic(err)
		}
	}

	chain2HeadChObserver := make(chan core.Chain2HeadEvent, 64)
	chain2HeadChFaulty := make(chan core.Chain2HeadEvent, 64)

	var observerOldChainLength, faultyOldChainLength uint64

	faultyProducerIndex := tt["faultyNode"] // faulty: the node that causes the reorg
	subscribedNodeIndex := 6                // observer: node in the other partition; it would produce the 7th sprint, but the test never runs that far

	nodes[subscribedNodeIndex].BlockChain().SubscribeChain2HeadEvent(chain2HeadChObserver)
	nodes[faultyProducerIndex].BlockChain().SubscribeChain2HeadEvent(chain2HeadChFaulty)

	stacks[faultyProducerIndex].Server().NoDiscovery = true

	for {
		blockHeaderObserver := nodes[subscribedNodeIndex].BlockChain().CurrentHeader()
		blockHeaderFaulty := nodes[faultyProducerIndex].BlockChain().CurrentHeader()

		log.Warn("Current Observer block", "number", blockHeaderObserver.Number, "hash", blockHeaderObserver.Hash())
		log.Warn("Current Faulty block", "number", blockHeaderFaulty.Number, "hash", blockHeaderFaulty.Hash())

		// Cut the faulty node off from the rest of the network at the start block
		if blockHeaderFaulty.Number.Uint64() == tt["startBlock"] {
			stacks[faultyProducerIndex].Server().MaxPeers = 0

			for _, enode := range enodes {
				stacks[faultyProducerIndex].Server().RemovePeer(enode)
			}
		}

		// Keep it disconnected for the duration of the induced reorg
		if blockHeaderObserver.Number.Uint64() >= tt["startBlock"] && blockHeaderObserver.Number.Uint64() < tt["startBlock"]+tt["reorgLength"] {
			for _, enode := range enodes {
				stacks[faultyProducerIndex].Server().RemovePeer(enode)
			}
		}

		// Reconnect once the reorg window has passed
		if blockHeaderObserver.Number.Uint64() == tt["startBlock"]+tt["reorgLength"] {
			stacks[faultyProducerIndex].Server().NoDiscovery = false
			stacks[faultyProducerIndex].Server().MaxPeers = 100

			for _, enode := range enodes {
				stacks[faultyProducerIndex].Server().AddPeer(enode)
			}
		}

		if blockHeaderFaulty.Number.Uint64() >= 255 {
			break
		}

		select {
		case ev := <-chain2HeadChObserver:
			if ev.Type == core.Chain2HeadReorgEvent {
				if len(ev.OldChain) > 1 {
					observerOldChainLength = uint64(len(ev.OldChain))
					return observerOldChainLength, 0
				}
			}

		case ev := <-chain2HeadChFaulty:
			if ev.Type == core.Chain2HeadReorgEvent {
				if len(ev.OldChain) > 1 {
					faultyOldChainLength = uint64(len(ev.OldChain))
					return 0, faultyOldChainLength
				}
			}

		default:
			time.Sleep(500 * time.Millisecond)
		}
	}

	return 0, 0
}
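// SetupValidatorsAndTest2Nodes mirrors SetupValidatorsAndTest, but disconnects
// a pair of faulty nodes that stay peered with each other, so the minority
// partition keeps extending its own fork until it is reconnected.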
// nolint: gocognit
func SetupValidatorsAndTest2Nodes(t *testing.T, tt map[string]interface{}) (uint64, uint64) {
	t.Helper()

	log.Root().SetHandler(log.LvlFilterHandler(3, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	_, err := fdlimit.Raise(2048)

	if err != nil {
		panic(err)
	}

	// Generate a batch of accounts to seal and fund with
	faucets := make([]*ecdsa.PrivateKey, 128)
	for i := 0; i < len(faucets); i++ {
		faucets[i], _ = crypto.GenerateKey()
	}

	// Create a seven-validator Bor network from the 7val genesis
	genesis := InitGenesisSprintLength(t, faucets, "./testdata/genesis_7val.json", tt["sprintSize"].(uint64))

	nodes := make([]*eth.Ethereum, len(keys_21val))
	enodes := make([]*enode.Node, len(keys_21val))
	stacks := make([]*node.Node, len(keys_21val))

	pkeys_21val := make([]*ecdsa.PrivateKey, len(keys_21val))

	for index, signerdata := range keys_21val {
		pkeys_21val[index], _ = crypto.HexToECDSA(signerdata["priv_key"])
	}

	for i := 0; i < len(keys_21val); i++ {
		// Start the node and wait until it's up
		stack, ethBackend, err := InitMinerSprintLength(genesis, pkeys_21val[i], true)
		if err != nil {
			panic(err)
		}
		defer stack.Close()

		for stack.Server().NodeInfo().Ports.Listener == 0 {
			time.Sleep(250 * time.Millisecond)
		}
		// Connect the node to all the previous ones
		for j, n := range enodes {
			if j < i {
				stack.Server().AddPeer(n)
			}
		}
		// Start tracking the node and its enode
		stacks[i] = stack
		nodes[i] = ethBackend
		enodes[i] = stack.Server().Self()
	}

	// Give the peers a moment to connect, then start mining on every node
	time.Sleep(3 * time.Second)

	for _, node := range nodes {
		if err := node.StartMining(1); err != nil {
			panic(err)
		}
	}

	chain2HeadChObserver := make(chan core.Chain2HeadEvent, 64)
	chain2HeadChFaulty := make(chan core.Chain2HeadEvent, 64)

	var observerOldChainLength, faultyOldChainLength uint64

	faultyProducerIndex := tt["faultyNodes"].([]uint64)[0] // faulty: the first of the disconnected nodes
	subscribedNodeIndex := 6                               // observer: node in the other partition; it would produce the 7th sprint, but the test never runs that far

	nodes[subscribedNodeIndex].BlockChain().SubscribeChain2HeadEvent(chain2HeadChObserver)
	nodes[faultyProducerIndex].BlockChain().SubscribeChain2HeadEvent(chain2HeadChFaulty)

	stacks[faultyProducerIndex].Server().NoDiscovery = true

	for {
		blockHeaderObserver := nodes[subscribedNodeIndex].BlockChain().CurrentHeader()
		blockHeaderFaulty := nodes[faultyProducerIndex].BlockChain().CurrentHeader()

		log.Warn("Current Observer block", "number", blockHeaderObserver.Number, "hash", blockHeaderObserver.Hash())
		log.Warn("Current Faulty block", "number", blockHeaderFaulty.Number, "hash", blockHeaderFaulty.Hash())

		// During the reorg window, isolate the faulty nodes from everyone but each other
		if blockHeaderObserver.Number.Uint64() >= tt["startBlock"].(uint64) && blockHeaderObserver.Number.Uint64() < tt["startBlock"].(uint64)+tt["reorgLength"].(uint64) {
			for _, n := range tt["faultyNodes"].([]uint64) {
				stacks[n].Server().MaxPeers = 1

				for _, enode := range enodes {
					stacks[n].Server().RemovePeer(enode)
				}

				for _, m := range tt["faultyNodes"].([]uint64) {
					stacks[m].Server().AddPeer(enodes[n])
				}
			}
		}

		// Reconnect once the reorg window has passed
		if blockHeaderObserver.Number.Uint64() == tt["startBlock"].(uint64)+tt["reorgLength"].(uint64) {
			stacks[faultyProducerIndex].Server().NoDiscovery = false
			stacks[faultyProducerIndex].Server().MaxPeers = 100

			for _, enode := range enodes {
				stacks[faultyProducerIndex].Server().AddPeer(enode)
			}
		}

		if blockHeaderFaulty.Number.Uint64() >= 255 {
			break
		}

		select {
		case ev := <-chain2HeadChObserver:
			if ev.Type == core.Chain2HeadReorgEvent {
				if len(ev.OldChain) > 1 {
					observerOldChainLength = uint64(len(ev.OldChain))
					return observerOldChainLength, 0
				}
			}

		case ev := <-chain2HeadChFaulty:
			if ev.Type == core.Chain2HeadReorgEvent {
				if len(ev.OldChain) > 1 {
					faultyOldChainLength = uint64(len(ev.OldChain))
					return 0, faultyOldChainLength
				}
			}

		default:
			time.Sleep(500 * time.Millisecond)
		}
	}

	return 0, 0
}
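// InitGenesisSprintLength loads the genesis spec at fileLocation, pins the
// chain ID to 15001 and overrides the sprint size of span "0" with sprintSize.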
func InitGenesisSprintLength(t *testing.T, faucets []*ecdsa.PrivateKey, fileLocation string, sprintSize uint64) *core.Genesis {
	t.Helper()

	// sprint size = 8 in genesis
	genesisData, err := ioutil.ReadFile(fileLocation)
	if err != nil {
		t.Fatalf("%s", err)
	}

	genesis := &core.Genesis{}

	if err := json.Unmarshal(genesisData, genesis); err != nil {
		t.Fatalf("%s", err)
	}

	genesis.Config.ChainID = big.NewInt(15001)
	genesis.Config.EIP150Hash = common.Hash{}
	genesis.Config.Bor.Sprint["0"] = sprintSize

	return genesis
}

// InitMinerSprintLength starts a single mining node for the given genesis,
// signing with privKey, and returns the node stack and its eth backend.
func InitMinerSprintLength(genesis *core.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool) (*node.Node, *eth.Ethereum, error) {
	// Define the basic configurations for the Ethereum node
	datadir, _ := ioutil.TempDir("", "")

	config := &node.Config{
		Name:    "geth",
		Version: params.Version,
		DataDir: datadir,
		P2P: p2p.Config{
			ListenAddr:  "0.0.0.0:0",
			NoDiscovery: true,
			MaxPeers:    25,
		},
		UseLightweightKDF: true,
	}
	// Create the node and configure a full Ethereum node on it
	stack, err := node.New(config)
	if err != nil {
		return nil, nil, err
	}

	ethBackend, err := eth.New(stack, &ethconfig.Config{
		Genesis:         genesis,
		NetworkId:       genesis.Config.ChainID.Uint64(),
		SyncMode:        downloader.FullSync,
		DatabaseCache:   256,
		DatabaseHandles: 256,
		TxPool:          core.DefaultTxPoolConfig,
		GPO:             ethconfig.Defaults.GPO,
		Ethash:          ethconfig.Defaults.Ethash,
		Miner: miner.Config{
			Etherbase: crypto.PubkeyToAddress(privKey.PublicKey),
			GasCeil:   genesis.GasLimit * 11 / 10,
			GasPrice:  big.NewInt(1),
			Recommit:  time.Second,
		},
		WithoutHeimdall: withoutHeimdall,
	})

	if err != nil {
		return nil, nil, err
	}

	// register backend to account manager with keystore for signing
	keydir := stack.KeyStoreDir()

	n, p := keystore.StandardScryptN, keystore.StandardScryptP
	kStore := keystore.NewKeyStore(keydir, n, p)

	_, err = kStore.ImportECDSA(privKey, "")

	if err != nil {
		return nil, nil, err
	}

	acc := kStore.Accounts()[0]
	err = kStore.Unlock(acc, "")

	if err != nil {
		return nil, nil, err
	}

	// proceed to authorize the local account manager in any case
	ethBackend.AccountManager().AddBackend(kStore)

	err = stack.Start()

	return stack, ethBackend, err
}