github.com/carter-ya/go-ethereum@v0.0.0-20230628080049-d2309be3983b/eth/handler_eth_test.go (about) 1 // Copyright 2020 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package eth 18 19 import ( 20 "fmt" 21 "math/big" 22 "math/rand" 23 "sync/atomic" 24 "testing" 25 "time" 26 27 "github.com/ethereum/go-ethereum/common" 28 "github.com/ethereum/go-ethereum/consensus" 29 "github.com/ethereum/go-ethereum/consensus/ethash" 30 "github.com/ethereum/go-ethereum/core" 31 "github.com/ethereum/go-ethereum/core/forkid" 32 "github.com/ethereum/go-ethereum/core/rawdb" 33 "github.com/ethereum/go-ethereum/core/types" 34 "github.com/ethereum/go-ethereum/core/vm" 35 "github.com/ethereum/go-ethereum/eth/downloader" 36 "github.com/ethereum/go-ethereum/eth/protocols/eth" 37 "github.com/ethereum/go-ethereum/event" 38 "github.com/ethereum/go-ethereum/p2p" 39 "github.com/ethereum/go-ethereum/p2p/enode" 40 "github.com/ethereum/go-ethereum/params" 41 "github.com/ethereum/go-ethereum/rlp" 42 ) 43 44 // testEthHandler is a mock event handler to listen for inbound network requests 45 // on the `eth` protocol and convert them into a more easily testable form. 
46 type testEthHandler struct { 47 blockBroadcasts event.Feed 48 txAnnounces event.Feed 49 txBroadcasts event.Feed 50 } 51 52 func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") } 53 func (h *testEthHandler) TxPool() eth.TxPool { panic("no backing tx pool") } 54 func (h *testEthHandler) AcceptTxs() bool { return true } 55 func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") } 56 func (h *testEthHandler) PeerInfo(enode.ID) interface{} { panic("not used in tests") } 57 58 func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { 59 switch packet := packet.(type) { 60 case *eth.NewBlockPacket: 61 h.blockBroadcasts.Send(packet.Block) 62 return nil 63 64 case *eth.NewPooledTransactionHashesPacket: 65 h.txAnnounces.Send(([]common.Hash)(*packet)) 66 return nil 67 68 case *eth.TransactionsPacket: 69 h.txBroadcasts.Send(([]*types.Transaction)(*packet)) 70 return nil 71 72 case *eth.PooledTransactionsPacket: 73 h.txBroadcasts.Send(([]*types.Transaction)(*packet)) 74 return nil 75 76 default: 77 panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet)) 78 } 79 } 80 81 // Tests that peers are correctly accepted (or rejected) based on the advertised 82 // fork IDs in the protocol handshake. 
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }

// testForkIDSplit builds two handlers that share a genesis but whose chain
// configs diverge at later forks, then drives them through three handshake
// phases: pre-fork (accept), matching fork (accept) and diverged fork (at
// least one side must reject).
func testForkIDSplit(t *testing.T, protocol uint) {
	t.Parallel()

	var (
		engine = ethash.NewFaker()

		// Same genesis, but the pro-fork config schedules extra forks ahead.
		configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
		configProFork = &params.ChainConfig{
			HomesteadBlock: big.NewInt(1),
			EIP150Block:    big.NewInt(2),
			EIP155Block:    big.NewInt(2),
			EIP158Block:    big.NewInt(2),
			ByzantiumBlock: big.NewInt(3),
		}
		dbNoFork  = rawdb.NewMemoryDatabase()
		dbProFork = rawdb.NewMemoryDatabase()

		gspecNoFork  = &core.Genesis{Config: configNoFork}
		gspecProFork = &core.Genesis{Config: configProFork}

		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil)
		chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil)

		// Two blocks each: inserting them one at a time moves the chains
		// across the Homestead and Spurious fork boundaries below.
		_, blocksNoFork, _  = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil)
		_, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil)

		ethNoFork, _ = newHandler(&handlerConfig{
			Database:   dbNoFork,
			Chain:      chainNoFork,
			TxPool:     newTestTxPool(),
			Merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
		ethProFork, _ = newHandler(&handlerConfig{
			Database:   dbProFork,
			Chain:      chainProFork,
			TxPool:     newTestTxPool(),
			Merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
	)
	ethNoFork.Start(1000)
	ethProFork.Start(1000)

	// Clean up everything after ourselves
	defer chainNoFork.Stop()
	defer chainProFork.Stop()

	defer ethNoFork.Stop()
	defer ethProFork.Stop()

	// Both nodes should allow the other to connect (same genesis, next fork is the same)
	p2pNoFork, p2pProFork := p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	// Run both handshakes concurrently; each side blocks until its peer answers.
	errc := make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("frontier nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("frontier nofork <-> profork handler timeout")
		}
	}
	// Progress into Homestead. Fork's match, so we don't care what the future holds
	chainNoFork.InsertChain(blocksNoFork[:1])
	chainProFork.InsertChain(blocksProFork[:1])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("homestead nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("homestead nofork <-> profork handler timeout")
		}
	}
	// Progress into Spurious. Forks mismatch, signalling differing chains, reject
	chainNoFork.InsertChain(blocksNoFork[1:2])
	chainProFork.InsertChain(blocksProFork[1:2])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	var successes int
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err == nil {
				successes++
				if successes == 2 { // Only one side disconnects
					t.Fatalf("fork ID rejection didn't happen")
				}
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("split peers not rejected")
		}
	}
}

// Tests that received transactions are added to the local pool.
237 func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) } 238 239 func testRecvTransactions(t *testing.T, protocol uint) { 240 t.Parallel() 241 242 // Create a message handler, configure it to accept transactions and watch them 243 handler := newTestHandler() 244 defer handler.close() 245 246 handler.handler.acceptTxs = 1 // mark synced to accept transactions 247 248 txs := make(chan core.NewTxsEvent) 249 sub := handler.txpool.SubscribeNewTxsEvent(txs) 250 defer sub.Unsubscribe() 251 252 // Create a source peer to send messages through and a sink handler to receive them 253 p2pSrc, p2pSink := p2p.MsgPipe() 254 defer p2pSrc.Close() 255 defer p2pSink.Close() 256 257 src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool) 258 sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool) 259 defer src.Close() 260 defer sink.Close() 261 262 go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error { 263 return eth.Handle((*ethHandler)(handler.handler), peer) 264 }) 265 // Run the handshake locally to avoid spinning up a source handler 266 var ( 267 genesis = handler.chain.Genesis() 268 head = handler.chain.CurrentBlock() 269 td = handler.chain.GetTd(head.Hash(), head.NumberU64()) 270 ) 271 if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { 272 t.Fatalf("failed to run protocol handshake") 273 } 274 // Send the transaction to the sink and verify that it's added to the tx pool 275 tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) 276 tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) 277 278 if err := src.SendTransactions([]*types.Transaction{tx}); err != nil { 279 t.Fatalf("failed to send transaction: %v", err) 280 } 281 select { 282 case event := <-txs: 283 if len(event.Txs) != 1 { 284 t.Errorf("wrong number of 
added transactions: got %d, want 1", len(event.Txs)) 285 } else if event.Txs[0].Hash() != tx.Hash() { 286 t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash()) 287 } 288 case <-time.After(2 * time.Second): 289 t.Errorf("no NewTxsEvent received within 2 seconds") 290 } 291 } 292 293 // This test checks that pending transactions are sent. 294 func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) } 295 296 func testSendTransactions(t *testing.T, protocol uint) { 297 t.Parallel() 298 299 // Create a message handler and fill the pool with big transactions 300 handler := newTestHandler() 301 defer handler.close() 302 303 insert := make([]*types.Transaction, 100) 304 for nonce := range insert { 305 tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240)) 306 tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) 307 308 insert[nonce] = tx 309 } 310 go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed 311 time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) 312 313 // Create a source handler to send messages through and a sink peer to receive them 314 p2pSrc, p2pSink := p2p.MsgPipe() 315 defer p2pSrc.Close() 316 defer p2pSink.Close() 317 318 src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool) 319 sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool) 320 defer src.Close() 321 defer sink.Close() 322 323 go handler.handler.runEthPeer(src, func(peer *eth.Peer) error { 324 return eth.Handle((*ethHandler)(handler.handler), peer) 325 }) 326 // Run the handshake locally to avoid spinning up a source handler 327 var ( 328 genesis = handler.chain.Genesis() 329 head = handler.chain.CurrentBlock() 330 td = handler.chain.GetTd(head.Hash(), head.NumberU64()) 331 ) 332 if 
err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { 333 t.Fatalf("failed to run protocol handshake") 334 } 335 // After the handshake completes, the source handler should stream the sink 336 // the transactions, subscribe to all inbound network events 337 backend := new(testEthHandler) 338 339 anns := make(chan []common.Hash) 340 annSub := backend.txAnnounces.Subscribe(anns) 341 defer annSub.Unsubscribe() 342 343 bcasts := make(chan []*types.Transaction) 344 bcastSub := backend.txBroadcasts.Subscribe(bcasts) 345 defer bcastSub.Unsubscribe() 346 347 go eth.Handle(backend, sink) 348 349 // Make sure we get all the transactions on the correct channels 350 seen := make(map[common.Hash]struct{}) 351 for len(seen) < len(insert) { 352 switch protocol { 353 case 66: 354 select { 355 case hashes := <-anns: 356 for _, hash := range hashes { 357 if _, ok := seen[hash]; ok { 358 t.Errorf("duplicate transaction announced: %x", hash) 359 } 360 seen[hash] = struct{}{} 361 } 362 case <-bcasts: 363 t.Errorf("initial tx broadcast received on post eth/66") 364 } 365 366 default: 367 panic("unsupported protocol, please extend test") 368 } 369 } 370 for _, tx := range insert { 371 if _, ok := seen[tx.Hash()]; !ok { 372 t.Errorf("missing transaction: %x", tx.Hash()) 373 } 374 } 375 } 376 377 // Tests that transactions get propagated to all attached peers, either via direct 378 // broadcasts or via announcements/retrievals. 379 func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) } 380 381 func testTransactionPropagation(t *testing.T, protocol uint) { 382 t.Parallel() 383 384 // Create a source handler to send transactions from and a number of sinks 385 // to receive them. We need multiple sinks since a one-to-one peering would 386 // broadcast all transactions without announcement. 
387 source := newTestHandler() 388 source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below 389 defer source.close() 390 391 sinks := make([]*testHandler, 10) 392 for i := 0; i < len(sinks); i++ { 393 sinks[i] = newTestHandler() 394 defer sinks[i].close() 395 396 sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions 397 } 398 // Interconnect all the sink handlers with the source handler 399 for i, sink := range sinks { 400 sink := sink // Closure for gorotuine below 401 402 sourcePipe, sinkPipe := p2p.MsgPipe() 403 defer sourcePipe.Close() 404 defer sinkPipe.Close() 405 406 sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool) 407 sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool) 408 defer sourcePeer.Close() 409 defer sinkPeer.Close() 410 411 go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { 412 return eth.Handle((*ethHandler)(source.handler), peer) 413 }) 414 go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error { 415 return eth.Handle((*ethHandler)(sink.handler), peer) 416 }) 417 } 418 // Subscribe to all the transaction pools 419 txChs := make([]chan core.NewTxsEvent, len(sinks)) 420 for i := 0; i < len(sinks); i++ { 421 txChs[i] = make(chan core.NewTxsEvent, 1024) 422 423 sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) 424 defer sub.Unsubscribe() 425 } 426 // Fill the source pool with transactions and wait for them at the sinks 427 txs := make([]*types.Transaction, 1024) 428 for nonce := range txs { 429 tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) 430 tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) 431 432 txs[nonce] = tx 433 } 434 source.txpool.AddRemotes(txs) 435 436 // Iterate through all the sinks and ensure they all got the transactions 437 for i := range sinks { 438 for arrived, 
timeout := 0, false; arrived < len(txs) && !timeout; { 439 select { 440 case event := <-txChs[i]: 441 arrived += len(event.Txs) 442 case <-time.After(time.Second): 443 t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs)) 444 timeout = true 445 } 446 } 447 } 448 } 449 450 // Tests that post eth protocol handshake, clients perform a mutual checkpoint 451 // challenge to validate each other's chains. Hash mismatches, or missing ones 452 // during a fast sync should lead to the peer getting dropped. 453 func TestCheckpointChallenge(t *testing.T) { 454 tests := []struct { 455 syncmode downloader.SyncMode 456 checkpoint bool 457 timeout bool 458 empty bool 459 match bool 460 drop bool 461 }{ 462 // If checkpointing is not enabled locally, don't challenge and don't drop 463 {downloader.FullSync, false, false, false, false, false}, 464 {downloader.SnapSync, false, false, false, false, false}, 465 466 // If checkpointing is enabled locally and remote response is empty, only drop during fast sync 467 {downloader.FullSync, true, false, true, false, false}, 468 {downloader.SnapSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer 469 470 // If checkpointing is enabled locally and remote response mismatches, always drop 471 {downloader.FullSync, true, false, false, false, true}, 472 {downloader.SnapSync, true, false, false, false, true}, 473 474 // If checkpointing is enabled locally and remote response matches, never drop 475 {downloader.FullSync, true, false, false, true, false}, 476 {downloader.SnapSync, true, false, false, true, false}, 477 478 // If checkpointing is enabled locally and remote times out, always drop 479 {downloader.FullSync, true, true, false, true, true}, 480 {downloader.SnapSync, true, true, false, true, true}, 481 } 482 for _, tt := range tests { 483 t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), 
func(t *testing.T) { 484 testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop) 485 }) 486 } 487 } 488 489 func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) { 490 // Reduce the checkpoint handshake challenge timeout 491 defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout) 492 syncChallengeTimeout = 250 * time.Millisecond 493 494 // Create a test handler and inject a CHT into it. The injection is a bit 495 // ugly, but it beats creating everything manually just to avoid reaching 496 // into the internals a bit. 497 handler := newTestHandler() 498 defer handler.close() 499 500 if syncmode == downloader.SnapSync { 501 atomic.StoreUint32(&handler.handler.snapSync, 1) 502 } else { 503 atomic.StoreUint32(&handler.handler.snapSync, 0) 504 } 505 var response *types.Header 506 if checkpoint { 507 number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1 508 response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")} 509 510 handler.handler.checkpointNumber = number 511 handler.handler.checkpointHash = response.Hash() 512 } 513 514 // Create a challenger peer and a challenged one. 515 p2pLocal, p2pRemote := p2p.MsgPipe() 516 defer p2pLocal.Close() 517 defer p2pRemote.Close() 518 519 local := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool) 520 remote := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool) 521 defer local.Close() 522 defer remote.Close() 523 524 handlerDone := make(chan struct{}) 525 go func() { 526 defer close(handlerDone) 527 handler.handler.runEthPeer(local, func(peer *eth.Peer) error { 528 return eth.Handle((*ethHandler)(handler.handler), peer) 529 }) 530 }() 531 532 // Run the handshake locally to avoid spinning up a remote handler. 
533 var ( 534 genesis = handler.chain.Genesis() 535 head = handler.chain.CurrentBlock() 536 td = handler.chain.GetTd(head.Hash(), head.NumberU64()) 537 ) 538 if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { 539 t.Fatalf("failed to run protocol handshake") 540 } 541 // Connect a new peer and check that we receive the checkpoint challenge. 542 if checkpoint { 543 msg, err := p2pRemote.ReadMsg() 544 if err != nil { 545 t.Fatalf("failed to read checkpoint challenge: %v", err) 546 } 547 request := new(eth.GetBlockHeadersPacket66) 548 if err := msg.Decode(request); err != nil { 549 t.Fatalf("failed to decode checkpoint challenge: %v", err) 550 } 551 query := request.GetBlockHeadersPacket 552 if query.Origin.Number != response.Number.Uint64() || query.Amount != 1 || query.Skip != 0 || query.Reverse { 553 t.Fatalf("challenge mismatch: have [%d, %d, %d, %v] want [%d, %d, %d, %v]", 554 query.Origin.Number, query.Amount, query.Skip, query.Reverse, 555 response.Number.Uint64(), 1, 0, false) 556 } 557 // Create a block to reply to the challenge if no timeout is simulated. 
558 if !timeout { 559 if empty { 560 if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{}); err != nil { 561 t.Fatalf("failed to answer challenge: %v", err) 562 } 563 } else if match { 564 responseRlp, _ := rlp.EncodeToBytes(response) 565 if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil { 566 t.Fatalf("failed to answer challenge: %v", err) 567 } 568 } else { 569 responseRlp, _ := rlp.EncodeToBytes(&types.Header{Number: response.Number}) 570 if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil { 571 t.Fatalf("failed to answer challenge: %v", err) 572 } 573 } 574 } 575 } 576 // Wait until the test timeout passes to ensure proper cleanup 577 time.Sleep(syncChallengeTimeout + 300*time.Millisecond) 578 579 // Verify that the remote peer is maintained or dropped. 580 if drop { 581 <-handlerDone 582 if peers := handler.handler.peers.len(); peers != 0 { 583 t.Fatalf("peer count mismatch: have %d, want %d", peers, 0) 584 } 585 } else { 586 if peers := handler.handler.peers.len(); peers != 1 { 587 t.Fatalf("peer count mismatch: have %d, want %d", peers, 1) 588 } 589 } 590 } 591 592 // Tests that blocks are broadcast to a sqrt number of peers only. 
593 func TestBroadcastBlock1Peer(t *testing.T) { testBroadcastBlock(t, 1, 1) } 594 func TestBroadcastBlock2Peers(t *testing.T) { testBroadcastBlock(t, 2, 1) } 595 func TestBroadcastBlock3Peers(t *testing.T) { testBroadcastBlock(t, 3, 1) } 596 func TestBroadcastBlock4Peers(t *testing.T) { testBroadcastBlock(t, 4, 2) } 597 func TestBroadcastBlock5Peers(t *testing.T) { testBroadcastBlock(t, 5, 2) } 598 func TestBroadcastBlock8Peers(t *testing.T) { testBroadcastBlock(t, 9, 3) } 599 func TestBroadcastBlock12Peers(t *testing.T) { testBroadcastBlock(t, 12, 3) } 600 func TestBroadcastBlock16Peers(t *testing.T) { testBroadcastBlock(t, 16, 4) } 601 func TestBroadcastBloc26Peers(t *testing.T) { testBroadcastBlock(t, 26, 5) } 602 func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) } 603 604 func testBroadcastBlock(t *testing.T, peers, bcasts int) { 605 t.Parallel() 606 607 // Create a source handler to broadcast blocks from and a number of sinks 608 // to receive them. 609 source := newTestHandlerWithBlocks(1) 610 defer source.close() 611 612 sinks := make([]*testEthHandler, peers) 613 for i := 0; i < len(sinks); i++ { 614 sinks[i] = new(testEthHandler) 615 } 616 // Interconnect all the sink handlers with the source handler 617 var ( 618 genesis = source.chain.Genesis() 619 td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) 620 ) 621 for i, sink := range sinks { 622 sink := sink // Closure for gorotuine below 623 624 sourcePipe, sinkPipe := p2p.MsgPipe() 625 defer sourcePipe.Close() 626 defer sinkPipe.Close() 627 628 sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) 629 sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) 630 defer sourcePeer.Close() 631 defer sinkPeer.Close() 632 633 go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { 634 return eth.Handle((*ethHandler)(source.handler), peer) 635 }) 636 if err := 
sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil { 637 t.Fatalf("failed to run protocol handshake") 638 } 639 go eth.Handle(sink, sinkPeer) 640 } 641 // Subscribe to all the transaction pools 642 blockChs := make([]chan *types.Block, len(sinks)) 643 for i := 0; i < len(sinks); i++ { 644 blockChs[i] = make(chan *types.Block, 1) 645 defer close(blockChs[i]) 646 647 sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i]) 648 defer sub.Unsubscribe() 649 } 650 // Initiate a block propagation across the peers 651 time.Sleep(100 * time.Millisecond) 652 source.handler.BroadcastBlock(source.chain.CurrentBlock(), true) 653 654 // Iterate through all the sinks and ensure the correct number got the block 655 done := make(chan struct{}, peers) 656 for _, ch := range blockChs { 657 ch := ch 658 go func() { 659 <-ch 660 done <- struct{}{} 661 }() 662 } 663 var received int 664 for { 665 select { 666 case <-done: 667 received++ 668 669 case <-time.After(100 * time.Millisecond): 670 if received != bcasts { 671 t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts) 672 } 673 return 674 } 675 } 676 } 677 678 // Tests that a propagated malformed block (uncles or transactions don't match 679 // with the hashes in the header) gets discarded and not broadcast forward. 680 func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } 681 682 func testBroadcastMalformedBlock(t *testing.T, protocol uint) { 683 t.Parallel() 684 685 // Create a source handler to broadcast blocks from and a number of sinks 686 // to receive them. 
687 source := newTestHandlerWithBlocks(1) 688 defer source.close() 689 690 // Create a source handler to send messages through and a sink peer to receive them 691 p2pSrc, p2pSink := p2p.MsgPipe() 692 defer p2pSrc.Close() 693 defer p2pSink.Close() 694 695 src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool) 696 sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool) 697 defer src.Close() 698 defer sink.Close() 699 700 go source.handler.runEthPeer(src, func(peer *eth.Peer) error { 701 return eth.Handle((*ethHandler)(source.handler), peer) 702 }) 703 // Run the handshake locally to avoid spinning up a sink handler 704 var ( 705 genesis = source.chain.Genesis() 706 td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) 707 ) 708 if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil { 709 t.Fatalf("failed to run protocol handshake") 710 } 711 // After the handshake completes, the source handler should stream the sink 712 // the blocks, subscribe to inbound network events 713 backend := new(testEthHandler) 714 715 blocks := make(chan *types.Block, 1) 716 sub := backend.blockBroadcasts.Subscribe(blocks) 717 defer sub.Unsubscribe() 718 719 go eth.Handle(backend, sink) 720 721 // Create various combinations of malformed blocks 722 head := source.chain.CurrentBlock() 723 724 malformedUncles := head.Header() 725 malformedUncles.UncleHash[0]++ 726 malformedTransactions := head.Header() 727 malformedTransactions.TxHash[0]++ 728 malformedEverything := head.Header() 729 malformedEverything.UncleHash[0]++ 730 malformedEverything.TxHash[0]++ 731 732 // Try to broadcast all malformations and ensure they all get discarded 733 for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} { 734 block := 
types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles()) 735 if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil { 736 t.Fatalf("failed to broadcast block: %v", err) 737 } 738 select { 739 case <-blocks: 740 t.Fatalf("malformed block forwarded") 741 case <-time.After(100 * time.Millisecond): 742 } 743 } 744 }