github.com/klaytn/klaytn@v1.12.1/datasync/downloader/queue_test.go

// Modifications Copyright 2020 The klaytn Authors
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from eth/downloader/queue_test.go (2020/07/24).
// Modified and improved for the klaytn development.

package downloader

import (
	"fmt"
	"math/big"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/klaytn/klaytn/blockchain"
	"github.com/klaytn/klaytn/blockchain/types"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/consensus/gxhash"
	"github.com/klaytn/klaytn/consensus/istanbul"
	"github.com/klaytn/klaytn/log"
	"github.com/klaytn/klaytn/params"
	"github.com/klaytn/klaytn/reward"
	"github.com/klaytn/klaytn/storage/database"
)

var (
	testdb  = database.NewMemoryDBManager()
	genesis = blockchain.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 2nd block
// contains a transaction.
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
	blocks, receipts := blockchain.GenerateChain(params.TestChainConfig, parent, gxhash.NewFaker(), testdb, n, func(i int, block *blockchain.BlockGen) {
		block.SetRewardbase(common.Address{seed})
		// Add one tx to every second block
		if !empty && i%2 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
	})
	return blocks, receipts
}

type chainData struct {
	blocks       []*types.Block
	stakingInfos []*reward.StakingInfo
	offset       int
}

var (
	chain        *chainData
	emptyChain   *chainData
	testInterval uint64 = 4
)
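
// TestFixtureShape is an illustrative sanity check (a sketch added for
// clarity, not part of the original suite) pinning down the fixture
// arithmetic the tests below rely on: 128 blocks, a transaction in every
// 2nd block, and one staking info per testInterval (4) blocks. It only
// touches the package-level fixtures populated in init().
func TestFixtureShape(t *testing.T) {
	if got, exp := chain.Len(), 128; got != exp {
		t.Errorf("fixture chain length: got %d, exp %d", got, exp)
	}
	if got, exp := len(chain.stakingInfos), 128/int(testInterval); got != exp {
		t.Errorf("fixture staking info count: got %d, exp %d", got, exp)
	}
	if got, exp := emptyChain.Len(), 128; got != exp {
		t.Errorf("fixture empty chain length: got %d, exp %d", got, exp)
	}
}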

func init() {
	// Create a chain of blocks to import. 128 blocks are created and a
	// transaction is contained in every 2nd block.
	targetBlocks := 128

	var stakingInfos []*reward.StakingInfo
	for i := 4; i <= 128; i += 4 {
		stakingInfos = append(stakingInfos, &reward.StakingInfo{BlockNum: uint64(i)})
	}

	blocks, _ := makeChain(targetBlocks, 0, genesis, false)
	chain = &chainData{blocks, stakingInfos, 0}

	blocks, _ = makeChain(targetBlocks, 0, genesis, true)
	emptyChain = &chainData{blocks, stakingInfos, 0}
}

func (chain *chainData) headers() []*types.Header {
	hdrs := make([]*types.Header, len(chain.blocks))
	for i, b := range chain.blocks {
		hdrs[i] = b.Header()
	}
	return hdrs
}

func (chain *chainData) Len() int {
	return len(chain.blocks)
}

func dummyPeer(id string) *peerConnection {
	p := &peerConnection{
		id:      id,
		lacking: make(map[common.Hash]struct{}),
	}
	return p
}

func TestBasics(t *testing.T) {
	// set test staking update interval
	orig := params.StakingUpdateInterval()
	params.SetStakingUpdateInterval(testInterval)
	defer params.SetStakingUpdateInterval(orig)

	numOfBlocks := len(chain.blocks)
	numOfReceipts := len(chain.blocks) / 2
	numOfStakingInfos := len(chain.stakingInfos)

	q := newQueue(10, 10, uint64(istanbul.WeightedRandom))
	if !q.Idle() {
		t.Errorf("new queue should be idle")
	}
	q.Prepare(1, FastSync)
	if res := q.Results(false); len(res) != 0 {
		t.Fatal("new queue should have 0 results")
	}

	// Schedule a batch of headers
	q.Schedule(chain.headers(), 1)
	if q.Idle() {
		t.Errorf("queue should not be idle")
	}
	if got, exp := q.PendingBlocks(), numOfBlocks; got != exp {
		t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
	}
	// Only non-empty receipts get added to the task queue
	if got, exp := q.PendingReceipts(), numOfReceipts; got != exp {
		t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
	}
	// Staking info on every 4th block gets added to the task queue
	if got, exp := q.PendingStakingInfos(), numOfStakingInfos; got != exp {
		t.Errorf("wrong pending staking info count, got %d, exp %d", got, exp)
	}
	// Items are now queued for downloading, next step is that we tell the
	// queue that a certain peer will deliver them for us
	{
		peer := dummyPeer("peer-1")
		fetchReq, _, throttle := q.ReserveBodies(peer, 50)
		if !throttle {
			// queue size is only 10, so throttling should occur
			t.Fatal("should throttle")
		}
		// But we should still get the first things to fetch
		if got, exp := len(fetchReq.Headers), 5; got != exp {
			t.Fatalf("expected %d requests, got %d", exp, got)
		}
		if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
			t.Fatalf("expected header %d, got %d", exp, got)
		}
	}
	if got, exp := q.blockTaskQueue.Size(), numOfBlocks-10; got != exp {
		t.Errorf("expected block task queue to be %d, got %d", exp, got)
	}
	if got, exp := q.receiptTaskQueue.Size(), numOfReceipts; got != exp {
		t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos; got != exp {
		t.Errorf("expected staking info task queue to be %d, got %d", exp, got)
	}
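
	// Why 5 body fetches and a shrinkage of exactly 10 above: with a result
	// cache of 10 slots, only the first 10 scheduled headers (blocks 1..10)
	// can be reserved before throttling kicks in. Of those, the odd-numbered
	// blocks 1, 3, 5, 7 and 9 carry a transaction (makeChain puts one in
	// every 2nd block), so 5 body fetches are handed out, while the 5 empty
	// blocks need no body download at all. (Explanatory note; the numbers
	// follow from the fixture above.)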
	{
		peer := dummyPeer("peer-2")
		fetchReq, _, throttle := q.ReserveBodies(peer, 50)

		// The second peer should hit throttling
		if !throttle {
			t.Fatalf("should throttle")
		}
		// And not get any fetches at all, since it was throttled to begin with
		if fetchReq != nil {
			t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers))
		}
	}
	if got, exp := q.blockTaskQueue.Size(), numOfBlocks-10; got != exp {
		t.Errorf("expected block task queue to be %d, got %d", exp, got)
	}
	if got, exp := q.receiptTaskQueue.Size(), numOfReceipts; got != exp {
		t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos; got != exp {
		t.Errorf("expected staking info task queue to be %d, got %d", exp, got)
	}
	{
		// The receipt delivering peer should not be affected
		// by the throttling of body deliveries
		peer := dummyPeer("peer-3")
		fetchReq, _, throttle := q.ReserveReceipts(peer, 50)
		if !throttle {
			// queue size is only 10, so throttling should occur
			t.Fatal("should throttle")
		}
		// But we should still get the first things to fetch
		if got, exp := len(fetchReq.Headers), 5; got != exp {
			t.Fatalf("expected %d requests, got %d", exp, got)
		}
		if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
			t.Fatalf("expected header %d, got %d", exp, got)
		}
	}
	if got, exp := q.blockTaskQueue.Size(), numOfBlocks-10; got != exp {
		t.Fatalf("expected block task queue size %d, got %d", exp, got)
	}
	if got, exp := q.receiptTaskQueue.Size(), numOfReceipts-5; got != exp {
		t.Fatalf("expected receipt task queue size %d, got %d", exp, got)
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos; got != exp {
		t.Fatalf("expected staking info task queue size %d, got %d", exp, got)
	}
	{
		// The staking info delivering peer should not be affected
		// by the throttling of body deliveries
		peer := dummyPeer("peer-4")
		fetchReq, _, throttle := q.ReserveStakingInfos(peer, 50)
		if !throttle {
			// queue size is only 10, so throttling should occur
			t.Fatal("should throttle")
		}
		// But we should still get the first things to fetch: staking info is
		// needed on every 4th block, so blocks 4 and 8 within the cache window
		if got, exp := len(fetchReq.Headers), 2; got != exp {
			t.Fatalf("expected %d requests, got %d", exp, got)
		}
		if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(4); got != exp {
			t.Fatalf("expected header %d, got %d", exp, got)
		}
	}
	if got, exp := q.blockTaskQueue.Size(), numOfBlocks-10; got != exp {
		t.Fatalf("expected block task queue size %d, got %d", exp, got)
	}
	if got, exp := q.receiptTaskQueue.Size(), numOfReceipts-5; got != exp {
		t.Fatalf("expected receipt task queue size %d, got %d", exp, got)
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos-2; got != exp {
		t.Fatalf("expected staking info task queue size %d, got %d", exp, got)
	}
	if got, exp := q.resultCache.countCompleted(), 0; got != exp {
		t.Errorf("wrong processable count, got %d, exp %d", got, exp)
	}
}
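
// deliverFirstBodies is an illustrative helper (a sketch added for clarity;
// it is not used by the tests above). It shows the delivery half of the body
// flow that TestBasics stops short of: ReserveBodies hands out a fetch
// request, and DeliverBodies with the matching transaction lists marks those
// results complete. It assumes blocks[0] is block number 1, as makeChain
// produces when starting from genesis.
func deliverFirstBodies(q *queue, p *peerConnection, blocks []*types.Block, count int) error {
	fetchReq, _, _ := q.ReserveBodies(p, count)
	if fetchReq == nil {
		// nothing reservable: bodies empty, already reserved, or throttled
		return nil
	}
	var txs [][]*types.Transaction
	for _, hdr := range fetchReq.Headers {
		// block numbers start at 1, so block N lives at blocks[N-1]
		txs = append(txs, blocks[hdr.Number.Uint64()-1].Transactions())
	}
	_, err := q.DeliverBodies(p.id, txs)
	return err
}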

func TestEmptyBlocks(t *testing.T) {
	// set test staking update interval
	orig := params.StakingUpdateInterval()
	params.SetStakingUpdateInterval(testInterval)
	defer params.SetStakingUpdateInterval(orig)

	numOfBlocks := len(emptyChain.blocks)
	numOfStakingInfos := len(emptyChain.stakingInfos)

	q := newQueue(10, 10, uint64(istanbul.WeightedRandom))

	q.Prepare(1, FastSync)
	// Schedule a batch of headers
	q.Schedule(emptyChain.headers(), 1)
	if q.Idle() {
		t.Errorf("queue should not be idle")
	}
	if got, exp := q.PendingBlocks(), numOfBlocks; got != exp {
		t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
	}
	if got, exp := q.PendingReceipts(), 0; got != exp {
		t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
	}
	if got, exp := q.PendingStakingInfos(), numOfStakingInfos; got != exp {
		t.Errorf("wrong pending staking info count, got %d, exp %d", got, exp)
	}
	// They won't be processable, because the fetch results haven't been
	// created yet
	if got, exp := q.resultCache.countCompleted(), 0; got != exp {
		t.Errorf("wrong processable count, got %d, exp %d", got, exp)
	}

	// Items are now queued for downloading, next step is that we tell the
	// queue that a certain peer will deliver them for us.
	// That should trigger all of them to suddenly become 'done'
	{
		// Reserve blocks
		peer := dummyPeer("peer-1")
		fetchReq, _, _ := q.ReserveBodies(peer, 50)

		// there should be nothing to fetch, blocks are empty
		if fetchReq != nil {
			t.Fatal("there should be no body fetch tasks remaining")
		}
	}
	if q.blockTaskQueue.Size() != numOfBlocks-10 {
		t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
	}
	if q.receiptTaskQueue.Size() != 0 {
		t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos; got != exp {
		t.Fatalf("expected staking info task queue size %d, got %d", exp, got)
	}
	{
		peer := dummyPeer("peer-3")
		fetchReq, _, _ := q.ReserveReceipts(peer, 50)

		// there should be nothing to fetch, blocks are empty
		if fetchReq != nil {
			t.Fatal("there should be no receipt fetch tasks remaining")
		}
	}
	if q.blockTaskQueue.Size() != numOfBlocks-10 {
		t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
	}
	if q.receiptTaskQueue.Size() != 0 {
		t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos; got != exp {
		t.Fatalf("expected staking info task queue size %d, got %d", exp, got)
	}
	{
		// The staking info delivering peer should not be affected
		// by the throttling of body deliveries
		peer := dummyPeer("peer-4")
		fetchReq, _, throttle := q.ReserveStakingInfos(peer, 50)
		if !throttle {
			// queue size is only 10, so throttling should occur
			t.Fatal("should throttle")
		}
		// But we should still get the first things to fetch
		if got, exp := len(fetchReq.Headers), 2; got != exp {
			t.Fatalf("expected %d requests, got %d", exp, got)
		}
		if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(4); got != exp {
			t.Fatalf("expected header %d, got %d", exp, got)
		}
	}
	if q.blockTaskQueue.Size() != numOfBlocks-10 {
		t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
	}
	if q.receiptTaskQueue.Size() != 0 {
		t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
	}
	if got, exp := q.stakingInfoTaskQueue.Size(), numOfStakingInfos-2; got != exp {
		t.Fatalf("expected staking info task queue size %d, got %d", exp, got)
	}
	// Blocks 1-3 are fully complete (empty bodies, no receipts, and no staking
	// info needed before block 4); block 4 still awaits its staking info, so
	// the contiguous completed prefix is 3.
	if got, exp := q.resultCache.countCompleted(), 3; got != exp {
		t.Errorf("wrong processable count, got %d, exp %d", got, exp)
	}
}
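
// The scenario test below is deliberately excluded from normal runs: `go test`
// only executes functions whose names start with "Test", so the X prefix hides
// it. To exercise it, rename XTestDelivery to TestDelivery and run, e.g.:
//
//	go test -run TestDelivery -v ./datasync/downloader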

// XTestDelivery does some more extensive testing of events that happen,
// blocks that become known and peers that make reservations and deliveries.
// Disabled since it's not really a unit test, but it can be executed to test
// some more advanced scenarios.
func XTestDelivery(t *testing.T) {
	// the outside network, holding blocks
	blo, rec := makeChain(128, 0, genesis, false)
	world := newNetwork()
	world.receipts = rec
	world.chain = blo
	world.progress(10)
	if false {
		log.Root().SetHandler(log.StdoutHandler)
	}
	q := newQueue(10, 10, uint64(istanbul.WeightedRandom))
	var wg sync.WaitGroup
	q.Prepare(1, FastSync)
	wg.Add(1)
	go func() {
		// deliver headers
		defer wg.Done()
		c := 1
		for {
			// fmt.Printf("getting headers from %d\n", c)
			hdrs := world.headers(c)
			l := len(hdrs)
			// fmt.Printf("scheduling %d headers, first %d last %d\n",
			//	l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
			q.Schedule(hdrs, uint64(c))
			c += l
		}
	}()
	wg.Add(1)
	go func() {
		// collect results
		defer wg.Done()
		tot := 0
		for {
			res := q.Results(true)
			tot += len(res)
			fmt.Printf("got %d results, %d tot\n", len(res), tot)
			// Now we can forget about these
			world.forget(res[len(res)-1].Header.Number.Uint64())
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// reserve body fetch
		i := 4
		for {
			peer := dummyPeer(fmt.Sprintf("peer-%d", i))
			f, _, _ := q.ReserveBodies(peer, rand.Intn(30))
			if f != nil {
				var txs [][]*types.Transaction
				numToSkip := rand.Intn(len(f.Headers))
				for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {
					txs = append(txs, world.getTransactions(hdr.Number.Uint64()))
				}
				time.Sleep(100 * time.Millisecond)
				_, err := q.DeliverBodies(peer.id, txs)
				if err != nil {
					fmt.Printf("delivered %d bodies %v\n", len(txs), err)
				}
			} else {
				i++
				time.Sleep(200 * time.Millisecond)
			}
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// reserve receipt fetch
		peer := dummyPeer("peer-3")
		for {
			f, _, _ := q.ReserveReceipts(peer, rand.Intn(50))
			if f != nil {
				var rcs [][]*types.Receipt
				for _, hdr := range f.Headers {
					rcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))
				}
				_, err := q.DeliverReceipts(peer.id, rcs)
				if err != nil {
					fmt.Printf("delivered %d receipts %v\n", len(rcs), err)
				}
				time.Sleep(100 * time.Millisecond)
			} else {
				time.Sleep(200 * time.Millisecond)
			}
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			time.Sleep(300 * time.Millisecond)
			// world.tick()
			// fmt.Printf("trying to progress\n")
			world.progress(rand.Intn(100))
		}
		for i := 0; i < 50; i++ {
			time.Sleep(2990 * time.Millisecond)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			time.Sleep(990 * time.Millisecond)
			fmt.Printf("world block tip is %d\n",
				world.chain[len(world.chain)-1].Header().Number.Uint64())
			fmt.Println(q.Stats())
		}
	}()
	wg.Wait()
}

func newNetwork() *network {
	var l sync.RWMutex
	return &network{
		cond:   sync.NewCond(&l),
		offset: 1, // block 1 is at blocks[0]
	}
}

// network represents the outside network, holding the canonical blocks and
// receipts that the queue under test pulls from.
type network struct {
	offset   int
	chain    []*types.Block
	receipts []types.Receipts
	lock     sync.RWMutex
	cond     *sync.Cond
}
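
// The accessors below address blocks by absolute block number. The offset
// field maps a block number to a slice index: it starts at 1 (block 1 lives
// at chain[0]) and is advanced by forget() once downstream consumers are done
// with a prefix of the chain.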

func (n *network) getTransactions(blocknum uint64) types.Transactions {
	index := blocknum - uint64(n.offset)
	return n.chain[index].Transactions()
}

func (n *network) getReceipts(blocknum uint64) types.Receipts {
	index := blocknum - uint64(n.offset)
	if got := n.chain[index].Header().Number.Uint64(); got != blocknum {
		fmt.Printf("Err, got %d exp %d\n", got, blocknum)
		panic("block number mismatch")
	}
	return n.receipts[index]
}

func (n *network) forget(blocknum uint64) {
	index := blocknum - uint64(n.offset)
	n.chain = n.chain[index:]
	n.receipts = n.receipts[index:]
	n.offset = int(blocknum)
}

func (n *network) progress(numBlocks int) {
	n.lock.Lock()
	defer n.lock.Unlock()
	// fmt.Printf("progressing...\n")
	newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)
	n.chain = append(n.chain, newBlocks...)
	n.receipts = append(n.receipts, newR...)
	n.cond.Broadcast()
}

func (n *network) headers(from int) []*types.Header {
	numHeaders := 128
	var hdrs []*types.Header
	index := from - n.offset

	for index >= len(n.chain) {
		// wait for progress
		n.cond.L.Lock()
		// fmt.Printf("header going into wait\n")
		n.cond.Wait()
		index = from - n.offset
		n.cond.L.Unlock()
	}
	n.lock.RLock()
	defer n.lock.RUnlock()
	for i, b := range n.chain[index:] {
		hdrs = append(hdrs, b.Header())
		if i >= numHeaders {
			break
		}
	}
	return hdrs
}
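
// TestNetworkForget is an illustrative sketch (added for clarity, not part of
// the original suite) exercising the offset bookkeeping documented above:
// after forget(n), block n becomes the first remaining entry and offset
// tracks its number.
func TestNetworkForget(t *testing.T) {
	n := newNetwork()
	n.chain, n.receipts = makeChain(8, 0, genesis, false)
	n.forget(5)
	if got, exp := n.chain[0].Header().Number.Uint64(), uint64(5); got != exp {
		t.Errorf("expected first remaining block %d, got %d", exp, got)
	}
	if got, exp := n.offset, 5; got != exp {
		t.Errorf("expected offset %d, got %d", exp, got)
	}
}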