// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"math/big"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/phillinzzz/newBsc/common"
	"github.com/phillinzzz/newBsc/consensus/ethash"
	"github.com/phillinzzz/newBsc/core"
	"github.com/phillinzzz/newBsc/core/rawdb"
	"github.com/phillinzzz/newBsc/core/types"
	"github.com/phillinzzz/newBsc/log"
	"github.com/phillinzzz/newBsc/params"
)

var (
	// testdb is the shared in-memory database backing every generated test chain.
	testdb = rawdb.NewMemoryDatabase()
	// genesis funds testAddress so generated blocks can carry signed transactions.
	genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every other
// block contains a transaction (unless empty is set) to allow testing correct
// block reassembly.
45 func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) { 46 blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) { 47 block.SetCoinbase(common.Address{seed}) 48 // Add one tx to every secondblock 49 if !empty && i%2 == 0 { 50 signer := types.MakeSigner(params.TestChainConfig, block.Number()) 51 tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey) 52 if err != nil { 53 panic(err) 54 } 55 block.AddTx(tx) 56 } 57 }) 58 return blocks, receipts 59 } 60 61 type chainData struct { 62 blocks []*types.Block 63 offset int 64 } 65 66 var chain *chainData 67 var emptyChain *chainData 68 69 func init() { 70 // Create a chain of blocks to import 71 targetBlocks := 128 72 blocks, _ := makeChain(targetBlocks, 0, genesis, false) 73 chain = &chainData{blocks, 0} 74 75 blocks, _ = makeChain(targetBlocks, 0, genesis, true) 76 emptyChain = &chainData{blocks, 0} 77 } 78 79 func (chain *chainData) headers() []*types.Header { 80 hdrs := make([]*types.Header, len(chain.blocks)) 81 for i, b := range chain.blocks { 82 hdrs[i] = b.Header() 83 } 84 return hdrs 85 } 86 87 func (chain *chainData) Len() int { 88 return len(chain.blocks) 89 } 90 91 func dummyPeer(id string) *peerConnection { 92 p := &peerConnection{ 93 id: id, 94 lacking: make(map[common.Hash]struct{}), 95 } 96 return p 97 } 98 99 func TestBasics(t *testing.T) { 100 numOfBlocks := len(emptyChain.blocks) 101 numOfReceipts := len(emptyChain.blocks) / 2 102 103 q := newQueue(10, 10) 104 if !q.Idle() { 105 t.Errorf("new queue should be idle") 106 } 107 q.Prepare(1, FastSync) 108 if res := q.Results(false); len(res) != 0 { 109 t.Fatal("new queue should have 0 results") 110 } 111 112 // Schedule a batch of headers 113 q.Schedule(chain.headers(), 1) 114 if q.Idle() { 115 t.Errorf("queue should not be idle") 
116 } 117 if got, exp := q.PendingBlocks(), chain.Len(); got != exp { 118 t.Errorf("wrong pending block count, got %d, exp %d", got, exp) 119 } 120 // Only non-empty receipts get added to task-queue 121 if got, exp := q.PendingReceipts(), 64; got != exp { 122 t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) 123 } 124 // Items are now queued for downloading, next step is that we tell the 125 // queue that a certain peer will deliver them for us 126 { 127 peer := dummyPeer("peer-1") 128 fetchReq, _, throttle := q.ReserveBodies(peer, 50) 129 if !throttle { 130 // queue size is only 10, so throttling should occur 131 t.Fatal("should throttle") 132 } 133 // But we should still get the first things to fetch 134 if got, exp := len(fetchReq.Headers), 5; got != exp { 135 t.Fatalf("expected %d requests, got %d", exp, got) 136 } 137 if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { 138 t.Fatalf("expected header %d, got %d", exp, got) 139 } 140 } 141 if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { 142 t.Errorf("expected block task queue to be %d, got %d", exp, got) 143 } 144 if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { 145 t.Errorf("expected receipt task queue to be %d, got %d", exp, got) 146 } 147 { 148 peer := dummyPeer("peer-2") 149 fetchReq, _, throttle := q.ReserveBodies(peer, 50) 150 151 // The second peer should hit throttling 152 if !throttle { 153 t.Fatalf("should not throttle") 154 } 155 // And not get any fetches at all, since it was throttled to begin with 156 if fetchReq != nil { 157 t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers)) 158 } 159 } 160 if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { 161 t.Errorf("expected block task queue to be %d, got %d", exp, got) 162 } 163 if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { 164 t.Errorf("expected receipt task queue to be %d, got %d", exp, got) 165 } 166 { 167 // The 
receipt delivering peer should not be affected 168 // by the throttling of body deliveries 169 peer := dummyPeer("peer-3") 170 fetchReq, _, throttle := q.ReserveReceipts(peer, 50) 171 if !throttle { 172 // queue size is only 10, so throttling should occur 173 t.Fatal("should throttle") 174 } 175 // But we should still get the first things to fetch 176 if got, exp := len(fetchReq.Headers), 5; got != exp { 177 t.Fatalf("expected %d requests, got %d", exp, got) 178 } 179 if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { 180 t.Fatalf("expected header %d, got %d", exp, got) 181 } 182 183 } 184 if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { 185 t.Errorf("expected block task queue to be %d, got %d", exp, got) 186 } 187 if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got { 188 t.Errorf("expected receipt task queue to be %d, got %d", exp, got) 189 } 190 if got, exp := q.resultCache.countCompleted(), 0; got != exp { 191 t.Errorf("wrong processable count, got %d, exp %d", got, exp) 192 } 193 } 194 195 func TestEmptyBlocks(t *testing.T) { 196 numOfBlocks := len(emptyChain.blocks) 197 198 q := newQueue(10, 10) 199 200 q.Prepare(1, FastSync) 201 // Schedule a batch of headers 202 q.Schedule(emptyChain.headers(), 1) 203 if q.Idle() { 204 t.Errorf("queue should not be idle") 205 } 206 if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp { 207 t.Errorf("wrong pending block count, got %d, exp %d", got, exp) 208 } 209 if got, exp := q.PendingReceipts(), 0; got != exp { 210 t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) 211 } 212 // They won't be processable, because the fetchresults haven't been 213 // created yet 214 if got, exp := q.resultCache.countCompleted(), 0; got != exp { 215 t.Errorf("wrong processable count, got %d, exp %d", got, exp) 216 } 217 218 // Items are now queued for downloading, next step is that we tell the 219 // queue that a certain peer will deliver them for us 
220 // That should trigger all of them to suddenly become 'done' 221 { 222 // Reserve blocks 223 peer := dummyPeer("peer-1") 224 fetchReq, _, _ := q.ReserveBodies(peer, 50) 225 226 // there should be nothing to fetch, blocks are empty 227 if fetchReq != nil { 228 t.Fatal("there should be no body fetch tasks remaining") 229 } 230 231 } 232 if q.blockTaskQueue.Size() != numOfBlocks-10 { 233 t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) 234 } 235 if q.receiptTaskQueue.Size() != 0 { 236 t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) 237 } 238 { 239 peer := dummyPeer("peer-3") 240 fetchReq, _, _ := q.ReserveReceipts(peer, 50) 241 242 // there should be nothing to fetch, blocks are empty 243 if fetchReq != nil { 244 t.Fatal("there should be no body fetch tasks remaining") 245 } 246 } 247 if q.blockTaskQueue.Size() != numOfBlocks-10 { 248 t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) 249 } 250 if q.receiptTaskQueue.Size() != 0 { 251 t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) 252 } 253 if got, exp := q.resultCache.countCompleted(), 10; got != exp { 254 t.Errorf("wrong processable count, got %d, exp %d", got, exp) 255 } 256 } 257 258 // XTestDelivery does some more extensive testing of events that happen, 259 // blocks that become known and peers that make reservations and deliveries. 
260 // disabled since it's not really a unit-test, but can be executed to test 261 // some more advanced scenarios 262 func XTestDelivery(t *testing.T) { 263 // the outside network, holding blocks 264 blo, rec := makeChain(128, 0, genesis, false) 265 world := newNetwork() 266 world.receipts = rec 267 world.chain = blo 268 world.progress(10) 269 if false { 270 log.Root().SetHandler(log.StdoutHandler) 271 272 } 273 q := newQueue(10, 10) 274 var wg sync.WaitGroup 275 q.Prepare(1, FastSync) 276 wg.Add(1) 277 go func() { 278 // deliver headers 279 defer wg.Done() 280 c := 1 281 for { 282 //fmt.Printf("getting headers from %d\n", c) 283 hdrs := world.headers(c) 284 l := len(hdrs) 285 //fmt.Printf("scheduling %d headers, first %d last %d\n", 286 // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64()) 287 q.Schedule(hdrs, uint64(c)) 288 c += l 289 } 290 }() 291 wg.Add(1) 292 go func() { 293 // collect results 294 defer wg.Done() 295 tot := 0 296 for { 297 res := q.Results(true) 298 tot += len(res) 299 fmt.Printf("got %d results, %d tot\n", len(res), tot) 300 // Now we can forget about these 301 world.forget(res[len(res)-1].Header.Number.Uint64()) 302 303 } 304 }() 305 wg.Add(1) 306 go func() { 307 defer wg.Done() 308 // reserve body fetch 309 i := 4 310 for { 311 peer := dummyPeer(fmt.Sprintf("peer-%d", i)) 312 f, _, _ := q.ReserveBodies(peer, rand.Intn(30)) 313 if f != nil { 314 var emptyList []*types.Header 315 var txs [][]*types.Transaction 316 var uncles [][]*types.Header 317 numToSkip := rand.Intn(len(f.Headers)) 318 for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { 319 txs = append(txs, world.getTransactions(hdr.Number.Uint64())) 320 uncles = append(uncles, emptyList) 321 } 322 time.Sleep(100 * time.Millisecond) 323 _, err := q.DeliverBodies(peer.id, txs, uncles) 324 if err != nil { 325 fmt.Printf("delivered %d bodies %v\n", len(txs), err) 326 } 327 } else { 328 i++ 329 time.Sleep(200 * time.Millisecond) 330 } 331 } 332 }() 333 go func() { 334 
defer wg.Done() 335 // reserve receiptfetch 336 peer := dummyPeer("peer-3") 337 for { 338 f, _, _ := q.ReserveReceipts(peer, rand.Intn(50)) 339 if f != nil { 340 var rcs [][]*types.Receipt 341 for _, hdr := range f.Headers { 342 rcs = append(rcs, world.getReceipts(hdr.Number.Uint64())) 343 } 344 _, err := q.DeliverReceipts(peer.id, rcs) 345 if err != nil { 346 fmt.Printf("delivered %d receipts %v\n", len(rcs), err) 347 } 348 time.Sleep(100 * time.Millisecond) 349 } else { 350 time.Sleep(200 * time.Millisecond) 351 } 352 } 353 }() 354 wg.Add(1) 355 go func() { 356 defer wg.Done() 357 for i := 0; i < 50; i++ { 358 time.Sleep(300 * time.Millisecond) 359 //world.tick() 360 //fmt.Printf("trying to progress\n") 361 world.progress(rand.Intn(100)) 362 } 363 for i := 0; i < 50; i++ { 364 time.Sleep(2990 * time.Millisecond) 365 366 } 367 }() 368 wg.Add(1) 369 go func() { 370 defer wg.Done() 371 for { 372 time.Sleep(990 * time.Millisecond) 373 fmt.Printf("world block tip is %d\n", 374 world.chain[len(world.chain)-1].Header().Number.Uint64()) 375 fmt.Println(q.Stats()) 376 } 377 }() 378 wg.Wait() 379 } 380 381 func newNetwork() *network { 382 var l sync.RWMutex 383 return &network{ 384 cond: sync.NewCond(&l), 385 offset: 1, // block 1 is at blocks[0] 386 } 387 } 388 389 // represents the network 390 type network struct { 391 offset int 392 chain []*types.Block 393 receipts []types.Receipts 394 lock sync.RWMutex 395 cond *sync.Cond 396 } 397 398 func (n *network) getTransactions(blocknum uint64) types.Transactions { 399 index := blocknum - uint64(n.offset) 400 return n.chain[index].Transactions() 401 } 402 func (n *network) getReceipts(blocknum uint64) types.Receipts { 403 index := blocknum - uint64(n.offset) 404 if got := n.chain[index].Header().Number.Uint64(); got != blocknum { 405 fmt.Printf("Err, got %d exp %d\n", got, blocknum) 406 panic("sd") 407 } 408 return n.receipts[index] 409 } 410 411 func (n *network) forget(blocknum uint64) { 412 index := blocknum - 
uint64(n.offset) 413 n.chain = n.chain[index:] 414 n.receipts = n.receipts[index:] 415 n.offset = int(blocknum) 416 417 } 418 func (n *network) progress(numBlocks int) { 419 420 n.lock.Lock() 421 defer n.lock.Unlock() 422 //fmt.Printf("progressing...\n") 423 newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false) 424 n.chain = append(n.chain, newBlocks...) 425 n.receipts = append(n.receipts, newR...) 426 n.cond.Broadcast() 427 428 } 429 430 func (n *network) headers(from int) []*types.Header { 431 numHeaders := 128 432 var hdrs []*types.Header 433 index := from - n.offset 434 435 for index >= len(n.chain) { 436 // wait for progress 437 n.cond.L.Lock() 438 //fmt.Printf("header going into wait\n") 439 n.cond.Wait() 440 index = from - n.offset 441 n.cond.L.Unlock() 442 } 443 n.lock.RLock() 444 defer n.lock.RUnlock() 445 for i, b := range n.chain[index:] { 446 hdrs = append(hdrs, b.Header()) 447 if i >= numHeaders { 448 break 449 } 450 } 451 return hdrs 452 }