github.com/digdeepmining/go-atheios@v1.5.13-0.20180902133602-d5687a2e6f43/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered, and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/atheioschain/go-atheios/common"
	"github.com/atheioschain/go-atheios/core/state"
	"github.com/atheioschain/go-atheios/core/types"
	"github.com/atheioschain/go-atheios/crypto"
	"github.com/atheioschain/go-atheios/ethdb"
	"github.com/atheioschain/go-atheios/logger"
	"github.com/atheioschain/go-atheios/logger/glog"
	"github.com/atheioschain/go-atheios/trie"
	"github.com/rcrowley/go-metrics"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	blockCacheLimit   = 8192 // Maximum number of blocks to cache before throttling the download
	maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peer               // Peer to which the request was sent
	From    uint64              // [eth/62] Requested chain element index (used for skeleton fills only)
	Hashes  map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
	Headers []*types.Header     // [eth/62] Requested headers, sorted by request order
	Time    time.Time           // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	Pending int // Number of data fetches still pending

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode          SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
	fastSyncPivot uint64   // Block number where the fast sync pivots into archive synchronisation mode

	headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
	headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
	headerProced    int                            // [eth/62] Number of headers already processed from the results
	headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
	headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
	blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

	receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
	receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

	stateTaskIndex int                      // [eth/63] Counter indexing the added hashes to ensure prioritised retrieval order
	stateTaskPool  map[common.Hash]int      // [eth/63] Pending node data retrieval tasks, mapping to their priority
	stateTaskQueue *prque.Prque             // [eth/63] Priority queue of the hashes to fetch the node data for
	statePendPool  map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations

	stateDatabase  ethdb.Database   // [eth/63] Trie database to populate during state reassembly
	stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator
	stateWriters   int              // [eth/63] Number of running state DB writer goroutines

	resultCache  []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	lock   *sync.Mutex
	active *sync.Cond
	closed bool
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(stateDb ethdb.Database) *queue {
	lock := new(sync.Mutex)
	return &queue{
		headerPendPool:   make(map[string]*fetchRequest),
		headerContCh:     make(chan bool),
		blockTaskPool:    make(map[common.Hash]*types.Header),
		blockTaskQueue:   prque.New(),
		blockPendPool:    make(map[string]*fetchRequest),
		blockDonePool:    make(map[common.Hash]struct{}),
		receiptTaskPool:  make(map[common.Hash]*types.Header),
		receiptTaskQueue: prque.New(),
		receiptPendPool:  make(map[string]*fetchRequest),
		receiptDonePool:  make(map[common.Hash]struct{}),
		stateTaskPool:    make(map[common.Hash]int),
		stateTaskQueue:   prque.New(),
		statePendPool:    make(map[string]*fetchRequest),
		stateDatabase:    stateDb,
		resultCache:      make([]*fetchResult, blockCacheLimit),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
}

// Reset clears out the queue contents.
func (q *queue) Reset() {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync
	q.fastSyncPivot = 0

	q.headerHead = common.Hash{}

	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)
	q.blockDonePool = make(map[common.Hash]struct{})

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)
	q.receiptDonePool = make(map[common.Hash]struct{})

	q.stateTaskIndex = 0
	q.stateTaskPool = make(map[common.Hash]int)
	q.stateTaskQueue.Reset()
	q.statePendPool = make(map[string]*fetchRequest)
	q.stateScheduler = nil

	q.resultCache = make([]*fetchResult, blockCacheLimit)
	q.resultOffset = 0
}

// Close marks the end of the sync, unblocking WaitResults.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// PendingNodeData retrieves the number of node data entries pending for retrieval.
func (q *queue) PendingNodeData() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.pendingNodeDataLocked()
}

// pendingNodeDataLocked retrieves the number of node data entries pending for retrieval.
// The caller must hold q.lock.
func (q *queue) pendingNodeDataLocked() int {
	var n int
	if q.stateScheduler != nil {
		n = q.stateScheduler.Pending()
	}
	// Ensure that PendingNodeData doesn't return 0 until all state is written.
	if q.stateWriters > 0 {
		n++
	}
	return n
}
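// A minimal usage sketch (assuming an ethdb.Database named db, a chain head
// header named head and a batch of headers starting at block 1) of how a caller
// such as the downloader roughly drives the queue:
//
//	q := newQueue(db)
//	q.Prepare(1, FullSync, 0, head)
//	q.Schedule(headers, 1)
//	for results := q.WaitResults(); len(results) > 0; results = q.WaitResults() {
//		// import the completed fetch results into the chain
//	}
//	q.Close()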
// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// InFlightNodeData retrieves whether there are node data entry fetch requests
// currently in flight.
func (q *queue) InFlightNodeData() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.statePendPool)+q.stateWriters > 0
}

// Idle returns whether the queue is fully idle or has some data still inside. This
// method is used by the tester to detect termination events.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
	cached := len(q.blockDonePool) + len(q.receiptDonePool)

	if q.stateScheduler != nil {
		queued += q.stateScheduler.Pending()
	}
	return (queued + pending + cached) == 0
}

// FastSyncPivot retrieves the currently used fast sync pivot point.
func (q *queue) FastSyncPivot() uint64 {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.fastSyncPivot
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight block (body) requests
	pending := 0
	for _, request := range q.blockPendPool {
		pending += len(request.Hashes) + len(request.Headers)
	}
	// Throttle if more blocks (bodies) are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.blockDonePool)
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight receipt requests
	pending := 0
	for _, request := range q.receiptPendPool {
		pending += len(request.Headers)
	}
	// Throttle if more receipts are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.receiptDonePool)
}
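// Note on task priorities: the prque priority queue pops the entry with the
// highest priority first, so tasks throughout this file are pushed with their
// negated block number (or skeleton index) to make lower-numbered chain elements
// come out first, e.g. (sketch of the recurring pattern):
//
//	q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))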
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New()
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -float32(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4])
			break
		}
		// Make sure no duplicate requests are executed
		if _, ok := q.blockTaskPool[hash]; ok {
			glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4])
			continue
		}
		if _, ok := q.receiptTaskPool[hash]; ok {
			glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4])
			continue
		}
		// Queue the header for content retrieval
		q.blockTaskPool[hash] = header
		q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))

		if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
			// Fast phase of the fast sync, retrieve receipts too
			q.receiptTaskPool[hash] = header
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
			// Pivoting point of the fast sync, switch the state retrieval to this
			glog.V(logger.Debug).Infof("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4])

			q.stateTaskIndex = 0
			q.stateTaskPool = make(map[common.Hash]int)
			q.stateTaskQueue.Reset()
			for _, req := range q.statePendPool {
				req.Hashes = make(map[common.Hash]int) // Make sure executing requests fail, but don't disappear
			}

			q.stateScheduler = state.NewStateSync(header.Root, q.stateDatabase)
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// WaitResults retrieves and permanently removes a batch of fetch
// results from the cache. The result slice will be empty if the queue
// has been closed.
func (q *queue) WaitResults() []*fetchResult {
	q.lock.Lock()
	defer q.lock.Unlock()

	nproc := q.countProcessableItems()
	for nproc == 0 && !q.closed {
		q.active.Wait()
		nproc = q.countProcessableItems()
	}
	results := make([]*fetchResult, nproc)
	copy(results, q.resultCache[:nproc])
	if len(results) > 0 {
		// Mark results as done before dropping them from the cache.
		for _, result := range results {
			hash := result.Header.Hash()
			delete(q.blockDonePool, hash)
			delete(q.receiptDonePool, hash)
		}
		// Delete the results from the cache and clear the tail.
		copy(q.resultCache, q.resultCache[nproc:])
		for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
			q.resultCache[i] = nil
		}
		// Advance the expected block number of the first cache entry.
		q.resultOffset += uint64(nproc)
	}
	return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
	for i, result := range q.resultCache {
		// Don't process incomplete or unavailable items.
		if result == nil || result.Pending > 0 {
			return i
		}
		// Special handling for the fast-sync pivot block:
		if q.mode == FastSync {
			bnum := result.Header.Number.Uint64()
			if bnum == q.fastSyncPivot {
				// If the state of the pivot block is not
				// available yet, we cannot proceed and return 0.
				//
				// Stop before processing the pivot block to ensure that
				// resultCache has space for fsHeaderForceVerify items. Not
				// doing this could leave us unable to download the required
				// amount of headers.
				if i > 0 || len(q.stateTaskPool) > 0 || q.pendingNodeDataLocked() > 0 {
					return i
				}
				for j := 0; j < fsHeaderForceVerify; j++ {
					if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil {
						return i
					}
				}
			}
			// If we're just past the fast sync pivot, stop as well
			// because the following batch needs different insertion.
			// This simplifies handling the switchover in d.process.
			if bnum == q.fastSyncPivot+1 && i > 0 {
				return i
			}
		}
	}
	return len(q.resultCache)
}
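// Note on result indexing: resultCache is a sliding window anchored at
// resultOffset, so the slot of a header is computed as (the invariant used by
// reserveHeaders and deliver below):
//
//	index := int(header.Number.Int64() - int64(q.resultOffset))
//
// WaitResults shifts the window forward by advancing resultOffset once the
// leading, fully assembled entries have been handed to the caller.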
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -float32(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveNodeData reserves a set of node data hashes for the given peer, skipping
// any previously failed download.
func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
	// Create a task generator to fetch state-fetch tasks if all scheduled ones are done
	generator := func(max int) {
		if q.stateScheduler != nil {
			for _, hash := range q.stateScheduler.Missing(max) {
				q.stateTaskPool[hash] = q.stateTaskIndex
				q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex))
				q.stateTaskIndex++
			}
		}
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, maxInFlightStates)
}

// reserveHashes reserves a set of hashes for the given peer, skipping previously
// failed ones.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest {
	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := pendPool[p.id]; ok {
		return nil
	}
	// Calculate an upper limit on the hashes we might fetch (i.e. throttling)
	allowance := maxPending
	if allowance > 0 {
		for _, request := range pendPool {
			allowance -= len(request.Hashes)
		}
	}
	// If there's a task generator, ask it to fill our task queue
	if taskGen != nil && taskQueue.Size() < allowance {
		taskGen(allowance - taskQueue.Size())
	}
	if taskQueue.Empty() {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send := make(map[common.Hash]int)
	skip := make(map[common.Hash]int)

	for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ {
		hash, priority := taskQueue.Pop()
		if p.Lacks(hash.(common.Hash)) {
			skip[hash.(common.Hash)] = int(priority)
		} else {
			send[hash.(common.Hash)] = int(priority)
		}
	}
	// Merge all the skipped hashes back
	for hash, index := range skip {
		taskQueue.Push(hash, float32(index))
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer:   p,
		Hashes: send,
		Time:   time.Now(),
	}
	pendPool[p.id] = request

	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.ReceiptHash == types.EmptyRootHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, nil
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, nil
	}
	// Calculate an upper limit on the items we might fetch (i.e. throttling)
	space := len(q.resultCache) - len(donePool)
	for _, request := range pendPool {
		space -= len(request.Headers)
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)

	progress := false
	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
		header := taskQueue.PopItem().(*types.Header)

		// If we're the first to request this task, initialise the result container
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 {
			common.Report("index allocation went beyond available resultCache space")
			return nil, false, errInvalidChain
		}
		if q.resultCache[index] == nil {
			components := 1
			if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
				components = 2
			}
			q.resultCache[index] = &fetchResult{
				Pending: components,
				Header:  header,
			}
		}
		// If this fetch task is a noop, skip this fetch operation
		if isNoop(header) {
			donePool[header.Hash()] = struct{}{}
			delete(taskPool, header.Hash())

			space, proc = space-1, proc-1
			q.resultCache[index].Pending--
			progress = true
			continue
		}
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	if progress {
		// Wake WaitResults, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, nil
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request

	return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// CancelNodeData aborts a node state data fetch request, returning all pending
// hashes to the task queue.
func (q *queue) CancelNodeData(request *fetchRequest) {
	q.cancel(request, q.stateTaskQueue, q.statePendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request.From > 0 {
		taskQueue.Push(request.From, -float32(request.From))
	}
	for hash, index := range request.Hashes {
		taskQueue.Push(hash, float32(index))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerId string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerId)
	}
	if request, ok := q.receiptPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerId)
	}
	if request, ok := q.statePendPool[peerId]; ok {
		for hash, index := range request.Hashes {
			q.stateTaskQueue.Push(hash, float32(index))
		}
		delete(q.statePendPool, peerId)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// ExpireNodeData checks for in flight node data requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireNodeData(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.statePendPool, q.stateTaskQueue, stateTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any non satisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -float32(request.From))
			}
			for hash, index := range request.Hashes {
				taskQueue.Push(hash, float32(index))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -float32(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expirations := len(request.Hashes)
			if expirations < len(request.Headers) {
				expirations = len(request.Headers)
			}
			expiries[id] = expirations
		}
	}
	// Remove the expired requests from the pending pool
	for id := range expiries {
		delete(pendPool, id)
	}
	return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
			accepted = false
		}
	}
	if accepted {
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
				accepted = false
				break
			}
			if headers[i].Hash() != header.ParentHash {
				glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4])
				accepted = false
				break
			}
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -float32(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		return nil
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
			return errInvalidReceipt
		}
		result.Receipts = receiptList[index]
		return nil
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
	results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		useful   bool
	)
	for i, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Reconstruct the next result if contents match up
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
			failure = errInvalidChain
			break
		}
		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
			failure = err
			break
		}
		donePool[header.Hash()] = struct{}{}
		q.resultCache[index].Pending--
		useful = true
		accepted++

		// Clean up a successful fetch
		request.Headers[i] = nil
		delete(taskPool, header.Hash())
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers {
		if header != nil {
			taskQueue.Push(header, -float32(header.Number.Uint64()))
		}
	}
	// Wake up WaitResults
	if accepted > 0 {
		q.active.Signal()
	}
	// If none of the data was good, it's a stale delivery
	switch {
	case failure == nil || failure == errInvalidChain:
		return accepted, failure
	case useful:
		return accepted, fmt.Errorf("partial failure: %v", failure)
	default:
		return accepted, errStaleDelivery
	}
}

// DeliverNodeData injects a node state data retrieval response into the queue.
// The method returns the number of node state entries accepted from the delivery.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(int, bool, error)) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.statePendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	stateReqTimer.UpdateSince(request.Time)
	delete(q.statePendPool, id)

	// If no data was retrieved, mark their hashes as unavailable for the origin peer
	if len(data) == 0 {
		for hash := range request.Hashes {
			request.Peer.MarkLacking(hash)
		}
	}
	// Iterate over the downloaded data and verify each of them
	errs := make([]error, 0)
	process := []trie.SyncResult{}
	for _, blob := range data {
		// Skip any state trie entries that were not requested
		hash := common.BytesToHash(crypto.Keccak256(blob))
		if _, ok := request.Hashes[hash]; !ok {
			errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
			continue
		}
		// Inject the next state trie item into the processing queue
		process = append(process, trie.SyncResult{Hash: hash, Data: blob})
		delete(request.Hashes, hash)
		delete(q.stateTaskPool, hash)
	}
	// Return all failed or missing fetches to the queue
	for hash, index := range request.Hashes {
		q.stateTaskQueue.Push(hash, float32(index))
	}
	if q.stateScheduler == nil {
		return 0, errNoFetchesPending
	}

	// Run valid nodes through the trie download scheduler. It writes completed nodes to a
	// batch, which is committed asynchronously. This may lead to over-fetches because the
	// scheduler treats everything as written after Process has returned, but it's
	// unlikely to be an issue in practice.
	batch := q.stateDatabase.NewBatch()
	progressed, nproc, procerr := q.stateScheduler.Process(process, batch)
	q.stateWriters += 1
	go func() {
		if procerr == nil {
			nproc = len(process)
			procerr = batch.Write()
		}
		// Return processing errors through the callback so the sync gets canceled. The
		// number of writers is decremented prior to the call so PendingNodeData will
		// return zero when the callback runs.
		q.lock.Lock()
		q.stateWriters -= 1
		q.lock.Unlock()
		callback(nproc, progressed, procerr)
		// Wake up WaitResults after the state has been written because it might be
		// waiting for completion of the pivot block's state download.
		q.active.Signal()
	}()

	// If none of the data items were good, it's a stale delivery
	switch {
	case len(errs) == 0:
		return len(process), nil
	case len(errs) == len(request.Hashes):
		return len(process), errStaleDelivery
	default:
		return len(process), fmt.Errorf("multiple failures: %v", errs)
	}
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	if q.resultOffset < offset {
		q.resultOffset = offset
	}
	q.fastSyncPivot = pivot
	q.mode = mode

	// If long running fast sync, also start up a head state retrieval immediately
	if mode == FastSync && pivot > 0 {
		q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase)
	}
}
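// A rough sketch (assuming a connected peer p and a per-request item limit
// count) of the reserve/deliver round-trip a fetcher performs for block bodies:
//
//	if request, _, err := q.ReserveBodies(p, count); err == nil && request != nil {
//		// ask the peer for the bodies of request.Headers; once the response
//		// arrives, feed it back:
//		//   q.DeliverBodies(p.id, txLists, uncleLists)
//		// on a timeout or peer drop, return the work instead:
//		//   q.CancelBodies(request)  or  q.Revoke(p.id)
//	}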