github.com/murrekatt/go-ethereum@v1.5.8-0.20170123175102-fc52f2c007fb/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/rcrowley/go-metrics"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	blockCacheLimit   = 8192 // Maximum number of blocks to cache before throttling the download
	maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peer               // Peer to which the request was sent
	From    uint64              // [eth/62] Requested chain element index (used for skeleton fills only)
	Hashes  map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
	Headers []*types.Header     // [eth/62] Requested headers, sorted by request order
	Time    time.Time           // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	Pending int // Number of data fetches still pending

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

// queue represents hashes that either need fetching or are being fetched.
type queue struct {
	mode          SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
	fastSyncPivot uint64   // Block number where the fast sync pivots into archive synchronisation mode

	headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
	headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
	headerProced    int                            // [eth/62] Number of headers already processed from the results
	headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
	headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
	blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

	receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
	receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

	stateTaskIndex int                      // [eth/63] Counter indexing the added hashes to ensure prioritised retrieval order
	stateTaskPool  map[common.Hash]int      // [eth/63] Pending node data retrieval tasks, mapping to their priority
	stateTaskQueue *prque.Prque             // [eth/63] Priority queue of the hashes to fetch the node data for
	statePendPool  map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations

	stateDatabase   ethdb.Database   // [eth/63] Trie database to populate during state reassembly
	stateScheduler  *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator
	stateProcessors int32            // [eth/63] Number of currently running state processors
	stateSchedLock  sync.RWMutex     // [eth/63] Lock serialising access to the state scheduler

	resultCache  []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	lock   *sync.Mutex
	active *sync.Cond
	closed bool
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(stateDb ethdb.Database) *queue {
	lock := new(sync.Mutex)
	return &queue{
		headerPendPool:   make(map[string]*fetchRequest),
		headerContCh:     make(chan bool),
		blockTaskPool:    make(map[common.Hash]*types.Header),
		blockTaskQueue:   prque.New(),
		blockPendPool:    make(map[string]*fetchRequest),
		blockDonePool:    make(map[common.Hash]struct{}),
		receiptTaskPool:  make(map[common.Hash]*types.Header),
		receiptTaskQueue: prque.New(),
		receiptPendPool:  make(map[string]*fetchRequest),
		receiptDonePool:  make(map[common.Hash]struct{}),
		stateTaskPool:    make(map[common.Hash]int),
		stateTaskQueue:   prque.New(),
		statePendPool:    make(map[string]*fetchRequest),
		stateDatabase:    stateDb,
		resultCache:      make([]*fetchResult, blockCacheLimit),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
}

// Reset clears out the queue contents.
func (q *queue) Reset() {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.stateSchedLock.Lock()
	defer q.stateSchedLock.Unlock()

	q.closed = false
	q.mode = FullSync
	q.fastSyncPivot = 0

	q.headerHead = common.Hash{}

	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)
	q.blockDonePool = make(map[common.Hash]struct{})

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)
	q.receiptDonePool = make(map[common.Hash]struct{})

	q.stateTaskIndex = 0
	q.stateTaskPool = make(map[common.Hash]int)
	q.stateTaskQueue.Reset()
	q.statePendPool = make(map[string]*fetchRequest)
	q.stateScheduler = nil

	q.resultCache = make([]*fetchResult, blockCacheLimit)
	q.resultOffset = 0
}

// Close marks the end of the sync, unblocking WaitResults.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// PendingNodeData retrieves the number of node data entries pending for retrieval.
func (q *queue) PendingNodeData() int {
	q.stateSchedLock.RLock()
	defer q.stateSchedLock.RUnlock()

	if q.stateScheduler != nil {
		return q.stateScheduler.Pending()
	}
	return 0
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// InFlightNodeData retrieves whether there are node data entry fetch requests
// currently in flight.
func (q *queue) InFlightNodeData() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.statePendPool)+int(atomic.LoadInt32(&q.stateProcessors)) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside. This
// method is used by the tester to detect termination events.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
	cached := len(q.blockDonePool) + len(q.receiptDonePool)

	q.stateSchedLock.RLock()
	if q.stateScheduler != nil {
		queued += q.stateScheduler.Pending()
	}
	q.stateSchedLock.RUnlock()

	return (queued + pending + cached) == 0
}

// FastSyncPivot retrieves the currently used fast sync pivot point.
func (q *queue) FastSyncPivot() uint64 {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.fastSyncPivot
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight block (body) requests
	pending := 0
	for _, request := range q.blockPendPool {
		pending += len(request.Hashes) + len(request.Headers)
	}
	// Throttle if more blocks (bodies) are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.blockDonePool)
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight receipt requests
	pending := 0
	for _, request := range q.receiptPendPool {
		pending += len(request.Headers)
	}
	// Throttle if more receipts are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.receiptDonePool)
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New()
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -float32(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4])
			break
		}
		// Make sure no duplicate requests are executed
		if _, ok := q.blockTaskPool[hash]; ok {
			glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4])
			continue
		}
		if _, ok := q.receiptTaskPool[hash]; ok {
			glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4])
			continue
		}
		// Queue the header for content retrieval
		q.blockTaskPool[hash] = header
		q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))

		if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
			// Fast phase of the fast sync, retrieve receipts too
			q.receiptTaskPool[hash] = header
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
			// Pivoting point of the fast sync, switch the state retrieval to this
			glog.V(logger.Debug).Infof("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4])

			q.stateTaskIndex = 0
			q.stateTaskPool = make(map[common.Hash]int)
			q.stateTaskQueue.Reset()
			for _, req := range q.statePendPool {
				req.Hashes = make(map[common.Hash]int) // Make sure executing requests fail, but don't disappear
			}

			q.stateSchedLock.Lock()
			q.stateScheduler = state.NewStateSync(header.Root, q.stateDatabase)
			q.stateSchedLock.Unlock()
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// WaitResults retrieves and permanently removes a batch of fetch
// results from the cache. The result slice will be empty if the queue
// has been closed.
func (q *queue) WaitResults() []*fetchResult {
	q.lock.Lock()
	defer q.lock.Unlock()

	nproc := q.countProcessableItems()
	for nproc == 0 && !q.closed {
		q.active.Wait()
		nproc = q.countProcessableItems()
	}
	results := make([]*fetchResult, nproc)
	copy(results, q.resultCache[:nproc])
	if len(results) > 0 {
		// Mark results as done before dropping them from the cache.
		for _, result := range results {
			hash := result.Header.Hash()
			delete(q.blockDonePool, hash)
			delete(q.receiptDonePool, hash)
		}
		// Delete the results from the cache and clear the tail.
		copy(q.resultCache, q.resultCache[nproc:])
		for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
			q.resultCache[i] = nil
		}
		// Advance the expected block number of the first cache entry.
		q.resultOffset += uint64(nproc)
	}
	return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
	for i, result := range q.resultCache {
		// Don't process incomplete or unavailable items.
		if result == nil || result.Pending > 0 {
			return i
		}
		// Special handling for the fast-sync pivot block:
		if q.mode == FastSync {
			bnum := result.Header.Number.Uint64()
			if bnum == q.fastSyncPivot {
				// If the state of the pivot block is not
				// available yet, we cannot proceed and return 0.
				//
				// Stop before processing the pivot block to ensure that
				// resultCache has space for fsHeaderForceVerify items. Not
				// doing this could leave us unable to download the required
				// amount of headers.
				if i > 0 || len(q.stateTaskPool) > 0 || q.PendingNodeData() > 0 {
					return i
				}
				for j := 0; j < fsHeaderForceVerify; j++ {
					if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil {
						return i
					}
				}
			}
			// If we're just past the fast sync pivot, stop as well
			// because the following batch needs different insertion.
			// This simplifies handling the switchover in d.process.
			if bnum == q.fastSyncPivot+1 && i > 0 {
				return i
			}
		}
	}
	return len(q.resultCache)
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -float32(from))
	}
	// Assemble and return the header download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveNodeData reserves a set of node data hashes for the given peer, skipping
// any previously failed downloads.
func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
	// Create a task generator to fetch state-fetch tasks if all scheduled ones are done
	generator := func(max int) {
		q.stateSchedLock.Lock()
		defer q.stateSchedLock.Unlock()

		if q.stateScheduler != nil {
			for _, hash := range q.stateScheduler.Missing(max) {
				q.stateTaskPool[hash] = q.stateTaskIndex
				q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex))
				q.stateTaskIndex++
			}
		}
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, maxInFlightStates)
}

// reserveHashes reserves a set of hashes for the given peer, skipping previously
// failed ones.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest {
	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := pendPool[p.id]; ok {
		return nil
	}
	// Calculate an upper limit on the hashes we might fetch (i.e. throttling)
	allowance := maxPending
	if allowance > 0 {
		for _, request := range pendPool {
			allowance -= len(request.Hashes)
		}
	}
	// If there's a task generator, ask it to fill our task queue
	if taskGen != nil && taskQueue.Size() < allowance {
		taskGen(allowance - taskQueue.Size())
	}
	if taskQueue.Empty() {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send := make(map[common.Hash]int)
	skip := make(map[common.Hash]int)

	for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ {
		hash, priority := taskQueue.Pop()
		if p.Lacks(hash.(common.Hash)) {
			skip[hash.(common.Hash)] = int(priority)
		} else {
			send[hash.(common.Hash)] = int(priority)
		}
	}
	// Merge all the skipped hashes back
	for hash, index := range skip {
		taskQueue.Push(hash, float32(index))
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer:   p,
		Hashes: send,
		Time:   time.Now(),
	}
	pendPool[p.id] = request

	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued, requiring processing.
func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued, requiring importing.
func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.ReceiptHash == types.EmptyRootHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, nil
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, nil
	}
	// Calculate an upper limit on the items we might fetch (i.e. throttling)
	space := len(q.resultCache) - len(donePool)
	for _, request := range pendPool {
		space -= len(request.Headers)
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)

	progress := false
	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
		header := taskQueue.PopItem().(*types.Header)

		// If we're the first to request this task, initialise the result container
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 {
			common.Report("index allocation went beyond available resultCache space")
			return nil, false, errInvalidChain
		}
		if q.resultCache[index] == nil {
			components := 1
			if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
				components = 2
			}
			q.resultCache[index] = &fetchResult{
				Pending: components,
				Header:  header,
			}
		}
		// If this fetch task is a noop, skip this fetch operation
		if isNoop(header) {
			donePool[header.Hash()] = struct{}{}
			delete(taskPool, header.Hash())

			space, proc = space-1, proc-1
			q.resultCache[index].Pending--
			progress = true
			continue
		}
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	if progress {
		// Wake WaitResults, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, nil
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request

	return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// CancelNodeData aborts a node state data fetch request, returning all pending
// hashes to the task queue.
func (q *queue) CancelNodeData(request *fetchRequest) {
	q.cancel(request, q.stateTaskQueue, q.statePendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request.From > 0 {
		taskQueue.Push(request.From, -float32(request.From))
	}
	for hash, index := range request.Hashes {
		taskQueue.Push(hash, float32(index))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerId string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerId)
	}
	if request, ok := q.receiptPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerId)
	}
	if request, ok := q.statePendPool[peerId]; ok {
		for hash, index := range request.Hashes {
			q.stateTaskQueue.Push(hash, float32(index))
		}
		delete(q.statePendPool, peerId)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// ExpireNodeData checks for in flight node data requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireNodeData(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.statePendPool, q.stateTaskQueue, stateTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any non-satisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -float32(request.From))
			}
			for hash, index := range request.Hashes {
				taskQueue.Push(hash, float32(index))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -float32(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expirations := len(request.Hashes)
			if expirations < len(request.Headers) {
				expirations = len(request.Headers)
			}
			expiries[id] = expirations
		}
	}
	// Remove the expired requests from the pending pool
	for id := range expiries {
		delete(pendPool, id)
	}
	return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
			accepted = false
		}
	}
	if accepted {
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
				accepted = false
				break
			}
			if headers[i].Hash() != header.ParentHash {
				glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4])
				accepted = false
				break
			}
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -float32(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		return nil
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
			return errInvalidReceipt
		}
		result.Receipts = receiptList[index]
		return nil
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
	results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		useful   bool
	)
	for i, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Reconstruct the next result if contents match up
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
			failure = errInvalidChain
			break
		}
		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
			failure = err
			break
		}
		donePool[header.Hash()] = struct{}{}
		q.resultCache[index].Pending--
		useful = true
		accepted++

		// Clean up a successful fetch
		request.Headers[i] = nil
		delete(taskPool, header.Hash())
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers {
		if header != nil {
			taskQueue.Push(header, -float32(header.Number.Uint64()))
		}
	}
	// Wake up WaitResults
	if accepted > 0 {
		q.active.Signal()
	}
	// If none of the data was good, it's a stale delivery
	switch {
	case failure == nil || failure == errInvalidChain:
		return accepted, failure
	case useful:
		return accepted, fmt.Errorf("partial failure: %v", failure)
	default:
		return accepted, errStaleDelivery
	}
}

// DeliverNodeData injects a node state data retrieval response into the queue.
// The method returns the number of node state entries accepted from the delivery.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(int, bool, error)) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.statePendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	stateReqTimer.UpdateSince(request.Time)
	delete(q.statePendPool, id)

	// If no data was retrieved, mark the requested hashes as unavailable for the origin peer
	if len(data) == 0 {
		for hash := range request.Hashes {
			request.Peer.MarkLacking(hash)
		}
	}
	// Iterate over the downloaded data and verify each item
	accepted, errs := 0, make([]error, 0)
	process := []trie.SyncResult{}
	for _, blob := range data {
		// Skip any state trie entries that were not requested
		hash := common.BytesToHash(crypto.Keccak256(blob))
		if _, ok := request.Hashes[hash]; !ok {
			errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
			continue
		}
		// Inject the next state trie item into the processing queue
		process = append(process, trie.SyncResult{Hash: hash, Data: blob})
		accepted++

		delete(request.Hashes, hash)
		delete(q.stateTaskPool, hash)
	}
	// Start the asynchronous node state data injection
	atomic.AddInt32(&q.stateProcessors, 1)
	go func() {
		defer atomic.AddInt32(&q.stateProcessors, -1)
		q.deliverNodeData(process, callback)
	}()
	// Return all failed or missing fetches to the queue
	for hash, index := range request.Hashes {
		q.stateTaskQueue.Push(hash, float32(index))
	}
	// If none of the data items were good, it's a stale delivery
	switch {
	case len(errs) == 0:
		return accepted, nil
	case len(errs) == len(request.Hashes):
		return accepted, errStaleDelivery
	default:
		return accepted, fmt.Errorf("multiple failures: %v", errs)
	}
}

// deliverNodeData is the asynchronous node data processor that injects a batch
// of sync results into the state scheduler.
func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(int, bool, error)) {
	// Wake up WaitResults after the state has been written because it
	// might be waiting for the pivot block state to get completed.
	defer q.active.Signal()

	// Process results one by one to permit task fetches in between
	progressed := false
	for i, result := range results {
		q.stateSchedLock.Lock()

		if q.stateScheduler == nil {
			// Syncing aborted since this async delivery started, bail out
			q.stateSchedLock.Unlock()
			callback(i, progressed, errNoFetchesPending)
			return
		}

		batch := q.stateDatabase.NewBatch()
		prog, _, err := q.stateScheduler.Process([]trie.SyncResult{result}, batch)
		if err != nil {
			q.stateSchedLock.Unlock()
			callback(i, progressed, err)
			return
		}
		if err = batch.Write(); err != nil {
			q.stateSchedLock.Unlock()
			callback(i, progressed, err)
			return // TODO(karalabe): If a DB write fails (disk full), we ought to cancel the sync
		}
		// Item processing succeeded, release the lock (temporarily)
		progressed = progressed || prog
		q.stateSchedLock.Unlock()
	}
	callback(len(results), progressed, nil)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	if q.resultOffset < offset {
		q.resultOffset = offset
	}
	q.fastSyncPivot = pivot
	q.mode = mode

	// If long running fast sync, also start up a head state retrieval immediately
	if mode == FastSync && pivot > 0 {
		q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase)
	}
}
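
// exampleQueueUsage is an illustrative sketch and NOT part of the upstream
// go-ethereum file: it only shows, in rough order, the lifecycle the downloader
// drives the queue through (Prepare -> Schedule -> Reserve -> Deliver ->
// WaitResults) using methods defined above. All inputs (the database, peer,
// head header, scheduled headers and the delivered transaction/uncle lists)
// are assumed to be supplied by a hypothetical caller; in the real downloader
// the reserve/deliver steps run concurrently and error handling is richer.
func exampleQueueUsage(stateDb ethdb.Database, p *peer, head *types.Header, headers []*types.Header,
	txLists [][]*types.Transaction, uncleLists [][]*types.Header) []*fetchResult {
	q := newQueue(stateDb)

	// Accept results starting at block #1, full sync, no fast sync pivot.
	q.Prepare(1, FullSync, 0, head)

	// Queue the headers for body retrieval (and receipts too in fast sync).
	q.Schedule(headers, 1)

	// Reserve a batch of body fetches for the peer and feed its (assumed)
	// response back; a successful delivery wakes WaitResults.
	if req, _, _ := q.ReserveBodies(p, MaxBlockFetch); req != nil {
		q.DeliverBodies(p.id, txLists, uncleLists)
	}
	// Blocks until at least one completed result is cached (or Close is called).
	return q.WaitResults()
}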