github.com/cheng762/platon-go@v1.8.17-0.20190529111256-7deff2d7be26/eth/downloader/downloader.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "crypto/md5" 22 "errors" 23 "fmt" 24 "github.com/PlatONnetwork/PlatON-Go/core/ppos_storage" 25 "math/big" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/PlatONnetwork/PlatON-Go" 31 "github.com/PlatONnetwork/PlatON-Go/common" 32 "github.com/PlatONnetwork/PlatON-Go/core/rawdb" 33 "github.com/PlatONnetwork/PlatON-Go/core/types" 34 "github.com/PlatONnetwork/PlatON-Go/ethdb" 35 "github.com/PlatONnetwork/PlatON-Go/event" 36 "github.com/PlatONnetwork/PlatON-Go/log" 37 "github.com/PlatONnetwork/PlatON-Go/metrics" 38 "github.com/PlatONnetwork/PlatON-Go/params" 39 ) 40 41 var ( 42 MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request 43 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 44 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 45 MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly 46 MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request 47 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 48 MaxStateFetch = 384 // Amount of node state values to allow fetching per request 49 50 MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation 51 rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests 52 rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests 53 rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value 54 ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion 55 ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts 56 57 qosTuningPeers = 5 // Number of peers to tune based on (best peers) 58 qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence 59 qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value 60 61 maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) 62 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 63 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 64 65 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync 66 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 67 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 68 fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download 69 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync 70 ) 71 72 var ( 73 errBusy = errors.New("busy") 74 errUnknownPeer = errors.New("peer is unknown or unhealthy") 75 errBadPeer = errors.New("action from bad peer ignored") 76 errStallingPeer = errors.New("peer is stalling") 77 errNoPeers = errors.New("no peers to keep download active") 78 errTimeout = errors.New("timeout") 79 errEmptyHeaderSet = errors.New("empty header set by peer") 80 errPeersUnavailable = errors.New("no peers available or all tried for download") 81 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 82 errInvalidChain = errors.New("retrieved hash chain is invalid") 83 errInvalidBlock = errors.New("retrieved block is invalid") 84 errInvalidBody = errors.New("retrieved block body is invalid") 85 errInvalidReceipt = errors.New("retrieved receipt is invalid") 86 errCancelBlockFetch = errors.New("block download canceled (requested)") 87 errCancelHeaderFetch = errors.New("block header download canceled (requested)") 88 errCancelBodyFetch = errors.New("block body download canceled (requested)") 89 errCancelReceiptFetch = errors.New("receipt download canceled (requested)") 90 errCancelStateFetch = errors.New("state data download canceled (requested)") 91 errCancelHeaderProcessing = errors.New("header processing canceled (requested)") 92 errCancelContentProcessing = errors.New("content processing canceled (requested)") 93 errNoSyncActive = errors.New("no sync active") 94 errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)") 95 errPushPPosStorageProto = errors.New("push ppos storage proto error") 96 errNoNeedSync = errors.New("no need to synchronize") 97 ) 98 99 type Downloader struct { 100 mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) 101 mux *event.TypeMux // Event multiplexer to announce sync operation events 102 103 queue *queue // Scheduler for selecting the hashes to download 104 peers *peerSet // Set of active peers from which download can proceed 105 stateDB ethdb.Database 106 107 rttEstimate uint64 // Round trip time to target for download requests 108 rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) 109 110 // Statistics 111 syncStatsChainOrigin uint64 // Origin block number where syncing started at 112 syncStatsChainHeight uint64 // Highest block number known when syncing started 113 syncStatsState stateSyncStats 114 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 115 116 lightchain LightChain 117 blockchain BlockChain 118 119 // Callbacks 120 dropPeer peerDropFn // Drops a peer for misbehaving 121 122 // Status 123 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 124 synchronising int32 125 notified int32 126 committed int32 127 128 // Channels 129 headerCh chan dataPack // [eth/62] Channel receiving inbound block headers 130 bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies 131 receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts 132 bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks 133 receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks 134 headerProcCh chan []*types.Header // [eth/62] Channel to feed the header processor new tasks 135 pposStorageCh chan dataPack // [eth/63] Channel receiving inbound ppos storage 136 137 // for stateFetcher 138 stateSyncStart chan *stateSync 139 trackStateReq chan *stateReq 140 stateCh chan dataPack // [eth/63] Channel receiving inbound node state data 141 142 // Cancellation and termination 143 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 144 cancelCh chan struct{} // Channel to cancel mid-flight syncs 145 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 146 cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 147 148 quitCh chan struct{} // Quit channel to signal termination 149 quitLock sync.RWMutex // Lock to prevent double closes 150 151 // Testing hooks 152 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 153 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 154 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 155 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 156 running int32 // The indicator whether the Downloader is running or not. 157 } 158 159 // LightChain encapsulates functions required to synchronise a light chain. 160 type LightChain interface { 161 // HasHeader verifies a header's presence in the local chain. 162 HasHeader(common.Hash, uint64) bool 163 164 // GetHeaderByHash retrieves a header from the local chain. 165 GetHeaderByHash(common.Hash) *types.Header 166 167 // CurrentHeader retrieves the head header from the local chain. 168 CurrentHeader() *types.Header 169 170 // InsertHeaderChain inserts a batch of headers into the local chain. 171 InsertHeaderChain([]*types.Header, int) (int, error) 172 173 // Rollback removes a few recently added elements from the local chain. 174 Rollback([]common.Hash) 175 } 176 177 // BlockChain encapsulates functions required to sync a (full or fast) blockchain. 178 type BlockChain interface { 179 LightChain 180 181 // HasBlock verifies a block's presence in the local chain. 182 HasBlock(common.Hash, uint64) bool 183 184 // GetBlockByHash retrieves a block from the local chain. 185 GetBlockByHash(common.Hash) *types.Block 186 187 // CurrentBlock retrieves the head block from the local chain. 188 CurrentBlock() *types.Block 189 190 // CurrentFastBlock retrieves the head fast block from the local chain. 191 CurrentFastBlock() *types.Block 192 193 // FastSyncCommitHead directly commits the head block to a certain entity. 194 FastSyncCommitHead(common.Hash) error 195 196 // InsertChain inserts a batch of blocks into the local chain. 197 InsertChain(types.Blocks) (int, error) 198 199 // InsertReceiptChain inserts a batch of receipts into the local chain. 200 InsertReceiptChain(types.Blocks, []types.Receipts) (int, error) 201 } 202 203 // New creates a new downloader to fetch hashes and blocks from remote peers. 204 func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { 205 if lightchain == nil { 206 lightchain = chain 207 } 208 209 dl := &Downloader{ 210 mode: mode, 211 stateDB: stateDb, 212 mux: mux, 213 queue: newQueue(), 214 peers: newPeerSet(), 215 rttEstimate: uint64(rttMaxEstimate), 216 rttConfidence: uint64(1000000), 217 blockchain: chain, 218 lightchain: lightchain, 219 dropPeer: dropPeer, 220 headerCh: make(chan dataPack, 1), 221 bodyCh: make(chan dataPack, 1), 222 receiptCh: make(chan dataPack, 1), 223 bodyWakeCh: make(chan bool, 1), 224 receiptWakeCh: make(chan bool, 1), 225 headerProcCh: make(chan []*types.Header, 1), 226 pposStorageCh: make(chan dataPack, 1), 227 quitCh: make(chan struct{}), 228 stateCh: make(chan dataPack), 229 stateSyncStart: make(chan *stateSync), 230 syncStatsState: stateSyncStats{ 231 processed: rawdb.ReadFastTrieProgress(stateDb), 232 }, 233 trackStateReq: make(chan *stateReq), 234 } 235 go dl.qosTuner() 236 go dl.stateFetcher() 237 return dl 238 } 239 240 // Progress retrieves the synchronisation boundaries, specifically the origin 241 // block where synchronisation started at (may have failed/suspended); the block 242 // or header sync is currently at; and the latest known block which the sync targets. 243 // 244 // In addition, during the state download phase of fast synchronisation the number 245 // of processed and the total number of known states are also returned. Otherwise 246 // these are zero. 247 func (d *Downloader) Progress() ethereum.SyncProgress { 248 // Lock the current stats and return the progress 249 d.syncStatsLock.RLock() 250 defer d.syncStatsLock.RUnlock() 251 252 current := uint64(0) 253 switch d.mode { 254 case FullSync: 255 current = d.blockchain.CurrentBlock().NumberU64() 256 case FastSync: 257 current = d.blockchain.CurrentFastBlock().NumberU64() 258 case LightSync: 259 current = d.lightchain.CurrentHeader().Number.Uint64() 260 } 261 return ethereum.SyncProgress{ 262 StartingBlock: d.syncStatsChainOrigin, 263 CurrentBlock: current, 264 HighestBlock: d.syncStatsChainHeight, 265 PulledStates: d.syncStatsState.processed, 266 KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, 267 } 268 } 269 270 // Synchronising returns whether the downloader is currently retrieving blocks. 271 func (d *Downloader) Synchronising() bool { 272 return atomic.LoadInt32(&d.synchronising) > 0 273 } 274 275 // RegisterPeer injects a new download peer into the set of block source to be 276 // used for fetching hashes and blocks from. 277 func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error { 278 logger := log.New("peer", id) 279 logger.Trace("Registering sync peer") 280 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 281 logger.Error("Failed to register sync peer", "err", err) 282 return err 283 } 284 d.qosReduceConfidence() 285 286 return nil 287 } 288 289 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 290 func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error { 291 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 292 } 293 294 // UnregisterPeer remove a peer from the known list, preventing any action from 295 // the specified peer. An effort is also made to return any pending fetches into 296 // the queue. 297 func (d *Downloader) UnregisterPeer(id string) error { 298 // Unregister the peer from the active peer set and revoke any fetch tasks 299 logger := log.New("peer", id) 300 logger.Trace("Unregistering sync peer") 301 if err := d.peers.Unregister(id); err != nil { 302 logger.Error("Failed to unregister sync peer", "err", err) 303 return err 304 } 305 d.queue.Revoke(id) 306 307 // If this peer was the master peer, abort sync immediately 308 d.cancelLock.RLock() 309 master := id == d.cancelPeer 310 d.cancelLock.RUnlock() 311 312 if master { 313 d.cancel() 314 } 315 return nil 316 } 317 318 // Synchronise tries to sync up our local block chain with a remote peer, both 319 // adding various sanity checks as well as wrapping it with various log entries. 320 func (d *Downloader) Synchronise(id string, head common.Hash, bn *big.Int, mode SyncMode) error { 321 log.Debug("Synchronise from other peer", "peerID", id, "head", head, "bn", bn, "mode", mode) 322 err := d.synchronise(id, head, bn, mode) 323 switch err { 324 case nil: 325 case errBusy: 326 327 case errTimeout, errBadPeer, errStallingPeer, 328 errEmptyHeaderSet, errPeersUnavailable, errTooOld, 329 errInvalidAncestor, errInvalidChain, errPushPPosStorageProto: 330 log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 331 if d.dropPeer == nil { 332 // The dropPeer method is nil when `--copydb` is used for a local copy. 333 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 334 log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 335 } else { 336 d.dropPeer(id) 337 } 338 default: 339 log.Warn("Synchronisation failed, retrying", "err", err) 340 } 341 return err 342 } 343 344 // synchronise will select the peer and use it for synchronising. If an empty string is given 345 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 346 // checks fail an error will be returned. This method is synchronous 347 func (d *Downloader) synchronise(id string, hash common.Hash, bn *big.Int, mode SyncMode) error { 348 // Mock out the synchronisation if testing 349 if d.synchroniseMock != nil { 350 return d.synchroniseMock(id, hash) 351 } 352 // Make sure only one goroutine is ever allowed past this point at once 353 if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { 354 return errBusy 355 } 356 defer atomic.StoreInt32(&d.synchronising, 0) 357 358 // Post a user notification of the sync (only once per session) 359 if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { 360 log.Info("Block synchronisation started") 361 } 362 // Reset the queue, peer set and wake channels to clean any internal leftover state 363 d.queue.Reset() 364 d.peers.Reset() 365 366 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 367 select { 368 case <-ch: 369 default: 370 } 371 } 372 for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.pposStorageCh} { 373 for empty := false; !empty; { 374 select { 375 case <-ch: 376 default: 377 empty = true 378 } 379 } 380 } 381 for empty := false; !empty; { 382 select { 383 case <-d.headerProcCh: 384 default: 385 empty = true 386 } 387 } 388 // Create cancel channel for aborting mid-flight and mark the master peer 389 d.cancelLock.Lock() 390 d.cancelCh = make(chan struct{}) 391 d.cancelPeer = id 392 d.cancelLock.Unlock() 393 394 defer d.Cancel() // No matter what, we can't leave the cancel channel open 395 396 // Set the requested sync mode, unless it's forbidden 397 d.mode = mode 398 399 // Retrieve the origin peer and initiate the downloading process 400 p := d.peers.Peer(id) 401 if p == nil { 402 return errUnknownPeer 403 } 404 return d.syncWithPeer(p, hash, bn) 405 } 406 407 // syncWithPeer starts a block synchronization based on the hash chain from the 408 // specified peer and head hash. 409 func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, bn *big.Int) (err error) { 410 d.mux.Post(StartEvent{}) 411 d.start() 412 defer func() { 413 // reset on error 414 if err != nil { 415 d.stop() 416 d.mux.Post(FailedEvent{err}) 417 } else { 418 d.stop() 419 d.mux.Post(DoneEvent{}) 420 } 421 }() 422 if p.version < 62 { 423 return errTooOld 424 } 425 426 defer func(start time.Time) { 427 log.Debug("Synchronisation terminated", "elapsed", time.Since(start)) 428 }(time.Now()) 429 430 var latest *types.Header 431 pivot := uint64(0) 432 if d.mode == FastSync { 433 // fetch latest ppos storage cache from remote peer 434 latest, pivot, err = d.fetchLatestPposStorage(p) 435 if err != nil { 436 return err 437 } 438 } else { 439 // Look up the sync boundaries: the common ancestor and the target block 440 latest, err = d.fetchHeight(p) 441 if err != nil { 442 return err 443 } 444 } 445 height := latest.Number.Uint64() 446 447 448 //origin, err := d.findAncestor(p, height) 449 origin, pivot, err := d.findOrigin(p, height, pivot) 450 if err != nil { 451 return err 452 } 453 // Ensure our origin point is below any fast sync pivot point 454 d.committed = 1 455 if d.mode == FastSync && origin < pivot { 456 d.committed = 0 457 } 458 459 log.Debug("Synchronising with the network begin", "peer", p.id, "eth", p.version, "origin", origin, "pivot", pivot, "latest", latest.Number.Uint64(), "mode", d.mode, "timestamp", time.Now().UnixNano() / 1e6) 460 461 d.syncStatsLock.Lock() 462 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 463 d.syncStatsChainOrigin = origin 464 } 465 d.syncStatsChainHeight = height 466 d.syncStatsLock.Unlock() 467 468 // Initiate the sync using a concurrent header and content retrieval algorithm 469 d.queue.Prepare(origin+1, d.mode) 470 if d.syncInitHook != nil { 471 d.syncInitHook(origin, height) 472 } 473 474 fetchers := []func() error{ 475 func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved 476 func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync 477 func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync 478 func() error { return d.processHeaders(origin+1, pivot, bn) }, 479 } 480 if d.mode == FastSync { 481 fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest, pivot) }) 482 } else if d.mode == FullSync { 483 fetchers = append(fetchers, d.processFullSyncContent) 484 } 485 return d.spawnSync(fetchers, d.mode) 486 } 487 488 // spawnSync runs d.process and all given fetcher functions to completion in 489 // separate goroutines, returning the first error that appears. 490 func (d *Downloader) spawnSync(fetchers []func() error, mode SyncMode) error { 491 errc := make(chan error, len(fetchers)) 492 d.cancelWg.Add(len(fetchers)) 493 for _, fn := range fetchers { 494 fn := fn 495 go func() { defer d.cancelWg.Done(); errc <- fn() }() 496 } 497 // Wait for the first error, then terminate the others. 498 var err error 499 for i := 0; i < len(fetchers); i++ { 500 if i == len(fetchers)-1 { 501 // Close the queue when all fetchers have exited. 502 // This will cause the block processor to end when 503 // it has processed the queue. 504 d.queue.Close() 505 } 506 if err = <-errc; err != nil { 507 break 508 } 509 } 510 d.queue.Close() 511 d.Cancel() 512 log.Debug("Synchronising with the network end", "mode", mode, "headerNumber", d.blockchain.CurrentHeader().Number.Uint64(), "blockNumber", d.blockchain.CurrentBlock().NumberU64(), "fastNumber", d.blockchain.CurrentFastBlock().NumberU64(), "timestamp", time.Now().UnixNano() / 1e6) 513 return err 514 } 515 516 // cancel aborts all of the operations and resets the queue. However, cancel does 517 // not wait for the running download goroutines to finish. This method should be 518 // used when cancelling the downloads from inside the downloader. 519 func (d *Downloader) cancel() { 520 // Close the current cancel channel 521 d.cancelLock.Lock() 522 if d.cancelCh != nil { 523 select { 524 case <-d.cancelCh: 525 // Channel was already closed 526 default: 527 close(d.cancelCh) 528 } 529 } 530 d.cancelLock.Unlock() 531 } 532 533 // Cancel aborts all of the operations and waits for all download goroutines to 534 // finish before returning. 535 func (d *Downloader) Cancel() { 536 d.cancel() 537 d.cancelWg.Wait() 538 } 539 540 // Terminate interrupts the downloader, canceling all pending operations. 541 // The downloader cannot be reused after calling Terminate. 542 func (d *Downloader) Terminate() { 543 // Close the termination channel (make sure double close is allowed) 544 d.quitLock.Lock() 545 select { 546 case <-d.quitCh: 547 default: 548 close(d.quitCh) 549 } 550 d.quitLock.Unlock() 551 552 // Cancel any pending download requests 553 d.Cancel() 554 } 555 556 // fetchHeight retrieves the head header of the remote peer to aid in estimating 557 // the total time a pending synchronisation would take. 558 func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) { 559 p.log.Debug("Retrieving remote chain height") 560 561 // Request the advertised remote head block and wait for the response 562 head, _ := p.peer.Head() 563 go p.peer.RequestHeadersByHash(head, 1, 0, false) 564 565 ttl := d.requestTTL() 566 timeout := time.After(ttl) 567 for { 568 select { 569 case <-d.cancelCh: 570 return nil, errCancelBlockFetch 571 572 case packet := <-d.headerCh: 573 // Discard anything not from the origin peer 574 if packet.PeerId() != p.id { 575 log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 576 break 577 } 578 // Make sure the peer actually gave something valid 579 headers := packet.(*headerPack).headers 580 if len(headers) != 1 { 581 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 582 return nil, errBadPeer 583 } 584 head := headers[0] 585 p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash()) 586 return head, nil 587 588 case <-timeout: 589 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 590 return nil, errTimeout 591 592 case <-d.bodyCh: 593 case <-d.receiptCh: 594 // Out of bounds delivery, ignore 595 } 596 } 597 } 598 599 func (d *Downloader) fetchLatestPposStorage(p *peerConnection) (*types.Header, uint64, error) { 600 p.log.Debug("Retrieving latest ppos storage cache from remote peer") 601 602 go p.peer.RequestLatestPposStorage() 603 604 ttl := d.requestTTL() 605 timeout := time.After(ttl) 606 for { 607 select { 608 case <-d.cancelCh: 609 return nil, 0, errCancelBlockFetch 610 611 case packet := <-d.pposStorageCh: 612 // Discard anything not from the origin peer 613 if packet.PeerId() != p.id { 614 log.Debug("Received ppos storage from incorrect peer", "peer", packet.PeerId()) 615 break 616 } 617 // Make sure the peer actually gave something valid 618 latest := packet.(*pposStoragePack).latest 619 pivot := packet.(*pposStoragePack).pivot 620 data := packet.(*pposStoragePack).storage 621 p.log.Debug("fetch pposStorage content", "latest", latest.Number.Uint64(), "pivot", pivot.Number.Uint64(), "data length", len(data), "data md5", md5.Sum(data)) 622 623 if pivot.Number.Cmp(latest.Number) > 0 { 624 p.log.Debug("pivotNumber is larger than latestNumber", "pivotNumber", pivot.Number.Uint64(), "latestNumber", latest.Number.Uint64()) 625 return nil, 0, errBadPeer 626 } 627 if !d.storagePposCachePoint(pivot.Number.Uint64()) { 628 p.log.Debug("pivotNumber is an incorrect pivot point", "pivotNumber", pivot.Number.Uint64(), "latestNumber", latest.Number.Uint64()) 629 return nil, 0, errBadPeer 630 } 631 if err := ppos_storage.GetPPosTempPtr().PushPPosStorageProto(data); err != nil { 632 p.log.Debug("pushPPosStorageProto error", "pivotNumber", pivot.Number.Uint64(), "latestNumber", latest.Number.Uint64(), "err", err) 633 return nil, 0, errPushPPosStorageProto 634 } 635 636 return latest, pivot.Number.Uint64(), nil 637 638 case <-timeout: 639 p.log.Debug("Waiting for ppos storage timed out", "elapsed", ttl) 640 return nil, 0, errTimeout 641 } 642 } 643 } 644 645 func (d *Downloader) storagePposCachePoint(point uint64) bool { 646 p := uint64(common.BaseSwitchWitness - common.BaseElection + 1) 647 return (point + p) % common.BaseSwitchWitness == 0 648 } 649 650 func (d *Downloader) CalStoragePposCachePoint(blockNumber uint64) uint64 { 651 if blockNumber < common.BaseElection { 652 return 0 653 } else if blockNumber == common.BaseElection { 654 return blockNumber - 1 655 } else { 656 d := (blockNumber - common.BaseElection) / common.BaseSwitchWitness 657 return common.BaseSwitchWitness * d + common.BaseElection - 1 658 } 659 } 660 661 func (d *Downloader) findOrigin(p *peerConnection, height uint64, pivot uint64) (uint64, uint64, error) { 662 var current *types.Block 663 if d.mode == FullSync { 664 current = d.blockchain.CurrentBlock() 665 } else if d.mode == FastSync { 666 current = d.blockchain.CurrentFastBlock() 667 } 668 669 currentNumber := current.NumberU64() 670 p.log.Info("findOrigin in download", "currentNumber", currentNumber, "remoteHeight", height, "remotePivot", pivot) 671 if currentNumber >= height { 672 p.log.Info("current block number is higher than remote peer,no need to sync", "currentNumber", currentNumber, "remoteHeight", height, "remotePivot", pivot) 673 return 0, 0, errNoNeedSync 674 } 675 if currentNumber >= pivot { 676 pivot = currentNumber 677 } 678 679 go p.peer.RequestHeadersByNumber(currentNumber, 1, 0, false) 680 681 ttl := d.requestTTL() 682 timeout := time.After(ttl) 683 for { 684 select { 685 case <-d.cancelCh: 686 return 0, 0, errCancelHeaderFetch 687 688 case packet := <-d.headerCh: 689 // Discard anything not from the origin peer 690 if packet.PeerId() != p.id { 691 log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 692 break 693 } 694 // Make sure the peer actually gave something valid 695 headers := packet.(*headerPack).headers 696 if len(headers) == 0 { 697 p.log.Warn("Empty head header set") 698 return 0, 0, errEmptyHeaderSet 699 } 700 if len(headers) == 1 && headers[0].Number.Uint64() == currentNumber && headers[0].Hash() == current.Hash() { 701 return currentNumber, pivot, nil 702 } 703 p.log.Warn("remote chain is invalid","currentNumber", currentNumber, "currentHash", current.Hash(), "remoteNumber", headers[0].Number.Uint64(), "remoteHash", headers[0].Hash()) 704 return 0, 0, errInvalidChain 705 706 case <-timeout: 707 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 708 return 0, 0, errTimeout 709 } 710 } 711 } 712 713 // findAncestor tries to locate the common ancestor link of the local chain and 714 // a remote peers blockchain. In the general case when our node was in sync and 715 // on the correct chain, checking the top N links should already get us a match. 716 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 717 // the head links match), we do a binary search to find the common ancestor. 718 func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) { 719 // Figure out the valid ancestor range to prevent rewrite attacks 720 floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64() 721 722 if d.mode == FullSync { 723 ceil = d.blockchain.CurrentBlock().NumberU64() 724 } else if d.mode == FastSync { 725 ceil = d.blockchain.CurrentFastBlock().NumberU64() 726 } 727 if ceil >= MaxForkAncestry { 728 floor = int64(ceil - MaxForkAncestry) 729 } 730 p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height) 731 732 // Request the topmost blocks to short circuit binary ancestor lookup 733 head := ceil 734 if head > height { 735 head = height 736 } 737 from := int64(head) - int64(MaxHeaderFetch) 738 if from < 0 { 739 from = 0 740 } 741 // Span out with 15 block gaps into the future to catch bad head reports 742 limit := 2 * MaxHeaderFetch / 16 743 count := 1 + int((int64(ceil)-from)/16) 744 if count > limit { 745 count = limit 746 } 747 go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false) 748 749 // Wait for the remote response to the head fetch 750 number, hash := uint64(0), common.Hash{} 751 752 ttl := d.requestTTL() 753 timeout := time.After(ttl) 754 755 for finished := false; !finished; { 756 select { 757 case <-d.cancelCh: 758 return 0, errCancelHeaderFetch 759 760 case packet := <-d.headerCh: 761 // Discard anything not from the origin peer 762 if packet.PeerId() != p.id { 763 log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 764 break 765 } 766 // Make sure the peer actually gave something valid 767 headers := packet.(*headerPack).headers 768 if len(headers) == 0 { 769 p.log.Warn("Empty head header set") 770 return 0, errEmptyHeaderSet 771 } 772 // Make sure the peer's reply conforms to the request 773 for i := 0; i < len(headers); i++ { 774 if number := headers[i].Number.Int64(); number != from+int64(i)*16 { 775 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number) 776 return 0, errInvalidChain 777 } 778 } 779 // Check if a common ancestor was found 780 finished = true 781 for i := len(headers) - 1; i >= 0; i-- { 782 // Skip any headers that underflow/overflow our requested set 783 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { 784 continue 785 } 786 // Otherwise check if we already know the header or not 787 if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) { 788 number, hash = headers[i].Number.Uint64(), headers[i].Hash() 789 790 // If every header is known, even future ones, the peer straight out lied about its head 791 if number > height && i == limit-1 { 792 p.log.Warn("Lied about chain head", "reported", height, "found", number) 793 return 0, errStallingPeer 794 } 795 break 796 } 797 } 798 799 case <-timeout: 800 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 801 return 0, errTimeout 802 803 case <-d.bodyCh: 804 case <-d.receiptCh: 805 // Out of bounds delivery, ignore 806 } 807 } 808 // If the head fetch already found an ancestor, return 809 if hash != (common.Hash{}) { 810 if int64(number) <= floor { 811 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 812 return 0, errInvalidAncestor 813 } 814 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 815 return number, nil 816 } 817 // Ancestor not found, we need to binary search over our chain 818 start, end := uint64(0), head 819 if floor > 0 { 820 start = uint64(floor) 821 } 822 for start+1 < end { 823 // Split our chain interval in two, and request the hash to cross check 824 check := (start + end) / 2 825 826 ttl := d.requestTTL() 827 timeout := time.After(ttl) 828 829 go p.peer.RequestHeadersByNumber(check, 1, 0, false) 830 831 // Wait until a reply arrives to this request 832 for arrived := false; !arrived; { 833 select { 834 case <-d.cancelCh: 835 return 0, errCancelHeaderFetch 836 837 case packer := <-d.headerCh: 838 // Discard anything not from the origin peer 839 if packer.PeerId() != p.id { 840 log.Debug("Received headers from incorrect peer", "peer", packer.PeerId()) 841 break 842 } 843 // Make sure the peer actually gave something valid 844 headers := packer.(*headerPack).headers 845 if len(headers) != 1 { 846 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 847 return 0, errBadPeer 848 } 849 arrived = true 850 851 // Modify the search interval based on the response 852 if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) { 853 end = check 854 break 855 } 856 header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists 857 if header.Number.Uint64() != check { 858 p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 859 return 0, errBadPeer 860 } 861 start = check 862 863 case <-timeout: 864 p.log.Debug("Waiting for search header timed out", "elapsed", ttl) 865 return 0, errTimeout 866 867 case <-d.bodyCh: 868 case <-d.receiptCh: 869 // Out of bounds delivery, ignore 870 } 871 } 872 } 873 // Ensure valid ancestry and return 874 if int64(start) <= floor { 875 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 876 return 0, errInvalidAncestor 877 } 878 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 879 return start, nil 880 } 881 882 // fetchHeaders keeps retrieving headers concurrently from the number 883 // requested, until no more are returned, potentially throttling on the way. To 884 // facilitate concurrency but still protect against malicious nodes sending bad 885 // headers, we construct a header chain skeleton using the "origin" peer we are 886 // syncing with, and fill in the missing headers using anyone else. Headers from 887 // other peers are only accepted if they map cleanly to the skeleton. If no one 888 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 889 // the origin is dropped. 890 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error { 891 p.log.Debug("Directing header downloads", "origin", from, "pivot", pivot) 892 defer p.log.Debug("Header download terminated") 893 894 // Create a timeout timer, and the associated header fetcher 895 skeleton := true // Skeleton assembly phase or finishing up 896 request := time.Now() // time of the last skeleton fetch request 897 timeout := time.NewTimer(0) // timer to dump a non-responsive active peer 898 <-timeout.C // timeout channel should be initially empty 899 defer timeout.Stop() 900 901 var ttl time.Duration 902 getHeaders := func(from uint64) { 903 request = time.Now() 904 905 ttl = d.requestTTL() 906 timeout.Reset(ttl) 907 908 if skeleton { 909 p.log.Debug("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 910 go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 911 } else { 912 p.log.Debug("Fetching full headers", "count", MaxHeaderFetch, "from", from) 913 go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) 914 } 915 } 916 // Start pulling the header chain skeleton until all is done 917 getHeaders(from) 918 919 for { 920 select { 921 case <-d.cancelCh: 922 return errCancelHeaderFetch 923 924 case packet := <-d.headerCh: 925 // Make sure the active peer is giving us the skeleton headers 926 if packet.PeerId() != p.id { 927 log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) 928 break 929 } 930 headerReqTimer.UpdateSince(request) 931 timeout.Stop() 932 933 // If the skeleton's finished, pull any remaining head headers directly from the origin 934 if packet.Items() == 0 && skeleton { 935 log.Debug("skeleton's finished") 936 skeleton = false 937 getHeaders(from) 938 continue 939 } 940 // If no more headers are inbound, notify the content fetchers and return 941 if packet.Items() == 0 { 942 // Don't abort header fetches while the pivot is downloading 943 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 944 p.log.Debug("No headers, waiting for pivot commit") 945 select { 946 case <-time.After(fsHeaderContCheck): 947 getHeaders(from) 948 continue 949 case <-d.cancelCh: 950 return errCancelHeaderFetch 951 } 952 } 953 // Pivot done (or not in fast sync) and no more headers, terminate the process 954 p.log.Debug("No more headers available") 955 select { 956 case d.headerProcCh <- nil: 957 return nil 958 case <-d.cancelCh: 959 return errCancelHeaderFetch 960 } 961 } 962 headers := packet.(*headerPack).headers 963 964 // If we received a skeleton batch, resolve internals concurrently 965 if skeleton { 966 p.log.Trace("Fill Header Skeleton", "count", len(headers), "from", from, "header from", headers[0].Number.Uint64(), "header end", headers[len(headers)-1].Number.Uint64()) 967 filled, proced, err := d.fillHeaderSkeleton(from, headers) 968 if err != nil { 969 p.log.Debug("Skeleton chain invalid", "err", err) 970 return errInvalidChain 971 } 972 headers = filled[proced:] 973 from += uint64(proced) 974 p.log.Trace("After Fill Header Skeleton", "from", from) 975 } 976 // Insert all the new headers and fetch the next batch 977 if len(headers) > 0 { 978 p.log.Trace("Scheduling New Headers", "count", len(headers), "from", from, "headerFrom", headers[0].Number.Uint64(), "headerEnd", headers[len(headers)-1].Number.Uint64()) 979 if from != headers[0].Number.Uint64() { 980 p.log.Debug("ignore unexpected deliver headers", "count", len(headers), "from", from, "header from", headers[0].Number.Uint64(), "header end", headers[len(headers)-1].Number.Uint64()) 981 continue 982 } 983 select { 984 case d.headerProcCh <- headers: 985 case <-d.cancelCh: 986 return errCancelHeaderFetch 987 } 988 from += uint64(len(headers)) 989 p.log.Trace("After Scheduling New Headers", "from", from) 990 } 991 getHeaders(from) 992 993 case <-timeout.C: 994 if d.dropPeer == nil { 995 // The dropPeer method is nil when `--copydb` is used for a local copy. 996 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 997 p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) 998 break 999 } 1000 // Header retrieval timed out, consider the peer bad and drop 1001 p.log.Debug("Header request timed out", "elapsed", ttl) 1002 headerTimeoutMeter.Mark(1) 1003 d.dropPeer(p.id) 1004 1005 // Finish the sync gracefully instead of dumping the gathered data though 1006 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1007 select { 1008 case ch <- false: 1009 case <-d.cancelCh: 1010 } 1011 } 1012 select { 1013 case d.headerProcCh <- nil: 1014 case <-d.cancelCh: 1015 } 1016 return errBadPeer 1017 } 1018 } 1019 } 1020 1021 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 1022 // and maps them to the provided skeleton header chain. 1023 // 1024 // Any partial results from the beginning of the skeleton is (if possible) forwarded 1025 // immediately to the header processor to keep the rest of the pipeline full even 1026 // in the case of header stalls. 1027 // 1028 // The method returns the entire filled skeleton and also the number of headers 1029 // already forwarded for processing. 1030 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { 1031 log.Debug("Filling up skeleton", "from", from) 1032 d.queue.ScheduleSkeleton(from, skeleton) 1033 1034 var ( 1035 deliver = func(packet dataPack) (int, error) { 1036 pack := packet.(*headerPack) 1037 return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh) 1038 } 1039 expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } 1040 throttle = func() bool { return false } 1041 reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) { 1042 return d.queue.ReserveHeaders(p, count), false, nil 1043 } 1044 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } 1045 capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) } 1046 setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) } 1047 ) 1048 err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, 1049 d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, 1050 nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") 1051 1052 log.Debug("Skeleton fill terminated", "err", err) 1053 1054 filled, proced := d.queue.RetrieveHeaders() 1055 return filled, proced, err 1056 } 1057 1058 // fetchBodies iteratively downloads the scheduled block bodies, taking any 1059 // available peers, reserving a chunk of blocks for each, waiting for delivery 1060 // and also periodically checking for timeouts. 1061 func (d *Downloader) fetchBodies(from uint64) error { 1062 log.Debug("Downloading block bodies", "origin", from) 1063 1064 var ( 1065 deliver = func(packet dataPack) (int, error) { 1066 pack := packet.(*bodyPack) 1067 return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles, pack.signatures) 1068 } 1069 expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } 1070 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } 1071 capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) } 1072 setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) } 1073 ) 1074 err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, 1075 d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, 1076 d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") 1077 1078 log.Debug("Block body download terminated", "err", err) 1079 return err 1080 } 1081 1082 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 1083 // available peers, reserving a chunk of receipts for each, waiting for delivery 1084 // and also periodically checking for timeouts. 1085 func (d *Downloader) fetchReceipts(from uint64) error { 1086 log.Debug("Downloading transaction receipts", "origin", from) 1087 1088 var ( 1089 deliver = func(packet dataPack) (int, error) { 1090 pack := packet.(*receiptPack) 1091 return d.queue.DeliverReceipts(pack.peerID, pack.receipts) 1092 } 1093 expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } 1094 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } 1095 capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) } 1096 setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) } 1097 ) 1098 err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, 1099 d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, 1100 d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") 1101 1102 log.Debug("Transaction receipt download terminated", "err", err) 1103 return err 1104 } 1105 1106 // fetchParts iteratively downloads scheduled block parts, taking any available 1107 // peers, reserving a chunk of fetch requests for each, waiting for delivery and 1108 // also periodically checking for timeouts. 1109 // 1110 // As the scheduling/timeout logic mostly is the same for all downloaded data 1111 // types, this method is used by each for data gathering and is instrumented with 1112 // various callbacks to handle the slight differences between processing them. 1113 // 1114 // The instrumentation parameters: 1115 // - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) 1116 // - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) 1117 // - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) 1118 // - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) 1119 // - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) 1120 // - pending: task callback for the number of requests still needing download (detect completion/non-completability) 1121 // - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) 1122 // - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) 1123 // - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) 1124 // - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) 1125 // - fetch: network callback to actually send a particular download request to a physical remote peer 1126 // - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) 1127 // - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) 1128 // - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks 1129 // - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) 1130 // - kind: textual label of the type being downloaded to display in log mesages 1131 func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, 1132 expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error), 1133 fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, 1134 idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error { 1135 1136 // Create a ticker to detect expired retrieval tasks 1137 ticker := time.NewTicker(100 * time.Millisecond) 1138 defer ticker.Stop() 1139 1140 update := make(chan struct{}, 1) 1141 1142 // Prepare the queue and fetch block parts until the block header fetcher's done 1143 finished := false 1144 for { 1145 select { 1146 case <-d.cancelCh: 1147 return errCancel 1148 1149 case packet := <-deliveryCh: 1150 // If the peer was previously banned and failed to deliver its pack 1151 // in a reasonable time frame, ignore its message. 1152 if peer := d.peers.Peer(packet.PeerId()); peer != nil { 1153 // Deliver the received chunk of data and check chain validity 1154 accepted, err := deliver(packet) 1155 if err == errInvalidChain { 1156 return err 1157 } 1158 // Unless a peer delivered something completely else than requested (usually 1159 // caused by a timed out request which came through in the end), set it to 1160 // idle. If the delivery's stale, the peer should have already been idled. 1161 if err != errStaleDelivery { 1162 setIdle(peer, accepted) 1163 } 1164 // Issue a log to the user to see what's going on 1165 switch { 1166 case err == nil && packet.Items() == 0: 1167 peer.log.Trace("Requested data not delivered", "type", kind) 1168 case err == nil: 1169 peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) 1170 default: 1171 peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err) 1172 } 1173 } 1174 // Blocks assembled, try to update the progress 1175 select { 1176 case update <- struct{}{}: 1177 default: 1178 } 1179 1180 case cont := <-wakeCh: 1181 // The header fetcher sent a continuation flag, check if it's done 1182 if !cont { 1183 finished = true 1184 } 1185 // Headers arrive, try to update the progress 1186 select { 1187 case update <- struct{}{}: 1188 default: 1189 } 1190 1191 case <-ticker.C: 1192 // Sanity check update the progress 1193 select { 1194 case update <- struct{}{}: 1195 default: 1196 } 1197 1198 case <-update: 1199 // Short circuit if we lost all our peers 1200 if d.peers.Len() == 0 { 1201 return errNoPeers 1202 } 1203 // Check for fetch request timeouts and demote the responsible peers 1204 for pid, fails := range expire() { 1205 if peer := d.peers.Peer(pid); peer != nil { 1206 // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps 1207 // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times 1208 // out that sync wise we need to get rid of the peer. 1209 // 1210 // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth 1211 // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing 1212 // how response times reacts, to it always requests one more than the minimum (i.e. min 2). 1213 if fails > 2 { 1214 peer.log.Trace("Data delivery timed out", "type", kind) 1215 setIdle(peer, 0) 1216 } else { 1217 peer.log.Debug("Stalling delivery, dropping", "type", kind) 1218 if d.dropPeer == nil { 1219 // The dropPeer method is nil when `--copydb` is used for a local copy. 1220 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 1221 peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) 1222 } else { 1223 d.dropPeer(pid) 1224 } 1225 } 1226 } 1227 } 1228 // If there's nothing more to fetch, wait or terminate 1229 if pending() == 0 { 1230 if !inFlight() && finished { 1231 log.Debug("Data fetching completed", "type", kind) 1232 return nil 1233 } 1234 break 1235 } 1236 // Send a download request to all idle peers, until throttled 1237 progressed, throttled, running := false, false, inFlight() 1238 idles, total := idle() 1239 1240 for _, peer := range idles { 1241 // Short circuit if throttling activated 1242 if throttle() { 1243 throttled = true 1244 break 1245 } 1246 // Short circuit if there is no more available task. 1247 if pending() == 0 { 1248 break 1249 } 1250 // Reserve a chunk of fetches for a peer. A nil can mean either that 1251 // no more headers are available, or that the peer is known not to 1252 // have them. 1253 request, progress, err := reserve(peer, capacity(peer)) 1254 if err != nil { 1255 return err 1256 } 1257 if progress { 1258 progressed = true 1259 } 1260 if request == nil { 1261 continue 1262 } 1263 if request.From > 0 { 1264 peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) 1265 } else { 1266 peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) 1267 } 1268 // Fetch the chunk and make sure any errors return the hashes to the queue 1269 if fetchHook != nil { 1270 fetchHook(request.Headers) 1271 } 1272 if err := fetch(peer, request); err != nil { 1273 // Although we could try and make an attempt to fix this, this error really 1274 // means that we've double allocated a fetch task to a peer. If that is the 1275 // case, the internal state of the downloader and the queue is very wrong so 1276 // better hard crash and note the error instead of silently accumulating into 1277 // a much bigger issue. 1278 panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) 1279 } 1280 running = true 1281 } 1282 // Make sure that we have peers available for fetching. If all peers have been tried 1283 // and all failed throw an error 1284 if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { 1285 return errPeersUnavailable 1286 } 1287 } 1288 } 1289 } 1290 1291 // processHeaders takes batches of retrieved headers from an input channel and 1292 // keeps processing and scheduling them into the header chain and downloader's 1293 // queue until the stream ends or a failure occurs. 1294 func (d *Downloader) processHeaders(origin uint64, pivot uint64, bn *big.Int) error { 1295 // Keep a count of uncertain headers to roll back 1296 rollback := []*types.Header{} 1297 defer func() { 1298 if len(rollback) > 0 { 1299 // Flatten the headers and roll them back 1300 hashes := make([]common.Hash, len(rollback)) 1301 for i, header := range rollback { 1302 hashes[i] = header.Hash() 1303 } 1304 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1305 if d.mode != LightSync { 1306 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1307 lastBlock = d.blockchain.CurrentBlock().Number() 1308 } 1309 d.lightchain.Rollback(hashes) 1310 curFastBlock, curBlock := common.Big0, common.Big0 1311 if d.mode != LightSync { 1312 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1313 curBlock = d.blockchain.CurrentBlock().Number() 1314 } 1315 log.Warn("Rolled back headers", "count", len(hashes), 1316 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1317 "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1318 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock)) 1319 } 1320 }() 1321 1322 // Wait for batches of headers to process 1323 gotHeaders := false 1324 1325 for { 1326 select { 1327 case <-d.cancelCh: 1328 return errCancelHeaderProcessing 1329 1330 case headers := <-d.headerProcCh: 1331 // Terminate header processing if we synced up 1332 if len(headers) == 0 { 1333 // Notify everyone that headers are fully processed 1334 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1335 select { 1336 case ch <- false: 1337 case <-d.cancelCh: 1338 } 1339 } 1340 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1341 // better chain compared to ours. The only exception is if its promised blocks were 1342 // already imported by other means (e.g. fetcher): 1343 // 1344 // R <remote peer>, L <local node>: Both at block 10 1345 // R: Mine block 11, and propagate it to L 1346 // L: Queue block 11 for import 1347 // L: Notice that R's head and TD increased compared to ours, start sync 1348 // L: Import of block 11 finishes 1349 // L: Sync begins, and finds common ancestor at 11 1350 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1351 // R: Nothing to give 1352 if d.mode != LightSync { 1353 head := d.blockchain.CurrentBlock() 1354 if !gotHeaders && bn.Cmp(head.Number()) > 0 { 1355 return errStallingPeer 1356 } 1357 } 1358 // If fast or light syncing, ensure promised headers are indeed delivered. This is 1359 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1360 // of delivering the post-pivot blocks that would flag the invalid content. 1361 // 1362 // This check cannot be executed "as is" for full imports, since blocks may still be 1363 // queued for processing when the header download completes. However, as long as the 1364 // peer gave us something useful, we're already happy/progressed (above check). 1365 if d.mode == FastSync || d.mode == LightSync { 1366 head := d.lightchain.CurrentHeader() 1367 if bn.Cmp(head.Number) > 0 { 1368 return errStallingPeer 1369 } 1370 } 1371 // Disable any rollback and return 1372 rollback = nil 1373 return nil 1374 } 1375 // Otherwise split the chunk of headers into batches and process them 1376 gotHeaders = true 1377 1378 for len(headers) > 0 { 1379 // Terminate if something failed in between processing chunks 1380 select { 1381 case <-d.cancelCh: 1382 return errCancelHeaderProcessing 1383 default: 1384 } 1385 // Select the next chunk of headers to import 1386 limit := maxHeadersProcess 1387 if limit > len(headers) { 1388 limit = len(headers) 1389 } 1390 chunk := headers[:limit] 1391 1392 // In case of header only syncing, validate the chunk immediately 1393 if d.mode == FastSync || d.mode == LightSync { 1394 // Collect the yet unknown headers to mark them as uncertain 1395 unknown := make([]*types.Header, 0, len(headers)) 1396 for _, header := range chunk { 1397 if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) { 1398 unknown = append(unknown, header) 1399 } 1400 } 1401 // If we're importing pure headers, verify based on their recentness 1402 frequency := fsHeaderCheckFrequency 1403 if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1404 frequency = 1 1405 } 1406 if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { 1407 // If some headers were inserted, add them too to the rollback list 1408 if n > 0 { 1409 rollback = append(rollback, chunk[:n]...) 1410 } 1411 log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err) 1412 return errInvalidChain 1413 } 1414 // All verifications passed, store newly found uncertain headers 1415 rollback = append(rollback, unknown...) 1416 if len(rollback) > fsHeaderSafetyNet { 1417 rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 1418 } 1419 } 1420 // Unless we're doing light chains, schedule the headers for associated content retrieval 1421 if d.mode == FullSync || d.mode == FastSync { 1422 // If we've reached the allowed number of pending headers, stall a bit 1423 for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1424 select { 1425 case <-d.cancelCh: 1426 return errCancelHeaderProcessing 1427 case <-time.After(time.Second): 1428 } 1429 } 1430 // Otherwise insert the headers for content retrieval 1431 inserts := d.queue.Schedule(chunk, origin) 1432 if len(inserts) != len(chunk) { 1433 log.Debug("Stale headers") 1434 return errBadPeer 1435 } 1436 } 1437 headers = headers[limit:] 1438 origin += uint64(limit) 1439 } 1440 1441 // Update the highest block number we know if a higher one is found. 1442 d.syncStatsLock.Lock() 1443 if d.syncStatsChainHeight < origin { 1444 d.syncStatsChainHeight = origin - 1 1445 } 1446 d.syncStatsLock.Unlock() 1447 1448 // Signal the content downloaders of the availablility of new tasks 1449 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1450 select { 1451 case ch <- true: 1452 default: 1453 } 1454 } 1455 } 1456 } 1457 } 1458 1459 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1460 func (d *Downloader) processFullSyncContent() error { 1461 for { 1462 results := d.queue.Results(true) 1463 if len(results) == 0 { 1464 return nil 1465 } 1466 if d.chainInsertHook != nil { 1467 d.chainInsertHook(results) 1468 } 1469 if err := d.importBlockResults(results); err != nil { 1470 return err 1471 } 1472 } 1473 } 1474 1475 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1476 // Check for any early termination requests 1477 if len(results) == 0 { 1478 return nil 1479 } 1480 select { 1481 case <-d.quitCh: 1482 return errCancelContentProcessing 1483 default: 1484 } 1485 // Retrieve the a batch of results to import 1486 first, last := results[0].Header, results[len(results)-1].Header 1487 log.Debug("Inserting downloaded chain", "items", len(results), 1488 "firstnum", first.Number, "firsthash", first.Hash(), 1489 "lastnum", last.Number, "lasthash", last.Hash(), 1490 ) 1491 blocks := make([]*types.Block, len(results)) 1492 for i, result := range results { 1493 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles, result.Signatures) 1494 } 1495 if index, err := d.blockchain.InsertChain(blocks); err != nil { 1496 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1497 return errInvalidChain 1498 } 1499 return nil 1500 } 1501 1502 // processFastSyncContent takes fetch results from the queue and writes them to the 1503 // database. It also controls the synchronisation of state nodes of the pivot block. 1504 func (d *Downloader) processFastSyncContent(latest *types.Header, pivot uint64) error { 1505 // Start syncing state of the reported head block. This should get us most of 1506 // the state of the pivot block. 1507 stateSync := d.syncState(latest.Root) 1508 defer stateSync.Cancel() 1509 go func() { 1510 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1511 d.queue.Close() // wake up WaitResults 1512 } 1513 }() 1514 // To cater for moving pivot points, track the pivot block and subsequently 1515 // accumulated download results separately. 1516 var ( 1517 oldPivot *fetchResult // Locked in pivot block, might change eventually 1518 oldTail []*fetchResult // Downloaded content after the pivot 1519 ) 1520 for { 1521 // Wait for the next batch of downloaded data to be available, and if the pivot 1522 // block became stale, move the goalpost 1523 results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1524 if len(results) == 0 { 1525 // If pivot sync is done, stop 1526 if oldPivot == nil { 1527 return stateSync.Cancel() 1528 } 1529 // If sync failed, stop 1530 select { 1531 case <-d.cancelCh: 1532 return stateSync.Cancel() 1533 default: 1534 } 1535 } 1536 if d.chainInsertHook != nil { 1537 d.chainInsertHook(results) 1538 } 1539 if oldPivot != nil { 1540 results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1541 } 1542 P, beforeP, afterP := splitAroundPivot(pivot, results) 1543 if err := d.commitFastSyncData(beforeP, stateSync); err != nil { 1544 return err 1545 } 1546 if P != nil { 1547 log.Debug("pivot point is comming", "pivot", pivot, "P number", P.Header.Number.Uint64()) 1548 // If new pivot block found, cancel old state retrieval and restart 1549 if oldPivot != P { 1550 stateSync.Cancel() 1551 1552 stateSync = d.syncState(P.Header.Root) 1553 defer stateSync.Cancel() 1554 go func() { 1555 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1556 d.queue.Close() // wake up WaitResults 1557 } 1558 }() 1559 oldPivot = P 1560 } 1561 // Wait for completion, occasionally checking for pivot staleness 1562 select { 1563 case <-stateSync.done: 1564 if stateSync.err != nil { 1565 return stateSync.err 1566 } 1567 if err := d.commitPivotBlock(P); err != nil { 1568 return err 1569 } 1570 oldPivot = nil 1571 1572 case <-time.After(time.Second): 1573 oldTail = afterP 1574 continue 1575 } 1576 } 1577 // Fast sync done, pivot commit done, full import 1578 if err := d.importBlockResults(afterP); err != nil { 1579 return err 1580 } 1581 } 1582 } 1583 1584 //// processFastSyncContent takes fetch results from the queue and writes them to the 1585 //// database. It also controls the synchronisation of state nodes of the pivot block. 1586 //func (d *Downloader) processFastSyncContent(latest *types.Header) error { 1587 // // Start syncing state of the reported head block. This should get us most of 1588 // // the state of the pivot block. 1589 // stateSync := d.syncState(latest.Root) 1590 // defer stateSync.Cancel() 1591 // go func() { 1592 // if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1593 // d.queue.Close() // wake up WaitResults 1594 // } 1595 // }() 1596 // // Figure out the ideal pivot block. Note, that this goalpost may move if the 1597 // // sync takes long enough for the chain head to move significantly. 1598 // pivot := uint64(0) 1599 // if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) { 1600 // pivot = height - uint64(fsMinFullBlocks) 1601 // } 1602 // // To cater for moving pivot points, track the pivot block and subsequently 1603 // // accumulated download results separately. 1604 // var ( 1605 // oldPivot *fetchResult // Locked in pivot block, might change eventually 1606 // oldTail []*fetchResult // Downloaded content after the pivot 1607 // ) 1608 // for { 1609 // // Wait for the next batch of downloaded data to be available, and if the pivot 1610 // // block became stale, move the goalpost 1611 // results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1612 // if len(results) == 0 { 1613 // // If pivot sync is done, stop 1614 // if oldPivot == nil { 1615 // return stateSync.Cancel() 1616 // } 1617 // // If sync failed, stop 1618 // select { 1619 // case <-d.cancelCh: 1620 // return stateSync.Cancel() 1621 // default: 1622 // } 1623 // } 1624 // if d.chainInsertHook != nil { 1625 // d.chainInsertHook(results) 1626 // } 1627 // if oldPivot != nil { 1628 // results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1629 // } 1630 // // Split around the pivot block and process the two sides via fast/full sync 1631 // if atomic.LoadInt32(&d.committed) == 0 { 1632 // latest = results[len(results)-1].Header 1633 // if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { 1634 // log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) 1635 // pivot = height - uint64(fsMinFullBlocks) 1636 // } 1637 // } 1638 // P, beforeP, afterP := splitAroundPivot(pivot, results) 1639 // if err := d.commitFastSyncData(beforeP, stateSync); err != nil { 1640 // return err 1641 // } 1642 // if P != nil { 1643 // // If new pivot block found, cancel old state retrieval and restart 1644 // if oldPivot != P { 1645 // stateSync.Cancel() 1646 // 1647 // stateSync = d.syncState(P.Header.Root) 1648 // defer stateSync.Cancel() 1649 // go func() { 1650 // if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1651 // d.queue.Close() // wake up WaitResults 1652 // } 1653 // }() 1654 // oldPivot = P 1655 // } 1656 // // Wait for completion, occasionally checking for pivot staleness 1657 // select { 1658 // case <-stateSync.done: 1659 // if stateSync.err != nil { 1660 // return stateSync.err 1661 // } 1662 // if err := d.commitPivotBlock(P); err != nil { 1663 // return err 1664 // } 1665 // oldPivot = nil 1666 // 1667 // case <-time.After(time.Second): 1668 // oldTail = afterP 1669 // continue 1670 // } 1671 // } 1672 // // Fast sync done, pivot commit done, full import 1673 // if err := d.importBlockResults(afterP); err != nil { 1674 // return err 1675 // } 1676 // } 1677 //} 1678 1679 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { 1680 for _, result := range results { 1681 num := result.Header.Number.Uint64() 1682 switch { 1683 case num < pivot: 1684 before = append(before, result) 1685 case num == pivot: 1686 p = result 1687 default: 1688 after = append(after, result) 1689 } 1690 } 1691 return p, before, after 1692 } 1693 1694 func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { 1695 // Check for any early termination requests 1696 if len(results) == 0 { 1697 return nil 1698 } 1699 select { 1700 case <-d.quitCh: 1701 return errCancelContentProcessing 1702 case <-stateSync.done: 1703 if err := stateSync.Wait(); err != nil { 1704 return err 1705 } 1706 default: 1707 } 1708 // Retrieve the a batch of results to import 1709 first, last := results[0].Header, results[len(results)-1].Header 1710 log.Debug("Inserting fast-sync blocks", "items", len(results), 1711 "firstnum", first.Number, "firsthash", first.Hash(), 1712 "lastnumn", last.Number, "lasthash", last.Hash(), 1713 ) 1714 blocks := make([]*types.Block, len(results)) 1715 receipts := make([]types.Receipts, len(results)) 1716 for i, result := range results { 1717 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles, result.Signatures) 1718 receipts[i] = result.Receipts 1719 } 1720 if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil { 1721 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1722 return errInvalidChain 1723 } 1724 return nil 1725 } 1726 1727 func (d *Downloader) commitPivotBlock(result *fetchResult) error { 1728 block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles, result.Signatures) 1729 log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash()) 1730 if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil { 1731 return err 1732 } 1733 if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { 1734 return err 1735 } 1736 atomic.StoreInt32(&d.committed, 1) 1737 return nil 1738 } 1739 1740 // DeliverHeaders injects a new batch of block headers received from a remote 1741 // node into the download schedule. 1742 func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) { 1743 return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) 1744 } 1745 1746 // DeliverBodies injects a new batch of block bodies received from a remote node. 1747 func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header, signatures [][]*common.BlockConfirmSign) (err error) { 1748 return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles, signatures}, bodyInMeter, bodyDropMeter) 1749 } 1750 1751 // DeliverReceipts injects a new batch of receipts received from a remote node. 1752 func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { 1753 return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) 1754 } 1755 1756 // DeliverNodeData injects a new batch of node state data received from a remote node. 1757 func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) { 1758 return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) 1759 } 1760 1761 // DeliverPposStorage injects a new batch of ppos storage received from a remote node. 1762 func (d *Downloader) DeliverPposStorage(id string, latest *types.Header, pivot *types.Header, storage []byte) (err error) { 1763 return d.deliver(id, d.pposStorageCh, &pposStoragePack{id, latest, pivot, storage}, pposStorageInMeter, pposStorageDropMeter) 1764 } 1765 1766 // deliver injects a new batch of data received from a remote node. 1767 func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { 1768 // Update the delivery metrics for both good and failed deliveries 1769 inMeter.Mark(int64(packet.Items())) 1770 defer func() { 1771 if err != nil { 1772 dropMeter.Mark(int64(packet.Items())) 1773 } 1774 }() 1775 // Deliver or abort if the sync is canceled while queuing 1776 d.cancelLock.RLock() 1777 cancel := d.cancelCh 1778 d.cancelLock.RUnlock() 1779 if cancel == nil { 1780 return errNoSyncActive 1781 } 1782 select { 1783 case destCh <- packet: 1784 return nil 1785 case <-cancel: 1786 return errNoSyncActive 1787 } 1788 } 1789 1790 // qosTuner is the quality of service tuning loop that occasionally gathers the 1791 // peer latency statistics and updates the estimated request round trip time. 1792 func (d *Downloader) qosTuner() { 1793 for { 1794 // Retrieve the current median RTT and integrate into the previoust target RTT 1795 rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) 1796 atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) 1797 1798 // A new RTT cycle passed, increase our confidence in the estimated RTT 1799 conf := atomic.LoadUint64(&d.rttConfidence) 1800 conf = conf + (1000000-conf)/2 1801 atomic.StoreUint64(&d.rttConfidence, conf) 1802 1803 // Log the new QoS values and sleep until the next RTT 1804 log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) 1805 select { 1806 case <-d.quitCh: 1807 return 1808 case <-time.After(rtt): 1809 } 1810 } 1811 } 1812 1813 // qosReduceConfidence is meant to be called when a new peer joins the downloader's 1814 // peer set, needing to reduce the confidence we have in out QoS estimates. 1815 func (d *Downloader) qosReduceConfidence() { 1816 // If we have a single peer, confidence is always 1 1817 peers := uint64(d.peers.Len()) 1818 if peers == 0 { 1819 // Ensure peer connectivity races don't catch us off guard 1820 return 1821 } 1822 if peers == 1 { 1823 atomic.StoreUint64(&d.rttConfidence, 1000000) 1824 return 1825 } 1826 // If we have a ton of peers, don't drop confidence) 1827 if peers >= uint64(qosConfidenceCap) { 1828 return 1829 } 1830 // Otherwise drop the confidence factor 1831 conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers 1832 if float64(conf)/1000000 < rttMinConfidence { 1833 conf = uint64(rttMinConfidence * 1000000) 1834 } 1835 atomic.StoreUint64(&d.rttConfidence, conf) 1836 1837 rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) 1838 log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) 1839 } 1840 1841 // requestRTT returns the current target round trip time for a download request 1842 // to complete in. 1843 // 1844 // Note, the returned RTT is .9 of the actually estimated RTT. The reason is that 1845 // the downloader tries to adapt queries to the RTT, so multiple RTT values can 1846 // be adapted to, but smaller ones are preferred (stabler download stream). 1847 func (d *Downloader) requestRTT() time.Duration { 1848 return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10 1849 } 1850 1851 // requestTTL returns the current timeout allowance for a single download request 1852 // to finish under. 1853 func (d *Downloader) requestTTL() time.Duration { 1854 var ( 1855 rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate)) 1856 conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0 1857 ) 1858 ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf) 1859 if ttl > ttlLimit { 1860 ttl = ttlLimit 1861 } 1862 return ttl 1863 } 1864 1865 func (d *Downloader) start() { 1866 atomic.StoreInt32(&d.running, 1) 1867 } 1868 1869 func (d *Downloader) stop() { 1870 atomic.StoreInt32(&d.running, 0) 1871 } 1872 1873 func (d *Downloader) IsRunning() bool { 1874 return atomic.LoadInt32(&d.running) == 1 1875 }