github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/eth/sync.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package eth 18 19 import ( 20 "errors" 21 "math/big" 22 "sync/atomic" 23 "time" 24 25 "github.com/ethereum/go-ethereum/common" 26 "github.com/ethereum/go-ethereum/core/rawdb" 27 "github.com/ethereum/go-ethereum/core/types" 28 "github.com/ethereum/go-ethereum/eth/downloader" 29 "github.com/ethereum/go-ethereum/eth/protocols/eth" 30 "github.com/ethereum/go-ethereum/log" 31 ) 32 33 const ( 34 forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available 35 defaultMinSyncPeers = 5 // Amount of peers desired to start syncing 36 ) 37 38 // syncTransactions starts sending all currently pending transactions to the given peer. 39 func (h *handler) syncTransactions(p *eth.Peer) { 40 // Assemble the set of transaction to broadcast or announce to the remote 41 // peer. Fun fact, this is quite an expensive operation as it needs to sort 42 // the transactions if the sorting is not cached yet. However, with a random 43 // order, insertions could overflow the non-executable queues and get dropped. 
44 // 45 // TODO(karalabe): Figure out if we could get away with random order somehow 46 var txs types.Transactions 47 pending := h.txpool.Pending(false) 48 for _, batch := range pending { 49 txs = append(txs, batch...) 50 } 51 if len(txs) == 0 { 52 return 53 } 54 // The eth/65 protocol introduces proper transaction announcements, so instead 55 // of dripping transactions across multiple peers, just send the entire list as 56 // an announcement and let the remote side decide what they need (likely nothing). 57 hashes := make([]common.Hash, len(txs)) 58 for i, tx := range txs { 59 hashes[i] = tx.Hash() 60 } 61 p.AsyncSendPooledTransactionHashes(hashes) 62 } 63 64 // chainSyncer coordinates blockchain sync components. 65 type chainSyncer struct { 66 handler *handler 67 force *time.Timer 68 forced bool // true when force timer fired 69 warned time.Time 70 peerEventCh chan struct{} 71 doneCh chan error // non-nil when sync is running 72 } 73 74 // chainSyncOp is a scheduled sync operation. 75 type chainSyncOp struct { 76 mode downloader.SyncMode 77 peer *eth.Peer 78 td *big.Int 79 head common.Hash 80 } 81 82 // newChainSyncer creates a chainSyncer. 83 func newChainSyncer(handler *handler) *chainSyncer { 84 return &chainSyncer{ 85 handler: handler, 86 peerEventCh: make(chan struct{}), 87 } 88 } 89 90 // handlePeerEvent notifies the syncer about a change in the peer set. 91 // This is called for new peers and every time a peer announces a new 92 // chain head. 93 func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool { 94 select { 95 case cs.peerEventCh <- struct{}{}: 96 return true 97 case <-cs.handler.quitSync: 98 return false 99 } 100 } 101 102 // loop runs in its own goroutine and launches the sync when necessary. 
func (cs *chainSyncer) loop() {
	defer cs.handler.wg.Done()

	// Run the block/tx fetchers for the lifetime of the syncer, and make sure
	// they and the downloader are torn down again on exit.
	cs.handler.blockFetcher.Start()
	cs.handler.txFetcher.Start()
	defer cs.handler.blockFetcher.Stop()
	defer cs.handler.txFetcher.Stop()
	defer cs.handler.downloader.Terminate()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		// On every wake-up, check whether a sync should be launched.
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}
		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case err := <-cs.doneCh:
			// Previous sync cycle finished; clear the running marker and rearm
			// the force timer for the next cycle.
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false

			// If we've reached the merge transition but no beacon client is available, or
			// it has not yet switched us over, keep warning the user that their infra is
			// potentially flaky.
			if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second {
				log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
				cs.warned = time.Now()
			}
		case <-cs.force.C:
			// Force timer fired: next nextSyncOp call will accept a single peer.
			cs.forced = true

		case <-cs.handler.quitSync:
			// Disable all insertion on the blockchain. This needs to happen before
			// terminating the downloader because the downloader waits for blockchain
			// inserts, and these can take a long time to finish.
			cs.handler.chain.StopInsert()
			cs.handler.downloader.Terminate()
			if cs.doneCh != nil {
				// Wait for any in-flight sync goroutine to report back before
				// returning, so nothing is left writing after shutdown.
				<-cs.doneCh
			}
			return
		}
	}
}

// nextSyncOp determines whether sync is required at this time.
// It returns nil when no sync should run: one is already in flight, the beacon
// client has taken over, there are too few peers, or we're already in sync.
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running
	}
	// If a beacon client once took over control, disable the entire legacy sync
	// path from here on end. Note, there is a slight "race" between reaching TTD
	// and the beacon client taking over. The downloader will enforce that nothing
	// above the first TTD will be delivered to the chain for import.
	//
	// An alternative would be to check the local chain for exceeding the TTD and
	// avoid triggering a sync in that case, but that could also miss sibling or
	// other family TTD block being accepted.
	if cs.handler.merger.TDDReached() {
		return nil
	}
	// Ensure we're at minimum peer count. The forced flag (set by the force
	// timer in loop) drops the requirement to a single peer.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.handler.maxPeers {
		minPeers = cs.handler.maxPeers
	}
	if cs.handler.peers.len() < minPeers {
		return nil
	}
	// We have enough peers, pick the one with the highest TD, but avoid going
	// over the terminal total difficulty. Above that we expect the consensus
	// clients to direct the chain head to sync to.
	peer := cs.handler.peers.peerWithHighestTD()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		// We seem to be in sync according to the legacy rules. In the merge
		// world, it can also mean we're stuck on the merge block, waiting for
		// a beacon client. In the latter case, notify the user.
		if ttd := cs.handler.chain.Config().TerminalTotalDifficulty; ttd != nil && ourTD.Cmp(ttd) >= 0 && time.Since(cs.warned) > 10*time.Second {
			log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
			cs.warned = time.Now()
		}
		return nil // We're in sync
	}
	return op
}

// peerToSyncOp packages up a peer's announced head and total difficulty into
// a sync operation using the given sync mode.
func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
	peerHead, peerTD := p.Head()
	return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
}

// modeAndLocalHead determines the sync mode to use and the total difficulty of
// the local head block that op candidates are compared against.
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
	// If we're in snap sync mode, return that directly
	if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
		block := cs.handler.chain.CurrentFastBlock()
		td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
		return downloader.SnapSync, td
	}
	// We are probably in full sync, but we might have rewound to before the
	// snap sync pivot, check if we should reenable
	if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
		if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
			block := cs.handler.chain.CurrentFastBlock()
			td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
			return downloader.SnapSync, td
		}
	}
	// Nope, we're really full syncing
	head := cs.handler.chain.CurrentBlock()
	td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
	return downloader.FullSync, td
}

// startSync launches doSync in a new goroutine.
func (cs *chainSyncer) startSync(op *chainSyncOp) {
	// doneCh is buffered so the goroutine can always deliver its result, even
	// if loop has already returned and nobody is receiving.
	cs.doneCh = make(chan error, 1)
	go func() { cs.doneCh <- cs.handler.doSync(op) }()
}

// doSync synchronizes the local blockchain with a remote peer.
func (h *handler) doSync(op *chainSyncOp) error {
	if op.mode == downloader.SnapSync {
		// Before launch the snap sync, we have to ensure user uses the same
		// txlookup limit.
		// The main concern here is: during the snap sync Geth won't index the
		// block(generate tx indices) before the HEAD-limit. But if user changes
		// the limit in the next snap sync(e.g. user kill Geth manually and
		// restart) then it will be hard for Geth to figure out the oldest block
		// has been indexed. So here for the user-experience wise, it's non-optimal
		// that user can't change limit during the snap sync. If changed, Geth
		// will just blindly use the original one.
		limit := h.chain.TxLookupLimit()
		if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {
			rawdb.WriteFastTxLookupLimit(h.database, limit)
		} else if *stored != limit {
			h.chain.SetTxLookupLimit(*stored)
			log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
		}
	}
	// Run the sync cycle, and disable snap sync if we're past the pivot block
	err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode)
	if err != nil {
		return err
	}
	if atomic.LoadUint32(&h.snapSync) == 1 {
		log.Info("Snap sync complete, auto disabling")
		atomic.StoreUint32(&h.snapSync, 0)
	}
	// If we've successfully finished a sync cycle and passed any required checkpoint,
	// enable accepting transactions from the network.
	head := h.chain.CurrentBlock()
	if head.NumberU64() >= h.checkpointNumber {
		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
		// for non-checkpointed (number = 0) private networks.
		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
			atomic.StoreUint32(&h.acceptTxs, 1)
		}
	}
	if head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		h.BroadcastBlock(head, false)
	}
	return nil
}