// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"errors"
	"math/big"
	"time"

	"github.com/calmw/ethereum/common"
	"github.com/calmw/ethereum/core/rawdb"
	"github.com/calmw/ethereum/core/types"
	"github.com/calmw/ethereum/eth/downloader"
	"github.com/calmw/ethereum/eth/protocols/eth"
	"github.com/calmw/ethereum/log"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	defaultMinSyncPeers = 5                // Amount of peers desired to start syncing
)

// syncTransactions starts sending all currently pending transactions to the given peer.
func (h *handler) syncTransactions(p *eth.Peer) {
	// Assemble the set of transactions to broadcast or announce to the remote
	// peer. Fun fact, this is quite an expensive operation as it needs to sort
	// the transactions if the sorting is not cached yet. However, with a random
	// order, insertions could overflow the non-executable queues and get dropped.
	//
	// TODO(karalabe): Figure out if we could get away with random order somehow
	var txs types.Transactions
	pending := h.txpool.Pending(false)
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	// Nothing pending, nothing to announce.
	if len(txs) == 0 {
		return
	}
	// The eth/65 protocol introduces proper transaction announcements, so instead
	// of dripping transactions across multiple peers, just send the entire list as
	// an announcement and let the remote side decide what they need (likely nothing).
	hashes := make([]common.Hash, len(txs))
	for i, tx := range txs {
		hashes[i] = tx.Hash()
	}
	p.AsyncSendPooledTransactionHashes(hashes)
}

// chainSyncer coordinates blockchain sync components.
type chainSyncer struct {
	handler     *handler
	force       *time.Timer   // fires to lower the peer threshold and force a sync attempt
	forced      bool          // true when force timer fired
	warned      time.Time     // last time the post-merge warning was logged (rate limiter)
	peerEventCh chan struct{} // signals new peers and new peer head announcements
	doneCh      chan error    // non-nil when sync is running
}

// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode // sync mode to run (full or snap)
	peer *eth.Peer           // peer to sync against
	td   *big.Int            // total difficulty advertised by the peer
	head common.Hash         // head block hash advertised by the peer
}

// newChainSyncer creates a chainSyncer.
func newChainSyncer(handler *handler) *chainSyncer {
	return &chainSyncer{
		handler:     handler,
		peerEventCh: make(chan struct{}),
	}
}

// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head. Returns false if the handler is shutting down.
func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.handler.quitSync:
		return false
	}
}

// loop runs in its own goroutine and launches the sync when necessary.
102 func (cs *chainSyncer) loop() { 103 defer cs.handler.wg.Done() 104 105 cs.handler.blockFetcher.Start() 106 cs.handler.txFetcher.Start() 107 defer cs.handler.blockFetcher.Stop() 108 defer cs.handler.txFetcher.Stop() 109 defer cs.handler.downloader.Terminate() 110 111 // The force timer lowers the peer count threshold down to one when it fires. 112 // This ensures we'll always start sync even if there aren't enough peers. 113 cs.force = time.NewTimer(forceSyncCycle) 114 defer cs.force.Stop() 115 116 for { 117 if op := cs.nextSyncOp(); op != nil { 118 cs.startSync(op) 119 } 120 select { 121 case <-cs.peerEventCh: 122 // Peer information changed, recheck. 123 case err := <-cs.doneCh: 124 cs.doneCh = nil 125 cs.force.Reset(forceSyncCycle) 126 cs.forced = false 127 128 // If we've reached the merge transition but no beacon client is available, or 129 // it has not yet switched us over, keep warning the user that their infra is 130 // potentially flaky. 131 if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second { 132 log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...") 133 cs.warned = time.Now() 134 } 135 case <-cs.force.C: 136 cs.forced = true 137 138 case <-cs.handler.quitSync: 139 // Disable all insertion on the blockchain. This needs to happen before 140 // terminating the downloader because the downloader waits for blockchain 141 // inserts, and these can take a long time to finish. 142 cs.handler.chain.StopInsert() 143 cs.handler.downloader.Terminate() 144 if cs.doneCh != nil { 145 <-cs.doneCh 146 } 147 return 148 } 149 } 150 } 151 152 // nextSyncOp determines whether sync is required at this time. 153 func (cs *chainSyncer) nextSyncOp() *chainSyncOp { 154 if cs.doneCh != nil { 155 return nil // Sync already running 156 } 157 // If a beacon client once took over control, disable the entire legacy sync 158 // path from here on end. 
Note, there is a slight "race" between reaching TTD 159 // and the beacon client taking over. The downloader will enforce that nothing 160 // above the first TTD will be delivered to the chain for import. 161 // 162 // An alternative would be to check the local chain for exceeding the TTD and 163 // avoid triggering a sync in that case, but that could also miss sibling or 164 // other family TTD block being accepted. 165 if cs.handler.chain.Config().TerminalTotalDifficultyPassed || cs.handler.merger.TDDReached() { 166 return nil 167 } 168 // Ensure we're at minimum peer count. 169 minPeers := defaultMinSyncPeers 170 if cs.forced { 171 minPeers = 1 172 } else if minPeers > cs.handler.maxPeers { 173 minPeers = cs.handler.maxPeers 174 } 175 if cs.handler.peers.len() < minPeers { 176 return nil 177 } 178 // We have enough peers, pick the one with the highest TD, but avoid going 179 // over the terminal total difficulty. Above that we expect the consensus 180 // clients to direct the chain head to sync to. 181 peer := cs.handler.peers.peerWithHighestTD() 182 if peer == nil { 183 return nil 184 } 185 mode, ourTD := cs.modeAndLocalHead() 186 op := peerToSyncOp(mode, peer) 187 if op.td.Cmp(ourTD) <= 0 { 188 // We seem to be in sync according to the legacy rules. In the merge 189 // world, it can also mean we're stuck on the merge block, waiting for 190 // a beacon client. In the latter case, notify the user. 
191 if ttd := cs.handler.chain.Config().TerminalTotalDifficulty; ttd != nil && ourTD.Cmp(ttd) >= 0 && time.Since(cs.warned) > 10*time.Second { 192 log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...") 193 cs.warned = time.Now() 194 } 195 return nil // We're in sync 196 } 197 return op 198 } 199 200 func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp { 201 peerHead, peerTD := p.Head() 202 return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead} 203 } 204 205 func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { 206 // If we're in snap sync mode, return that directly 207 if cs.handler.snapSync.Load() { 208 block := cs.handler.chain.CurrentSnapBlock() 209 td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) 210 return downloader.SnapSync, td 211 } 212 // We are probably in full sync, but we might have rewound to before the 213 // snap sync pivot, check if we should reenable 214 if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { 215 if head := cs.handler.chain.CurrentBlock(); head.Number.Uint64() < *pivot { 216 block := cs.handler.chain.CurrentSnapBlock() 217 td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) 218 return downloader.SnapSync, td 219 } 220 } 221 // Nope, we're really full syncing 222 head := cs.handler.chain.CurrentBlock() 223 td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64()) 224 return downloader.FullSync, td 225 } 226 227 // startSync launches doSync in a new goroutine. 228 func (cs *chainSyncer) startSync(op *chainSyncOp) { 229 cs.doneCh = make(chan error, 1) 230 go func() { cs.doneCh <- cs.handler.doSync(op) }() 231 } 232 233 // doSync synchronizes the local blockchain with a remote peer. 234 func (h *handler) doSync(op *chainSyncOp) error { 235 if op.mode == downloader.SnapSync { 236 // Before launch the snap sync, we have to ensure user uses the same 237 // txlookup limit. 
238 // The main concern here is: during the snap sync Geth won't index the 239 // block(generate tx indices) before the HEAD-limit. But if user changes 240 // the limit in the next snap sync(e.g. user kill Geth manually and 241 // restart) then it will be hard for Geth to figure out the oldest block 242 // has been indexed. So here for the user-experience wise, it's non-optimal 243 // that user can't change limit during the snap sync. If changed, Geth 244 // will just blindly use the original one. 245 limit := h.chain.TxLookupLimit() 246 if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil { 247 rawdb.WriteFastTxLookupLimit(h.database, limit) 248 } else if *stored != limit { 249 h.chain.SetTxLookupLimit(*stored) 250 log.Warn("Update txLookup limit", "provided", limit, "updated", *stored) 251 } 252 } 253 // Run the sync cycle, and disable snap sync if we're past the pivot block 254 err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode) 255 if err != nil { 256 return err 257 } 258 if h.snapSync.Load() { 259 log.Info("Snap sync complete, auto disabling") 260 h.snapSync.Store(false) 261 } 262 // If we've successfully finished a sync cycle, enable accepting transactions 263 // from the network. 264 h.acceptTxs.Store(true) 265 266 head := h.chain.CurrentBlock() 267 if head.Number.Uint64() > 0 { 268 // We've completed a sync cycle, notify all peers of new state. This path is 269 // essential in star-topology networks where a gateway node needs to notify 270 // all its out-of-date peers of the availability of a new block. This failure 271 // scenario will most often crop up in private and hackathon networks with 272 // degenerate connectivity, but it should be healthy for the mainnet too to 273 // more reliably update peers or the local TD state. 274 if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil { 275 h.BroadcastBlock(block, false) 276 } 277 } 278 return nil 279 }