github.com/theQRL/go-zond@v0.1.1/zond/sync.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package zond 18 19 import ( 20 "errors" 21 "math/big" 22 "time" 23 24 "github.com/theQRL/go-zond/common" 25 "github.com/theQRL/go-zond/core/rawdb" 26 "github.com/theQRL/go-zond/log" 27 "github.com/theQRL/go-zond/zond/downloader" 28 "github.com/theQRL/go-zond/zond/protocols/zond" 29 ) 30 31 const ( 32 forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available 33 defaultMinSyncPeers = 5 // Amount of peers desired to start syncing 34 ) 35 36 // syncTransactions starts sending all currently pending transactions to the given peer. 37 func (h *handler) syncTransactions(p *zond.Peer) { 38 var hashes []common.Hash 39 for _, batch := range h.txpool.Pending(false) { 40 for _, tx := range batch { 41 hashes = append(hashes, tx.Hash) 42 } 43 } 44 if len(hashes) == 0 { 45 return 46 } 47 p.AsyncSendPooledTransactionHashes(hashes) 48 } 49 50 // chainSyncer coordinates blockchain sync components. 
type chainSyncer struct {
	handler     *handler      // backreference to the owning protocol handler
	force       *time.Timer   // timer dropping the peer threshold to 1 when it fires
	forced      bool          // true when force timer fired
	warned      time.Time     // timestamp of the last post-merge stall warning (rate limiter)
	peerEventCh chan struct{} // signalled on peer additions and new head announcements
	doneCh      chan error    // non-nil when sync is running
}

// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode // sync mode to run (full or snap)
	peer *zond.Peer          // peer to download from
	td   *big.Int            // total difficulty advertised by the peer
	head common.Hash         // head block hash advertised by the peer
}

// newChainSyncer creates a chainSyncer.
func newChainSyncer(handler *handler) *chainSyncer {
	return &chainSyncer{
		handler:     handler,
		peerEventCh: make(chan struct{}),
	}
}

// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head. It returns false if the handler is shutting down and the
// event was dropped.
func (cs *chainSyncer) handlePeerEvent() bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.handler.quitSync:
		return false
	}
}

// loop runs in its own goroutine and launches the sync when necessary.
// It owns the fetcher/downloader lifecycles and is the only goroutine
// that reads or writes cs.doneCh, cs.forced and cs.warned.
func (cs *chainSyncer) loop() {
	defer cs.handler.wg.Done()

	cs.handler.blockFetcher.Start()
	cs.handler.txFetcher.Start()
	defer cs.handler.blockFetcher.Stop()
	defer cs.handler.txFetcher.Stop()
	defer cs.handler.downloader.Terminate()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		// Re-evaluate on every wakeup whether a sync should be launched.
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}
		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case err := <-cs.doneCh:
			// Previous sync cycle finished; re-arm the force timer for the next one.
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false

			// If we've reached the merge transition but no beacon client is available, or
			// it has not yet switched us over, keep warning the user that their infra is
			// potentially flaky.
			if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second {
				log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
				cs.warned = time.Now()
			}
		case <-cs.force.C:
			cs.forced = true

		case <-cs.handler.quitSync:
			// Disable all insertion on the blockchain. This needs to happen before
			// terminating the downloader because the downloader waits for blockchain
			// inserts, and these can take a long time to finish.
			cs.handler.chain.StopInsert()
			cs.handler.downloader.Terminate()
			if cs.doneCh != nil {
				// Wait for the in-flight doSync goroutine to drain before exiting.
				<-cs.doneCh
			}
			return
		}
	}
}

// nextSyncOp determines whether sync is required at this time, returning
// nil when no sync should run (already syncing, post-merge, too few peers,
// or already at least as heavy as the best peer).
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running
	}
	// If a beacon client once took over control, disable the entire legacy sync
	// path from here on end. Note, there is a slight "race" between reaching TTD
	// and the beacon client taking over. The downloader will enforce that nothing
	// above the first TTD will be delivered to the chain for import.
	//
	// An alternative would be to check the local chain for exceeding the TTD and
	// avoid triggering a sync in that case, but that could also miss sibling or
	// other family TTD block being accepted.
	if cs.handler.chain.Config().TerminalTotalDifficultyPassed || cs.handler.merger.TDDReached() {
		return nil
	}
	// Ensure we're at minimum peer count.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.handler.maxPeers {
		minPeers = cs.handler.maxPeers
	}
	if cs.handler.peers.len() < minPeers {
		return nil
	}
	// We have enough peers, pick the one with the highest TD, but avoid going
	// over the terminal total difficulty. Above that we expect the consensus
	// clients to direct the chain head to sync to.
	peer := cs.handler.peers.peerWithHighestTD()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		// We seem to be in sync according to the legacy rules. In the merge
		// world, it can also mean we're stuck on the merge block, waiting for
		// a beacon client. In the latter case, notify the user.
		if ttd := cs.handler.chain.Config().TerminalTotalDifficulty; ttd != nil && ourTD.Cmp(ttd) >= 0 && time.Since(cs.warned) > 10*time.Second {
			log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
			cs.warned = time.Now()
		}
		return nil // We're in sync
	}
	return op
}

// peerToSyncOp packages a peer's advertised head and TD into a sync operation
// using the given sync mode.
func peerToSyncOp(mode downloader.SyncMode, p *zond.Peer) *chainSyncOp {
	peerHead, peerTD := p.Head()
	return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
}

// modeAndLocalHead picks the sync mode to run (snap vs. full) and returns the
// total difficulty of the corresponding local head block to compare peers
// against.
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
	// If we're in snap sync mode, return that directly
	if cs.handler.snapSync.Load() {
		block := cs.handler.chain.CurrentSnapBlock()
		td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
		return downloader.SnapSync, td
	}
	// We are probably in full sync, but we might have rewound to before the
	// snap sync pivot, check if we should reenable
	if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
		if head := cs.handler.chain.CurrentBlock(); head.Number.Uint64() < *pivot {
			block := cs.handler.chain.CurrentSnapBlock()
			td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
			return downloader.SnapSync, td
		}
	}
	// Nope, we're really full syncing
	head := cs.handler.chain.CurrentBlock()
	td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64())
	return downloader.FullSync, td
}

// startSync launches doSync in a new goroutine.
215 func (cs *chainSyncer) startSync(op *chainSyncOp) { 216 cs.doneCh = make(chan error, 1) 217 go func() { cs.doneCh <- cs.handler.doSync(op) }() 218 } 219 220 // doSync synchronizes the local blockchain with a remote peer. 221 func (h *handler) doSync(op *chainSyncOp) error { 222 if op.mode == downloader.SnapSync { 223 // Before launch the snap sync, we have to ensure user uses the same 224 // txlookup limit. 225 // The main concern here is: during the snap sync Geth won't index the 226 // block(generate tx indices) before the HEAD-limit. But if user changes 227 // the limit in the next snap sync(e.g. user kill Geth manually and 228 // restart) then it will be hard for Geth to figure out the oldest block 229 // has been indexed. So here for the user-experience wise, it's non-optimal 230 // that user can't change limit during the snap sync. If changed, Geth 231 // will just blindly use the original one. 232 limit := h.chain.TxLookupLimit() 233 if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil { 234 rawdb.WriteFastTxLookupLimit(h.database, limit) 235 } else if *stored != limit { 236 h.chain.SetTxLookupLimit(*stored) 237 log.Warn("Update txLookup limit", "provided", limit, "updated", *stored) 238 } 239 } 240 // Run the sync cycle, and disable snap sync if we're past the pivot block 241 err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode) 242 if err != nil { 243 return err 244 } 245 if h.snapSync.Load() { 246 log.Info("Snap sync complete, auto disabling") 247 h.snapSync.Store(false) 248 } 249 // If we've successfully finished a sync cycle, enable accepting transactions 250 // from the network. 251 h.acceptTxs.Store(true) 252 253 head := h.chain.CurrentBlock() 254 if head.Number.Uint64() > 0 { 255 // We've completed a sync cycle, notify all peers of new state. 
This path is 256 // essential in star-topology networks where a gateway node needs to notify 257 // all its out-of-date peers of the availability of a new block. This failure 258 // scenario will most often crop up in private and hackathon networks with 259 // degenerate connectivity, but it should be healthy for the mainnet too to 260 // more reliably update peers or the local TD state. 261 if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil { 262 h.BroadcastBlock(block, false) 263 } 264 } 265 return nil 266 }