gitlab.com/yannislg/go-pulse@v0.0.0-20210722055913-a3e24e95638d/eth/sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"math/big"
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	defaultMinSyncPeers = 5                // Number of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop64.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	// Assemble the set of transactions to broadcast or announce to the remote
	// peer. Fun fact: this is quite an expensive operation as it needs to sort
	// the transactions if the sorting is not cached yet. However, with a random
	// order, insertions could overflow the non-executable queues and get dropped.
	//
	// TODO(karalabe): Figure out if we could get away with random order somehow
	var txs types.Transactions
	pending, _ := pm.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	// The eth/65 protocol introduces proper transaction announcements, so instead
	// of dripping transactions across multiple peers, just send the entire list as
	// an announcement and let the remote side decide what they need (likely nothing).
	if p.version >= eth65 {
		hashes := make([]common.Hash, len(txs))
		for i, tx := range txs {
			hashes[i] = tx.Hash()
		}
		p.AsyncSendPooledTransactionHashes(hashes)
		return
	}
	// Out of luck, the peer is running a legacy protocol: hand the txs over to
	// the per-peer sync loop to be dripped across in small packs.
	select {
	case pm.txsyncCh <- &txsync{p: p, txs: txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop64 takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
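// Note that this loop only ever serves peers speaking a protocol version below
// eth/65: syncTransactions routes eth/65+ peers to hash announcements instead,
// and the send helper below panics if it is ever handed a newer peer.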
func (pm *ProtocolManager) txsyncLoop64() {
	defer pm.wg.Done()

	var (
		pending = make(map[enode.ID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		if s.p.version >= eth65 {
			panic("initial transaction syncer running on eth/65+")
		}
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent: shift the unsent remainder
		// to the front of the slice and truncate it in place.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions64(pack.txs) }()
	}

	// pick chooses the next pending sync uniformly at random by walking the
	// map with a random countdown.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// chainSyncer coordinates blockchain sync components.
type chainSyncer struct {
	pm          *ProtocolManager
	force       *time.Timer
	forced      bool // true when force timer fired
	peerEventCh chan struct{}
	doneCh      chan error // non-nil when sync is running
}

// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode
	peer *peer
	td   *big.Int
	head common.Hash
}

// newChainSyncer creates a chainSyncer.
func newChainSyncer(pm *ProtocolManager) *chainSyncer {
	return &chainSyncer{
		pm:          pm,
		peerEventCh: make(chan struct{}),
	}
}

// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head.
func (cs *chainSyncer) handlePeerEvent(p *peer) bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.pm.quitSync:
		return false
	}
}

// loop runs in its own goroutine and launches the sync when necessary.
func (cs *chainSyncer) loop() {
	defer cs.pm.wg.Done()

	cs.pm.blockFetcher.Start()
	cs.pm.txFetcher.Start()
	defer cs.pm.blockFetcher.Stop()
	defer cs.pm.txFetcher.Stop()
	defer cs.pm.downloader.Terminate()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
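	// Once it fires, nextSyncOp drops the minimum peer requirement from
	// defaultMinSyncPeers down to one; the timer is re-armed (and the forced
	// flag cleared) whenever a sync cycle completes.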
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}

		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case <-cs.doneCh:
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false
		case <-cs.force.C:
			cs.forced = true

		case <-cs.pm.quitSync:
			if cs.doneCh != nil {
				cs.pm.downloader.Cancel()
				<-cs.doneCh
			}
			return
		}
	}
}

// nextSyncOp determines whether sync is required at this time.
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running.
	}

	// Ensure we're at the minimum peer count.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.pm.maxPeers {
		minPeers = cs.pm.maxPeers
	}
	if cs.pm.peers.Len() < minPeers {
		return nil
	}

	// We have enough peers, check TD.
	peer := cs.pm.peers.BestPeer()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		return nil // We're in sync.
	}
	return op
}

func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp {
	peerHead, peerTD := p.Head()
	return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
}

// modeAndLocalHead returns the sync mode to use and the total difficulty of
// the local chain head that a remote peer must beat to trigger a sync.
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
	if atomic.LoadUint32(&cs.pm.fastSync) == 1 {
		block := cs.pm.blockchain.CurrentFastBlock()
		td := cs.pm.blockchain.GetTdByHash(block.Hash())
		return downloader.FastSync, td
	}
	head := cs.pm.blockchain.CurrentHeader()
	td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
	return downloader.FullSync, td
}

// startSync launches doSync in a new goroutine.
func (cs *chainSyncer) startSync(op *chainSyncOp) {
	cs.doneCh = make(chan error, 1)
	go func() { cs.doneCh <- cs.pm.doSync(op) }()
}

// doSync synchronizes the local blockchain with a remote peer.
func (pm *ProtocolManager) doSync(op *chainSyncOp) error {
	// Run the sync cycle, and disable fast sync if we're past the pivot block
	err := pm.downloader.Synchronise(op.peer.id, op.head, op.td, op.mode)
	if err != nil {
		return err
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}

	// If we've successfully finished a sync cycle and passed any required checkpoint,
	// enable accepting transactions from the network.
	head := pm.blockchain.CurrentBlock()
	if head.NumberU64() >= pm.checkpointNumber {
		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
		// for non-checkpointed (number = 0) private networks: only accept txs if the
		// head is at most one month old.
		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
			atomic.StoreUint32(&pm.acceptTxs, 1)
		}
	}

	if head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of the new state. This path
		// is essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block.
		// This failure scenario will most often crop up in private and hackathon
		// networks with degenerate connectivity, but it should be healthy for the
		// mainnet too to more reliably update peers or the local TD state.
		pm.BroadcastBlock(head, false)
	}

	return nil
}
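
// Illustration (not part of the original file): both txsyncLoop64 and
// chainSyncer.loop call pm.wg.Done on exit, so the protocol manager is
// expected to register them with its wait group and launch each in its own
// goroutine. Assuming this fork keeps the upstream go-ethereum wiring (the
// chainSync field name below is such an assumption), the startup code in
// handler.go would look roughly like:
//
//	pm.chainSync = newChainSyncer(pm) // during ProtocolManager construction
//	...
//	// start sync handlers
//	pm.wg.Add(2)
//	go pm.chainSync.loop()
//	go pm.txsyncLoop64()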