github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/eth/sync.go

// Copyright 2015 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/core/types"
	"github.com/SmartMeshFoundation/Spectrum/crypto"
	"github.com/SmartMeshFoundation/Spectrum/eth/downloader"
	"github.com/SmartMeshFoundation/Spectrum/log"
	"github.com/SmartMeshFoundation/Spectrum/p2p/discover"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Amount of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	var txs types.Transactions
	pending, _ := pm.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions(pack.txs) }()
	}

	// pick chooses the next pending sync.
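	// It selects uniformly at random from the pending map: n is drawn from
	// [1, len(pending)] and decremented while ranging, so each entry is
	// returned with equal probability regardless of Go's randomized map
	// iteration order.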
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network,
// downloading hashes and blocks as well as handling block announcements.
func (pm *ProtocolManager) syncer() {
	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go pm.synchronise(pm.peers.BestPeer())

		case <-pm.noMorePeers:
			return
		}
	}
}

// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())

	pHead, pTd := peer.Head()
	ppk, _ := peer.ID().Pubkey()
	paddr := crypto.PubkeyToAddress(*ppk)
	log.Debug("go_synchronise ->",
		"currentNum", currentBlock.Number(),
		"currentTD", td,
		"peerTD", pTd,
		"return", pTd.Cmp(td) <= 0,
		"peerAddr", paddr.Hex(),
	)
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		mode = downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to re-enable fast sync.
		atomic.StoreUint32(&pm.fastSync, 1)
		mode = downloader.FastSync
	}
	// Run the sync cycle, and disable fast sync if we've gone past the pivot block
	err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode)

	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Disable fast sync if we indeed have something in our chain
		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
			log.Info("Fast sync complete, auto disabling")
			atomic.StoreUint32(&pm.fastSync, 0)
		}
	}
	if err != nil {
		log.Debug("<<synchronise>>", "err", err, "mode", mode, "peerid", peer.ID().String())
		return
	}
	atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should also help the mainnet to more
		// reliably update peers and the local TD state.
		go pm.BroadcastBlock(head, false)
	}
}
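
// The sync-mode selection in synchronise above can be summarised as a pure
// function. A minimal sketch under the same rules; the function and parameter
// names here are illustrative only and not part of this package:
//
//	func chooseSyncMode(fastSyncRequested bool, currentNum, fastNum uint64) downloader.SyncMode {
//		if fastSyncRequested {
//			// Fast sync was explicitly requested and granted.
//			return downloader.FastSync
//		}
//		if currentNum == 0 && fastNum > 0 {
//			// Head is still the genesis but the fast block is ahead: a
//			// rolled-back fast-sync node, so re-enabling fast sync is safe.
//			return downloader.FastSync
//		}
//		return downloader.FullSync
//	}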