github.com/luckypickle/go-ethereum-vet@v1.14.2/eth/sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/luckypickle/go-ethereum-vet/common"
	"github.com/luckypickle/go-ethereum-vet/core/types"
	"github.com/luckypickle/go-ethereum-vet/eth/downloader"
	"github.com/luckypickle/go-ethereum-vet/log"
	"github.com/luckypickle/go-ethereum-vet/p2p/discover"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Number of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	var txs types.Transactions
	pending, _ := pm.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions(pack.txs) }()
	}

	// pick chooses the next pending sync.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		// Walk a uniformly random number of steps into the map to pick
		// one of the pending syncs at random.
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as driving the block announcement fetcher.
func (pm *ProtocolManager) syncer() {
	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go pm.synchronise(pm.peers.BestPeer())

		case <-pm.noMorePeers:
			return
		}
	}
}

// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		mode = downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to reenable fast sync.
		atomic.StoreUint32(&pm.fastSync, 1)
		mode = downloader.FastSync
	}

	if mode == downloader.FastSync {
		// Make sure the total difficulty we are synchronising against is higher
		// than our own fast-synced head's.
		if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
			return
		}
	}

	// Run the sync cycle, and disable fast sync if we've gone past the pivot block
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		return
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}
	atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		go pm.BroadcastBlock(head, false)
	}
}
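
// The pack-filling loop inside txsyncLoop's send closure caps each
// transmission near txsyncPackSize bytes while still making progress when a
// single transaction exceeds the target. The helper below is an editor's
// illustrative sketch of that splitting strategy in isolation; it is not
// part of the upstream file, and the name splitIntoPacks and the target
// parameter are hypothetical.
func splitIntoPacks(txs []*types.Transaction, target common.StorageSize) [][]*types.Transaction {
	var packs [][]*types.Transaction
	for len(txs) > 0 {
		// Always take at least one transaction, so entries larger than
		// the target still drain from the queue.
		pack := []*types.Transaction{txs[0]}
		size := txs[0].Size()
		// Mirror the loop in send: keep appending until the pack reaches
		// the target size or the queue is exhausted.
		for i := 1; i < len(txs) && size < target; i++ {
			pack = append(pack, txs[i])
			size += txs[i].Size()
		}
		txs = txs[len(pack):]
		packs = append(packs, pack)
	}
	return packs
}

// Combined with the randomised pick above, sending one such pack per peer at
// a time spreads the initial transaction sync across peers instead of
// bursting the whole pending set to whichever peer connected first.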