github.com/ethereumproject/go-ethereum@v5.5.2+incompatible/eth/sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/ethereumproject/go-ethereum/common"
	"github.com/ethereumproject/go-ethereum/core/types"
	"github.com/ethereumproject/go-ethereum/eth/downloader"
	"github.com/ethereumproject/go-ethereum/logger"
	"github.com/ethereumproject/go-ethereum/logger/glog"
	"github.com/ethereumproject/go-ethereum/p2p/discover"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Number of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	txs := pm.txpool.GetTransactions()
	if len(txs) == 0 {
		return
	}
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent: copy shifts the remainder
		// to the front of the slice, and the reslice drops the sent tail.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size)
		sending = true
		go func() { done <- pack.p.SendTransactions(pack.txs) }()
	}

	// pick chooses the next pending sync.
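	// It selects uniformly at random: drawing n in [1, len(pending)] and
	// counting it down while ranging over the map returns each entry with
	// equal probability, so no single peer's backlog can starve the rest.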
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as handling block announcements.
func (pm *ProtocolManager) syncer() {
	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			if !pm.downloader.Synchronising() {
				go pm.synchronise(pm.peers.BestPeer())
			} else {
				glog.V(logger.Debug).Infoln("forceSync.C: skipping call to synchronise with best peer (dl already syncing)")
			}

		case <-pm.noMorePeers:
			return
		}
	}
}

// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}

	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash())
	// A stored block's TD should never be nil or non-positive.
	if td == nil || td.Sign() < 1 {
		glog.Fatalf("Found invalid TD=%v for current block in database. Exiting.\nCheck available disk space and restart to attempt database recovery.", td)
	}
	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}

	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		mode = downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to re-enable fast sync.
		atomic.StoreUint32(&pm.fastSync, 1)
		mode = downloader.FastSync
	}

	if mode == downloader.FastSync {
		// Make sure the peer's total difficulty is higher than that of the
		// fast block we are synchronising from.
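		// While fast-syncing, the node's effective head is CurrentFastBlock
		// rather than CurrentBlock, so the check below compares the peer's TD
		// against the fast block's TD to avoid starting a sync that could not
		// advance us.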
		if pm.blockchain.GetTd(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
			return
		}
	}

	// Run the sync cycle, and disable fast sync if we've gone past the pivot block
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		glog.V(logger.Info).Infoln("downloader failed to synchronise", "mode=", mode, "peer=", peer.String(), "err=", err.Error())
		return
	}

	// If fast sync was enabled, and we synced up, disable it
	if atomic.CompareAndSwapUint32(&pm.fastSync, 1, 0) {
		// Report completion on both the verbosity-gated and the display logger.
		glog.V(logger.Info).Infoln("Fast sync complete, auto disabling")
		glog.D(logger.Info).Infoln("Fast sync complete, auto disabling")
	}
	atomic.StoreUint32(&pm.acceptsTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of the new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		go pm.BroadcastBlock(head, false)
	}
}
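// Usage sketch (hypothetical wiring, for illustration only): the protocol
// manager is expected to launch these loops from its Start method, roughly
//
//	go pm.syncer()
//	go pm.txsyncLoop()
//
// while the peer handler feeds pm.newPeerCh and calls pm.syncTransactions
// for each freshly connected peer.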