github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/eth/sync.go

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

package eth

import (
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/Sberex/go-sberex/common"
	"github.com/Sberex/go-sberex/core/types"
	"github.com/Sberex/go-sberex/eth/downloader"
	"github.com/Sberex/go-sberex/log"
	"github.com/Sberex/go-sberex/p2p/discover"
)

const (
	forceSyncCycle      = 20 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Number of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	var txs types.Transactions
	pending, _ := pm.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions(pack.txs) }()
	}

	// pick chooses the next pending sync.
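	// It returns a uniformly random entry of the pending map by counting a
	// random index down to zero while ranging over the map in a single pass.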
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as handling block announcements.
func (pm *ProtocolManager) syncer() {
	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go pm.synchronise(pm.peers.BestPeer())

		case <-pm.noMorePeers:
			return
		}
	}
}

// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		mode = downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to re-enable fast sync.
		atomic.StoreUint32(&pm.fastSync, 1)
		mode = downloader.FastSync
	}
	// Run the sync cycle, and disable fast sync if we've gone past the pivot block
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		return
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}
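	// The protocol handler drops announced transactions while acceptTxs is
	// zero, so the node only starts accepting (and relaying) transactions
	// once this first sync cycle has completed.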
	atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		go pm.BroadcastBlock(head, false)
	}
}
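// ---------------------------------------------------------------------------
// Illustrative sketch (not part of eth/sync.go; it would compile as its own
// program). It mirrors the pack-filling loop in send above, with a
// hypothetical item type standing in for *types.Transaction: each pack stops
// growing once its running size reaches txsyncPackSize, but always takes at
// least one item, which is why a single oversized transaction can still push
// a pack past the target.

package main

import "fmt"

const txsyncPackSize = 100 * 1024 // same target pack size as above

// item stands in for *types.Transaction; only its encoded size matters here.
type item struct{ size int }

// split applies the same fill rule as the loop in send: keep appending while
// the running size is below the target, then start a new pack.
func split(items []item) [][]item {
	var packs [][]item
	for len(items) > 0 {
		var (
			pack []item
			size int
		)
		for len(items) > 0 && size < txsyncPackSize {
			pack = append(pack, items[0])
			size += items[0].size
			items = items[1:]
		}
		packs = append(packs, pack)
	}
	return packs
}

func main() {
	// 300 items of 1 KiB each: expect three packs of 100 KiB.
	items := make([]item, 300)
	for i := range items {
		items[i] = item{size: 1024}
	}
	fmt.Println("packs:", len(split(items))) // packs: 3
}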