github.com/klaytn/klaytn@v1.12.1/node/cn/sync.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from eth/sync.go (2018/06/04).
// Modified and improved for the klaytn development.

package cn

import (
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/klaytn/klaytn/blockchain/types"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/datasync/downloader"
	"github.com/klaytn/klaytn/networks/p2p/discover"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Number of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

type txsync struct {
	p   Peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p Peer) {
	var txs types.Transactions
	pending, _ := pm.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent by shifting the unsent
		// tail to the front of the slice in place.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.GetP2PPeerID())
		}
		// Send the pack in the background.
		s.p.GetP2PPeer().Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions(pack.txs) }()
	}
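	// Only one pack is in flight at any time, tracked by the sending flag:
	// new sync requests are parked in pending, and the next pack is only
	// dispatched once the previous send reports back on the done channel.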
	// pick chooses the next pending sync.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		// n is uniform over [1, len(pending)]; returning the n-th entry
		// visited keeps the choice uniform even though Go's map iteration
		// order is unspecified.
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.GetP2PPeerID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.GetP2PPeer().Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.GetP2PPeerID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network,
// both downloading hashes and blocks as well as handling block announcements.
func (pm *ProtocolManager) syncer() {
	pm.wg.Add(1)
	defer pm.wg.Done()

	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go pm.synchronise(pm.peers.BestPeer())

		case <-pm.noMorePeers:
			return
		}
	}
}

// getSyncMode returns the SyncMode to use based on the current block and the
// explicitly requested sync flags.
func (pm *ProtocolManager) getSyncMode(currentBlock *types.Block) downloader.SyncMode {
	if atomic.LoadUint32(&pm.snapSync) == 1 {
		// Snap sync was explicitly requested, and explicitly granted
		return downloader.SnapSync
	} else if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		return downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to re-enable snap sync.
		atomic.StoreUint32(&pm.snapSync, 1)
		return downloader.SnapSync
	}
	return downloader.FullSync
}
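// In table form, getSyncMode resolves as follows:
//
//	snapSync flag set                           -> SnapSync
//	fastSync flag set                           -> FastSync
//	current block at genesis, fast block ahead  -> SnapSync (resume an
//	                                               interrupted state sync)
//	otherwise                                   -> FullSync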
// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer Peer) {
	// Short circuit if no peers are available or the syncStop flag is set
	if peer == nil || pm.GetSyncStop() {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := pm.getSyncMode(currentBlock)
	if mode == downloader.FastSync || mode == downloader.SnapSync {
		// Make sure the peer's total blockscore is also higher than that of
		// our current fast block before synchronising against it.
		if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
			return
		}
	}

	// Run the sync cycle, and disable fast sync if we've gone past the pivot block
	if err := pm.downloader.Synchronise(peer.GetID(), pHead, pTd, mode); err != nil {
		return
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		logger.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}
	if atomic.LoadUint32(&pm.snapSync) == 1 {
		logger.Info("Snap sync complete, auto disabling")
		atomic.StoreUint32(&pm.snapSync, 0)
	}
	atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of the new state. This
		// path is essential in star-topology networks where a gateway node needs
		// to notify all its out-of-date peers of the availability of a new block.
		// This failure scenario will most often crop up in private and hackathon
		// networks with degenerate connectivity, but it should be healthy for the
		// mainnet too to more reliably update peers or the local TD state.
		go pm.BroadcastBlockHash(head)
	}
}
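// splitIntoPacks is an illustrative sketch, not referenced by the code above,
// of the greedy pack-splitting policy that send applies: fill each pack while
// the running size stays under txsyncPackSize, always admitting at least one
// transaction so an oversized transaction still ships in a pack of its own.
func splitIntoPacks(txs []*types.Transaction) [][]*types.Transaction {
	var packs [][]*types.Transaction
	for len(txs) > 0 {
		size := common.StorageSize(0)
		n := 0
		// The first transaction is always taken since size starts at zero.
		for n < len(txs) && size < txsyncPackSize {
			size += txs[n].Size()
			n++
		}
		// The returned packs alias the input slice's backing array.
		packs = append(packs, txs[:n])
		txs = txs[n:]
	}
	return packs
}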