github.com/m3shine/gochain@v2.2.26+incompatible/eth/sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"context"
	"math"
	"sync/atomic"
	"time"

	"go.opencensus.io/trace"

	"github.com/gochain-io/gochain/common"
	"github.com/gochain-io/gochain/core/types"
	"github.com/gochain-io/gochain/eth/downloader"
	"github.com/gochain-io/gochain/log"
	"github.com/gochain-io/gochain/p2p/discover"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Amount of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize   = 1000 * 1024
	txResyncInterval = 1 * time.Minute // How often to re-sync pending txs with peers.
)

type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(ctx context.Context, p *peer) {
	ctx, span := trace.StartSpan(ctx, "ProtocolManager.syncTransactions")
	defer span.End()
	txs := pm.txpool.PendingList(ctx)
	if len(txs) == 0 {
		return
	}
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// syncTransactionsAllPeers syncs pending txs to a square-root-sized subset of all peers.
func (pm *ProtocolManager) syncTransactionsAllPeers() {
	ctx := context.TODO()
	txs := pm.txpool.PendingList(ctx)
	if len(txs) == 0 {
		return
	}
	peers := pm.peers.All()
	max := int(math.Sqrt(float64(len(peers))))
	if max < minBroadcastPeers {
		max = minBroadcastPeers
	}
	if max > len(peers) {
		max = len(peers)
	}
	log.Info("Resyncing pending txs", "txs", len(txs), "peers", max)
	for _, p := range peers[:max] {
		select {
		case pm.txsyncCh <- &txsync{p, txs}:
		case <-pm.quitSync:
			return
		}
	}
}

// txResyncLoop periodically re-syncs pending transactions to peers.
func (pm *ProtocolManager) txResyncLoop() {
	t := time.NewTicker(txResyncInterval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			pm.syncTransactionsAllPeers()
		case <-pm.quitSync:
			return
		}
	}
}
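
// The peer cap computed in syncTransactionsAllPeers follows the square-root
// fanout heuristic commonly used for block broadcasts: message count grows as
// sqrt(peers) while gossip still reaches the rest of the network quickly. A
// minimal standalone sketch of just that bounds arithmetic (fanout is a
// hypothetical helper shown for illustration, not part of this file):
//
//	func fanout(numPeers, minPeers int) int {
//		n := int(math.Sqrt(float64(numPeers)))
//		if n < minPeers {
//			n = minPeers // never drop below the broadcast floor
//		}
//		if n > numPeers {
//			n = numPeers // can't select more peers than exist
//		}
//		return n
//	}
//
// For example, with a floor of 4: 100 peers -> 10, 9 peers -> 4 (the floor
// applies), and 2 peers -> 2 (capped at the peer count).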

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		_, span := trace.StartSpan(context.Background(), "ProtocolManager.txsyncLoop-send")
		defer span.End()

		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent: shift the unsent tail to
		// the front of the slice and truncate in place, reusing the backing array.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() {
			ctx, ss := trace.StartSpan(context.Background(), "ProtocolManager.txsyncLoop-send-txs")
			defer ss.End()
			parent := span.SpanContext()
			ss.AddLink(trace.Link{
				Type:    trace.LinkTypeParent,
				TraceID: parent.TraceID,
				SpanID:  parent.SpanID,
			})
			done <- pack.p.SendTransactions(ctx, pack.txs)
		}()
	}

	// pick chooses the next pending sync. Go's map iteration order is
	// randomized, so returning the first entry yields a pseudo-random peer.
	pick := func() *txsync {
		for _, s := range pending {
			return s
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Warn("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as handling block announcements.
func (pm *ProtocolManager) syncer() {
	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}

			go func() {
				ctx, span := trace.StartSpan(context.Background(), "ProtocolManager.syncer-newPeerCh")
				defer span.End()
				pm.synchronise(ctx, pm.peers.BestPeer(ctx))
			}()

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go func() {
				ctx, span := trace.StartSpan(context.Background(), "ProtocolManager.syncer-forceSync")
				defer span.End()
				pm.synchronise(ctx, pm.peers.BestPeer(ctx))
			}()

		case <-pm.noMorePeers:
			return
		}
	}
}
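
// txsyncLoop above and synchronise below both hand the actual network I/O off
// to goroutines that can outlive the span that spawned them, so instead of a
// parent/child span they record the relationship with a trace.Link. A minimal
// sketch of that pattern, using only the opencensus calls already imported
// here (parentSpan stands in for whatever span is active at the call site):
//
//	_, child := trace.StartSpan(context.Background(), "detached-work")
//	defer child.End()
//	pc := parentSpan.SpanContext()
//	child.AddLink(trace.Link{
//		Type:    trace.LinkTypeParent,
//		TraceID: pc.TraceID,
//		SpanID:  pc.SpanID,
//	})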

// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(ctx context.Context, peer *peer) {
	ctx, span := trace.StartSpan(ctx, "ProtocolManager.synchronise")
	defer span.End()

	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	hash := currentBlock.Hash()
	td := pm.blockchain.GetTd(hash, currentBlock.NumberU64())

	pHead, pTd := peer.Head()
	// Skip if the peer has a lesser total difficulty, or the same TD and the same head hash.
	if cmp := pTd.Cmp(td); cmp < 0 {
		return
	} else if cmp == 0 && pHead == hash {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		mode = downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to re-enable fast sync.
		atomic.StoreUint32(&pm.fastSync, 1)
		mode = downloader.FastSync
	}

	if mode == downloader.FastSync {
		// Make sure the peer's total difficulty is higher than our current fast block's.
		if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
			return
		}
	}

	// Run the sync cycle, and disable fast sync if we've gone past the pivot block
	if err := pm.downloader.Synchronise(ctx, peer.id, pHead, pTd, mode); err != nil {
		return
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}
	atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of the new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers on the local TD state.
		go func() {
			ctx, bs := trace.StartSpan(context.Background(), "ProtocolManager.synchronise-announce")
			defer bs.End()
			parent := span.SpanContext()
			bs.AddLink(trace.Link{
				Type:    trace.LinkTypeParent,
				TraceID: parent.TraceID,
				SpanID:  parent.SpanID,
			})
			pm.BroadcastBlock(ctx, head, false)
		}()
	}
}
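
// The TD gate at the top of synchronise means a node never syncs "sideways":
// a sync only starts when the peer's total difficulty is strictly higher, or
// equal with a diverging head hash. The predicate in isolation (shouldSync is
// a hypothetical helper shown for illustration, not part of this file):
//
//	func shouldSync(pTd, td *big.Int, pHead, head common.Hash) bool {
//		switch pTd.Cmp(td) {
//		case -1:
//			return false // peer is behind us
//		case 0:
//			return pHead != head // equal work: sync only if the heads diverge
//		default:
//			return true // peer has more total difficulty
//		}
//	}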