// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package xcb

import (
	"math/big"
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/p2p/enode"
	"github.com/core-coin/go-core/v2/xcb/downloader"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	defaultMinSyncPeers = 5                // Amount of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop64.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)

// txsync pairs a legacy (pre-xcb/65) peer with the pending transactions
// that still need to be delivered to it by txsyncLoop64.
type txsync struct {
	p   *peer                // destination peer
	txs []*types.Transaction // transactions not yet sent to the peer
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	// Assemble the set of transactions to broadcast or announce to the remote
	// peer. Fun fact, this is quite an expensive operation as it needs to sort
	// the transactions if the sorting is not cached yet. However, with a random
	// order, insertions could overflow the non-executable queues and get dropped.
	//
	// TODO(raisty): Figure out if we could get away with random order somehow
	var txs types.Transactions
	// Pending never returns an error here; the second value is deliberately ignored.
	pending, _ := pm.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	// The xcb/65 protocol introduces proper transaction announcements, so instead
	// of dripping transactions across multiple peers, just send the entire list as
	// an announcement and let the remote side decide what they need (likely nothing).
	if p.version >= xcb65 {
		hashes := make([]common.Hash, len(txs))
		for i, tx := range txs {
			hashes[i] = tx.Hash()
		}
		p.AsyncSendPooledTransactionHashes(hashes)
		return
	}
	// Out of luck, peer is running legacy protocols, drop the txs over to the
	// sync loop. Also select on quitSync so this cannot block past shutdown.
	select {
	case pm.txsyncCh <- &txsync{p: p, txs: txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop64 takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop64() {
	defer pm.wg.Done()

	var (
		pending = make(map[enode.ID]*txsync) // peers with transactions left to send
		sending = false                      // whether a send is active
		pack    = new(txsync)                // the pack that is being sent (reused between sends)
		done    = make(chan error, 1)        // result of the send; buffered so the sender goroutine never blocks
	)

	// send starts a sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// xcb/65+ peers use announcements (see syncTransactions); reaching this
		// loop with such a peer is a programming error.
		if s.p.version >= xcb65 {
			panic("initial transaction syncer running on xcb/65+")
		}
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent (shift the remainder to the
		// front of s.txs and truncate, keeping the backing array).
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions64(pack.txs) }()
	}

	// pick chooses the next pending sync, uniformly at random over the map.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
			pending[s.p.ID()] = s
			// Only kick off a send if none is in flight; otherwise the done
			// case below will pick this peer up eventually.
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}

// chainSyncer coordinates blockchain sync components.
type chainSyncer struct {
	pm          *ProtocolManager
	force       *time.Timer   // timer that lowers the sync peer threshold when it fires
	forced      bool          // true when force timer fired
	peerEventCh chan struct{} // signalled on peer set / head changes
	doneCh      chan error    // non-nil when sync is running
}

// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode // fast or full sync
	peer *peer               // peer to sync against
	td   *big.Int            // peer's announced total difficulty
	head common.Hash         // peer's announced head hash
}

// newChainSyncer creates a chainSyncer.
func newChainSyncer(pm *ProtocolManager) *chainSyncer {
	return &chainSyncer{
		pm:          pm,
		peerEventCh: make(chan struct{}),
	}
}

// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head. It returns false if the syncer is shutting down.
func (cs *chainSyncer) handlePeerEvent(p *peer) bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.pm.quitSync:
		return false
	}
}

// loop runs in its own goroutine and launches the sync when necessary.
func (cs *chainSyncer) loop() {
	defer cs.pm.wg.Done()

	cs.pm.blockFetcher.Start()
	cs.pm.txFetcher.Start()
	defer cs.pm.blockFetcher.Stop()
	defer cs.pm.txFetcher.Stop()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		// Re-evaluate on every wakeup whether a sync should be started.
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}

		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case <-cs.doneCh:
			// Sync finished; rearm the force timer for the next cycle.
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false
		case <-cs.force.C:
			cs.forced = true

		case <-cs.pm.quitSync:
			// Disable all insertion on the blockchain. This needs to happen before
			// terminating the downloader because the downloader waits for blockchain
			// inserts, and these can take a long time to finish.
			cs.pm.blockchain.StopInsert()
			cs.pm.downloader.Terminate()
			if cs.doneCh != nil {
				// Wait for the current sync to end.
				<-cs.doneCh
			}
			return
		}
	}
}

// nextSyncOp determines whether sync is required at this time.
// It returns nil when no sync should start.
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running.
	}

	// Ensure we're at minimum peer count. The force timer drops the
	// requirement to a single peer so small networks still sync.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.pm.maxPeers {
		minPeers = cs.pm.maxPeers
	}
	if cs.pm.peers.Len() < minPeers {
		return nil
	}

	// We have enough peers, check TD.
	peer := cs.pm.peers.BestPeer()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		return nil // We're in sync.
	}
	return op
}

// peerToSyncOp builds a sync operation targeting the given peer's
// currently announced head hash and total difficulty.
func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp {
	peerHead, peerTD := p.Head()
	return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
}

// modeAndLocalHead returns the sync mode that should be used and the
// total difficulty of the matching local chain head.
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
	// If we're in fast sync mode, return that directly
	if atomic.LoadUint32(&cs.pm.fastSync) == 1 {
		block := cs.pm.blockchain.CurrentFastBlock()
		td := cs.pm.blockchain.GetTdByHash(block.Hash())
		return downloader.FastSync, td
	}
	// We are probably in full sync, but we might have rewound to before the
	// fast sync pivot, check if we should reenable
	if pivot := rawdb.ReadLastPivotNumber(cs.pm.chaindb); pivot != nil {
		if head := cs.pm.blockchain.CurrentBlock(); head.NumberU64() < *pivot {
			block := cs.pm.blockchain.CurrentFastBlock()
			td := cs.pm.blockchain.GetTdByHash(block.Hash())
			return downloader.FastSync, td
		}
	}
	// Nope, we're really full syncing
	head := cs.pm.blockchain.CurrentHeader()
	td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
	return downloader.FullSync, td
}

// startSync launches doSync in a new goroutine. The buffered doneCh lets
// the sync goroutine deliver its result even if loop is busy elsewhere.
func (cs *chainSyncer) startSync(op *chainSyncOp) {
	cs.doneCh = make(chan error, 1)
	go func() { cs.doneCh <- cs.pm.doSync(op) }()
}

// doSync synchronizes the local blockchain with a remote peer.
func (pm *ProtocolManager) doSync(op *chainSyncOp) error {
	if op.mode == downloader.FastSync {
		// Before launching the fast sync, we have to ensure the user uses the
		// same txlookup limit.
		// The main concern here is: during the fast sync Gocore won't index the
		// block(generate tx indices) before the HEAD-limit. But if user changes
		// the limit in the next fast sync(e.g. user kill Gocore manually and
		// restart) then it will be hard for Gocore to figure out the oldest block
		// has been indexed. So here for the user-experience wise, it's non-optimal
		// that user can't change limit during the fast sync. If changed, Gocore
		// will just blindly use the original one.
		limit := pm.blockchain.TxLookupLimit()
		if stored := rawdb.ReadFastTxLookupLimit(pm.chaindb); stored == nil {
			// First fast sync: persist the configured limit for later runs.
			rawdb.WriteFastTxLookupLimit(pm.chaindb, limit)
		} else if *stored != limit {
			// A different limit was persisted earlier; it wins over the new one.
			pm.blockchain.SetTxLookupLimit(*stored)
			log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
		}
	}
	// Run the sync cycle, and disable fast sync if we're past the pivot block
	err := pm.downloader.Synchronise(op.peer.id, op.head, op.td, op.mode)
	if err != nil {
		return err
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}

	// If we've successfully finished a sync cycle and passed any required checkpoint,
	// enable accepting transactions from the network.
	head := pm.blockchain.CurrentBlock()
	if head.NumberU64() >= pm.checkpointNumber {
		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
		// for non-checkpointed (number = 0) private networks: only accept txs if
		// the head is at most one month old.
		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
			atomic.StoreUint32(&pm.acceptTxs, 1)
		}
	}

	if head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		pm.BroadcastBlock(head, false)
	}

	return nil
}