// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"math/big"
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/phillinzzz/newBsc/common"
	"github.com/phillinzzz/newBsc/common/gopool"
	"github.com/phillinzzz/newBsc/core/rawdb"
	"github.com/phillinzzz/newBsc/core/types"
	"github.com/phillinzzz/newBsc/eth/downloader"
	"github.com/phillinzzz/newBsc/eth/protocols/eth"
	"github.com/phillinzzz/newBsc/log"
	"github.com/phillinzzz/newBsc/p2p/enode"
)

const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	defaultMinSyncPeers = 5                // Amount of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop64.
	// A pack can get larger than this if a single transactions exceeds this size.
	txsyncPackSize = 100 * 1024
)

// txsync is a pending initial transaction sync to one peer: the legacy
// (pre-eth/65) peer to drip transactions to, and the transactions that
// still remain to be sent to it.
type txsync struct {
	p   *eth.Peer            // target peer running a legacy protocol
	txs []*types.Transaction // remaining transactions, consumed pack by pack
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (h *handler) syncTransactions(p *eth.Peer) {
	// Assemble the set of transaction to broadcast or announce to the remote
	// peer. Fun fact, this is quite an expensive operation as it needs to sort
	// the transactions if the sorting is not cached yet. However, with a random
	// order, insertions could overflow the non-executable queues and get dropped.
	//
	// TODO(karalabe): Figure out if we could get away with random order somehow
	var txs types.Transactions
	pending, _ := h.txpool.Pending()
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		// Nothing pending, no initial sync needed for this peer.
		return
	}
	// The eth/65 protocol introduces proper transaction announcements, so instead
	// of dripping transactions across multiple peers, just send the entire list as
	// an announcement and let the remote side decide what they need (likely nothing).
	if p.Version() >= eth.ETH65 {
		hashes := make([]common.Hash, len(txs))
		for i, tx := range txs {
			hashes[i] = tx.Hash()
		}
		p.AsyncSendPooledTransactionHashes(hashes)
		return
	}
	// Out of luck, peer is running legacy protocols, drop the txs over
	// to the txsyncLoop64 goroutine, unless we are shutting down.
	select {
	case h.txsyncCh <- &txsync{p: p, txs: txs}:
	case <-h.quitSync:
	}
}

// txsyncLoop64 takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (h *handler) txsyncLoop64() {
	defer h.wg.Done()

	var (
		pending = make(map[enode.ID]*txsync) // peers with transactions left to send
		sending = false                      // whether a send is active
		pack    = new(txsync)                // the pack that is being sent
		done    = make(chan error, 1)        // result of the send
	)

	// send starts a sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// eth/65+ peers are handled via announcements in syncTransactions and
		// must never reach this legacy path.
		if s.p.Version() >= eth.ETH65 {
			panic("initial transaction syncer running on eth/65+")
		}
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.Peer.ID())
		}
		// Send the pack in the background; the result arrives on done.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		gopool.Submit(func() { done <- pack.p.SendTransactions(pack.txs) })
	}
	// pick chooses the next pending sync, uniformly at random over the
	// pending map (map iteration order alone is not uniform).
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-h.txsyncCh:
			pending[s.p.Peer.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.Peer.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-h.quitSync:
			return
		}
	}
}

// chainSyncer coordinates blockchain sync components.
type chainSyncer struct {
	handler     *handler
	force       *time.Timer // fires after forceSyncCycle to lower the peer threshold
	forced      bool        // true when force timer fired
	peerEventCh chan struct{}
	doneCh      chan error // non-nil when sync is running
}

// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode // sync mode to run the cycle in
	peer *eth.Peer           // peer to sync against
	td   *big.Int            // peer's advertised total difficulty
	head common.Hash         // peer's advertised head hash
}

// newChainSyncer creates a chainSyncer.
func newChainSyncer(handler *handler) *chainSyncer {
	return &chainSyncer{
		handler:     handler,
		peerEventCh: make(chan struct{}),
	}
}

// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head. It returns false if the syncer is shutting down.
func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.handler.quitSync:
		return false
	}
}

// loop runs in its own goroutine and launches the sync when necessary.
func (cs *chainSyncer) loop() {
	defer cs.handler.wg.Done()

	cs.handler.blockFetcher.Start()
	cs.handler.txFetcher.Start()
	defer cs.handler.blockFetcher.Stop()
	defer cs.handler.txFetcher.Stop()
	defer cs.handler.downloader.Terminate()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}
		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case <-cs.doneCh:
			// A sync cycle finished: clear the running marker and restart
			// the force timer for the next cycle.
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false
		case <-cs.force.C:
			cs.forced = true

		case <-cs.handler.quitSync:
			// Disable all insertion on the blockchain. This needs to happen before
			// terminating the downloader because the downloader waits for blockchain
			// inserts, and these can take a long time to finish.
			cs.handler.chain.StopInsert()
			cs.handler.downloader.Terminate()
			if cs.doneCh != nil {
				// Wait for the in-flight sync goroutine to report back.
				<-cs.doneCh
			}
			return
		}
	}
}

// nextSyncOp determines whether sync is required at this time.
// It returns nil when no sync should be started.
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running.
	}

	// Ensure we're at minimum peer count.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.handler.maxPeers {
		minPeers = cs.handler.maxPeers
	}
	if cs.handler.peers.len() < minPeers {
		return nil
	}
	// We have enough peers, check TD
	peer := cs.handler.peers.peerWithHighestTD()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	if mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 {
		// Fast sync via the snap protocol
		mode = downloader.SnapSync
	}
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		return nil // We're in sync.
	}
	return op
}

// peerToSyncOp builds a sync operation targeting the given peer's
// advertised head hash and total difficulty.
func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
	peerHead, peerTD := p.Head()
	return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
}

// modeAndLocalHead returns the sync mode to use and the total difficulty
// of the local head that a remote peer must exceed to be sync-worthy.
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
	// If we're in fast sync mode, return that directly
	if atomic.LoadUint32(&cs.handler.fastSync) == 1 {
		block := cs.handler.chain.CurrentFastBlock()
		td := cs.handler.chain.GetTdByHash(block.Hash())
		return downloader.FastSync, td
	}
	// We are probably in full sync, but we might have rewound to before the
	// fast sync pivot, check if we should reenable
	if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
		if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
			block := cs.handler.chain.CurrentFastBlock()
			td := cs.handler.chain.GetTdByHash(block.Hash())
			return downloader.FastSync, td
		}
	}
	// Nope, we're really full syncing
	head := cs.handler.chain.CurrentBlock()
	td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
	return downloader.FullSync, td
}

// startSync launches doSync in a new goroutine. The buffered doneCh marks
// a sync as running and receives its result without blocking the worker.
func (cs *chainSyncer) startSync(op *chainSyncOp) {
	cs.doneCh = make(chan error, 1)
	go func() { cs.doneCh <- cs.handler.doSync(op) }()
}

// doSync synchronizes the local blockchain with a remote peer.
func (h *handler) doSync(op *chainSyncOp) error {
	if op.mode == downloader.FastSync || op.mode == downloader.SnapSync {
		// Before launch the fast sync, we have to ensure user uses the same
		// txlookup limit.
		// The main concern here is: during the fast sync Geth won't index the
		// block(generate tx indices) before the HEAD-limit. But if user changes
		// the limit in the next fast sync(e.g. user kill Geth manually and
		// restart) then it will be hard for Geth to figure out the oldest block
		// has been indexed. So here for the user-experience wise, it's non-optimal
		// that user can't change limit during the fast sync. If changed, Geth
		// will just blindly use the original one.
		limit := h.chain.TxLookupLimit()
		if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {
			rawdb.WriteFastTxLookupLimit(h.database, limit)
		} else if *stored != limit {
			h.chain.SetTxLookupLimit(*stored)
			log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
		}
	}
	// Run the sync cycle, and disable fast sync if we're past the pivot block
	err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)
	if err != nil {
		return err
	}
	if atomic.LoadUint32(&h.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&h.fastSync, 0)
	}
	if atomic.LoadUint32(&h.snapSync) == 1 {
		log.Info("Snap sync complete, auto disabling")
		atomic.StoreUint32(&h.snapSync, 0)
	}
	// If we've successfully finished a sync cycle and passed any required checkpoint,
	// enable accepting transactions from the network.
	head := h.chain.CurrentBlock()
	if head.NumberU64() >= h.checkpointNumber {
		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
		// for non-checkpointed (number = 0) private networks. The head must be
		// at most one month old to start accepting transactions.
		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
			atomic.StoreUint32(&h.acceptTxs, 1)
		}
	}
	if head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		h.BroadcastBlock(head, false)
	}
	return nil
}