gitee.com/liu-zhao234568/cntest@v1.0.0/eth/sync.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package eth 18 19 import ( 20 "math/big" 21 "math/rand" 22 "sync/atomic" 23 "time" 24 25 "gitee.com/liu-zhao234568/cntest/common" 26 "gitee.com/liu-zhao234568/cntest/core/rawdb" 27 "gitee.com/liu-zhao234568/cntest/core/types" 28 "gitee.com/liu-zhao234568/cntest/eth/downloader" 29 "gitee.com/liu-zhao234568/cntest/eth/protocols/eth" 30 "gitee.com/liu-zhao234568/cntest/log" 31 "gitee.com/liu-zhao234568/cntest/p2p/enode" 32 ) 33 34 const ( 35 forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available 36 defaultMinSyncPeers = 5 // Amount of peers desired to start syncing 37 38 // This is the target size for the packs of transactions sent by txsyncLoop64. 39 // A pack can get larger than this if a single transactions exceeds this size. 40 txsyncPackSize = 100 * 1024 41 ) 42 43 type txsync struct { 44 p *eth.Peer 45 txs []*types.Transaction 46 } 47 48 // syncTransactions starts sending all currently pending transactions to the given peer. 
func (h *handler) syncTransactions(p *eth.Peer) {
	// Assemble the set of transaction to broadcast or announce to the remote
	// peer. Fun fact, this is quite an expensive operation as it needs to sort
	// the transactions if the sorting is not cached yet. However, with a random
	// order, insertions could overflow the non-executable queues and get dropped.
	//
	// TODO(karalabe): Figure out if we could get away with random order somehow
	var txs types.Transactions
	pending, _ := h.txpool.Pending(false)
	for _, batch := range pending {
		txs = append(txs, batch...)
	}
	if len(txs) == 0 {
		return
	}
	// The eth/65 protocol introduces proper transaction announcements, so instead
	// of dripping transactions across multiple peers, just send the entire list as
	// an announcement and let the remote side decide what they need (likely nothing).
	if p.Version() >= eth.ETH65 {
		hashes := make([]common.Hash, len(txs))
		for i, tx := range txs {
			hashes[i] = tx.Hash()
		}
		p.AsyncSendPooledTransactionHashes(hashes)
		return
	}
	// Out of luck, peer is running legacy protocols, drop the txs over
	// to the background txsyncLoop64; bail if the handler is shutting down.
	select {
	case h.txsyncCh <- &txsync{p: p, txs: txs}:
	case <-h.quitSync:
	}
}

// txsyncLoop64 takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
//
// All state below (pending, sending, pack) is owned by this single goroutine;
// only the network send itself runs in the background, reporting via done.
func (h *handler) txsyncLoop64() {
	defer h.wg.Done()

	var (
		pending = make(map[enode.ID]*txsync) // outstanding syncs, keyed by peer ID
		sending = false                      // whether a send is active
		pack    = new(txsync)                // the pack that is being sent
		done    = make(chan error, 1)        // result of the send
	)

	// send starts a sending a pack of transactions from the sync.
	send := func(s *txsync) {
		if s.p.Version() >= eth.ETH65 {
			panic("initial transaction syncer running on eth/65+")
		}
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent. The copy compacts the
		// unsent tail to the front of s.txs, reusing the backing array.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.Peer.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
		sending = true
		go func() { done <- pack.p.SendTransactions(pack.txs) }()
	}

	// pick chooses the next pending sync, uniformly at random so no single
	// peer can monopolise the loop.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-h.txsyncCh:
			pending[s.p.Peer.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.Peer.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-h.quitSync:
			return
		}
	}
}

// chainSyncer coordinates blockchain sync components.
type chainSyncer struct {
	handler     *handler
	force       *time.Timer   // lowers the peer threshold to 1 when it fires
	forced      bool          // true when force timer fired
	peerEventCh chan struct{}
	doneCh      chan error    // non-nil when sync is running
}

// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode // sync mode to run (full/fast/snap)
	peer *eth.Peer           // peer to sync against
	td   *big.Int            // total difficulty advertised by the peer
	head common.Hash         // head block hash advertised by the peer
}

// newChainSyncer creates a chainSyncer.
func newChainSyncer(handler *handler) *chainSyncer {
	return &chainSyncer{
		handler:     handler,
		peerEventCh: make(chan struct{}),
	}
}

// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head. Returns false if the syncer is shutting down.
func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.handler.quitSync:
		return false
	}
}

// loop runs in its own goroutine and launches the sync when necessary.
func (cs *chainSyncer) loop() {
	defer cs.handler.wg.Done()

	cs.handler.blockFetcher.Start()
	cs.handler.txFetcher.Start()
	defer cs.handler.blockFetcher.Stop()
	defer cs.handler.txFetcher.Stop()
	defer cs.handler.downloader.Terminate()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}
		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case <-cs.doneCh:
			// A sync cycle finished; allow the next one and re-arm the timer.
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false
		case <-cs.force.C:
			cs.forced = true

		case <-cs.handler.quitSync:
			// Disable all insertion on the blockchain. This needs to happen before
			// terminating the downloader because the downloader waits for blockchain
			// inserts, and these can take a long time to finish.
			cs.handler.chain.StopInsert()
			cs.handler.downloader.Terminate()
			// Wait for any in-flight sync goroutine to report before returning.
			if cs.doneCh != nil {
				<-cs.doneCh
			}
			return
		}
	}
}

// nextSyncOp determines whether sync is required at this time, returning
// the operation to run or nil if no sync should be started.
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running.
	}
	// Ensure we're at minimum peer count.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.handler.maxPeers {
		minPeers = cs.handler.maxPeers
	}
	if cs.handler.peers.len() < minPeers {
		return nil
	}
	// We have enough peers, check TD
	peer := cs.handler.peers.peerWithHighestTD()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	if mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 {
		// Fast sync via the snap protocol
		mode = downloader.SnapSync
	}
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		return nil // We're in sync.
	}
	return op
}

// peerToSyncOp packages a peer's advertised head and total difficulty into
// a sync operation with the given mode.
func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
	peerHead, peerTD := p.Head()
	return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
}

// modeAndLocalHead determines the sync mode to run and the total difficulty
// of the corresponding local chain head to compare against remote peers.
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
	// If we're in fast sync mode, return that directly
	if atomic.LoadUint32(&cs.handler.fastSync) == 1 {
		block := cs.handler.chain.CurrentFastBlock()
		td := cs.handler.chain.GetTdByHash(block.Hash())
		return downloader.FastSync, td
	}
	// We are probably in full sync, but we might have rewound to before the
	// fast sync pivot, check if we should reenable
	if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
		if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
			block := cs.handler.chain.CurrentFastBlock()
			td := cs.handler.chain.GetTdByHash(block.Hash())
			return downloader.FastSync, td
		}
	}
	// Nope, we're really full syncing
	head := cs.handler.chain.CurrentBlock()
	td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
	return downloader.FullSync, td
}

// startSync launches doSync in a new goroutine. The non-nil doneCh marks a
// sync as in progress until the goroutine reports its result on it.
func (cs *chainSyncer) startSync(op *chainSyncOp) {
	cs.doneCh = make(chan error, 1)
	go func() { cs.doneCh <- cs.handler.doSync(op) }()
}

// doSync synchronizes the local blockchain with a remote peer.
func (h *handler) doSync(op *chainSyncOp) error {
	if op.mode == downloader.FastSync || op.mode == downloader.SnapSync {
		// Before launch the fast sync, we have to ensure user uses the same
		// txlookup limit.
		// The main concern here is: during the fast sync Geth won't index the
		// block(generate tx indices) before the HEAD-limit. But if user changes
		// the limit in the next fast sync(e.g. user kill Geth manually and
		// restart) then it will be hard for Geth to figure out the oldest block
		// has been indexed. So here for the user-experience wise, it's non-optimal
		// that user can't change limit during the fast sync. If changed, Geth
		// will just blindly use the original one.
		limit := h.chain.TxLookupLimit()
		if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {
			rawdb.WriteFastTxLookupLimit(h.database, limit)
		} else if *stored != limit {
			h.chain.SetTxLookupLimit(*stored)
			log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
		}
	}
	// Run the sync cycle, and disable fast sync if we're past the pivot block
	err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)
	if err != nil {
		return err
	}
	if atomic.LoadUint32(&h.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&h.fastSync, 0)
	}
	if atomic.LoadUint32(&h.snapSync) == 1 {
		log.Info("Snap sync complete, auto disabling")
		atomic.StoreUint32(&h.snapSync, 0)
	}
	// If we've successfully finished a sync cycle and passed any required checkpoint,
	// enable accepting transactions from the network.
	head := h.chain.CurrentBlock()
	if head.NumberU64() >= h.checkpointNumber {
		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
		// for non-checkpointed (number = 0) private networks.
		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
			atomic.StoreUint32(&h.acceptTxs, 1)
		}
	}
	if head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		h.BroadcastBlock(head, false)
	}
	return nil
}