github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/eth/downloader/beaconsync.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// beaconBackfiller is the chain and state backfilling that can be commenced once
// the skeleton syncer has successfully reverse downloaded all the headers up to
// the genesis block or an existing header in the database. Its operation is fully
// directed by the skeleton sync's head/tail events.
type beaconBackfiller struct {
	downloader *Downloader   // Downloader to direct via this callback implementation
	syncMode   SyncMode      // Sync mode to use for backfilling the skeleton chains
	success    func()        // Callback to run on successful sync cycle completion
	filling    bool          // Flag whether the downloader is backfilling or not
	started    chan struct{} // Notification channel whether the downloader inited
	lock       sync.Mutex    // Mutex protecting the sync fields above
}

// newBeaconBackfiller is a helper function to create the backfiller.
func newBeaconBackfiller(dl *Downloader, success func()) backfiller {
	return &beaconBackfiller{
		downloader: dl,
		success:    success,
	}
}

// suspend cancels any background downloader threads.
func (b *beaconBackfiller) suspend() {
	// If no filling is running, don't waste cycles
	b.lock.Lock()
	filling := b.filling
	started := b.started
	b.lock.Unlock()

	if !filling {
		return
	}
	// A previous filling should be running, though it may happen that it hasn't
	// yet started (being done on a new goroutine). Many concurrent beacon head
	// announcements can lead to sync start/stop thrashing. In that case we need
	// to wait for initialization before we can safely cancel it. It is safe to
	// read this channel multiple times, since it gets closed on startup.
	<-started

	// Now that we're sure the downloader successfully started up, we can cancel
	// it safely without running the risk of data races.
	b.downloader.Cancel()
}
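
// A minimal standalone sketch (not part of the original file) of the
// close-once, read-many channel pattern that suspend relies on above and
// resume establishes below: the worker goroutine closes the channel exactly
// once when it has initialized, and any number of cancellers can then block
// on it safely, because receiving from a closed channel never blocks. All
// names here are illustrative:
//
//	started := make(chan struct{})
//	go func() {
//		// ... perform initialization ...
//		close(started) // signal readiness exactly once
//		// ... run the long-lived work ...
//	}()
//
//	// A canceller first waits for initialization. This receive is safe to
//	// repeat from many goroutines: a closed channel yields immediately to
//	// every reader.
//	<-started
//	// cancel() // now safe, the worker is known to have started
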
// resume starts the downloader threads for backfilling state and chain data.
func (b *beaconBackfiller) resume() {
	b.lock.Lock()
	if b.filling {
		// If a previous filling cycle is still running, just ignore this start
		// request.
		// TODO(karalabe): We should make this channel driven
		b.lock.Unlock()
		return
	}
	b.filling = true
	b.started = make(chan struct{})
	mode := b.syncMode
	b.lock.Unlock()

	// Start the backfilling on its own thread since the downloader does not have
	// its own lifecycle runloop.
	go func() {
		// Set the backfiller to non-filling when download completes
		defer func() {
			b.lock.Lock()
			b.filling = false
			b.lock.Unlock()
		}()
		// If the downloader fails, report an error: in beacon chain mode there
		// should be no errors as long as the chain we're syncing to is valid.
		if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil {
			log.Error("Beacon backfilling failed", "err", err)
			return
		}
		// Synchronization succeeded. Since this happens async, notify the outer
		// context to disable snap syncing and enable transaction propagation.
		if b.success != nil {
			b.success()
		}
	}()
}

// setMode updates the sync mode from the current one to the requested one. If
// there's an active sync in progress, it will be cancelled and restarted.
func (b *beaconBackfiller) setMode(mode SyncMode) {
	// Update the sync mode and track whether it was changed
	b.lock.Lock()
	oldMode := b.syncMode
	updated := oldMode != mode
	filling := b.filling
	b.syncMode = mode
	b.lock.Unlock()

	// If the sync mode was changed mid-sync, restart. This should never really
	// happen; we just handle it to detect programming errors.
	if !updated || !filling {
		return
	}
	log.Error("Downloader sync mode changed mid-run", "old", oldMode.String(), "new", mode.String())
	b.suspend()
	b.resume()
}

// BeaconSync is the post-merge version of the chain synchronization, where the
// chain is not downloaded from genesis onward, rather from trusted head announces
// backwards.
//
// Internally backfilling and state sync is done the same way, but the header
// retrieval and scheduling is replaced.
func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header) error {
	return d.beaconSync(mode, head, true)
}

// BeaconExtend is an optimistic version of BeaconSync, where an attempt is made
// to extend the current beacon chain with a new header, but in case of a mismatch,
// the old sync will not be terminated and reorged, rather the new head is dropped.
//
// This is useful if a beacon client is feeding us large chunks of payloads to run,
// but is not setting the head after each.
func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error {
	return d.beaconSync(mode, head, false)
}
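
// An illustrative sketch (not part of the original file) of how a driver fed
// by a consensus client might pick between the two entry points above; the
// handler value h and its fields are hypothetical:
//
//	// On a forkchoice update the new head is authoritative: force the
//	// skeleton to reorg onto it.
//	if err := h.downloader.BeaconSync(downloader.SnapSync, head); err != nil {
//		log.Warn("Forced beacon sync failed", "err", err)
//	}
//
//	// For a stream of payloads arriving without head updates, only try to
//	// extend the current skeleton; a mismatching header is simply dropped.
//	if err := h.downloader.BeaconExtend(downloader.SnapSync, payloadHeader); err != nil {
//		log.Trace("Optimistic beacon extension rejected", "err", err)
//	}
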
// beaconSync is the post-merge version of the chain synchronization, where the
// chain is not downloaded from genesis onward, rather from trusted head announces
// backwards.
//
// Internally backfilling and state sync is done the same way, but the header
// retrieval and scheduling is replaced.
func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, force bool) error {
	// When the downloader starts a sync cycle, it needs to be aware of the sync
	// mode to use (full, snap). To keep the skeleton chain oblivious, inject the
	// mode into the backfiller directly.
	//
	// Super crazy dangerous type cast. Should be fine (TM), we're only using a
	// different backfiller implementation for skeleton tests.
	d.skeleton.filler.(*beaconBackfiller).setMode(mode)

	// Signal the skeleton sync to switch to a new head, however it wants
	if err := d.skeleton.Sync(head, force); err != nil {
		return err
	}
	return nil
}
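
// The unchecked type assertion above is tolerable because production code
// always wires a *beaconBackfiller into the skeleton; only tests inject a
// different backfiller implementation, and those never reach beaconSync. A
// defensive variant, purely an illustrative sketch and not the original
// code, could use the comma-ok form instead:
//
//	filler, ok := d.skeleton.filler.(*beaconBackfiller)
//	if !ok {
//		return fmt.Errorf("unexpected backfiller type %T", d.skeleton.filler)
//	}
//	filler.setMode(mode)
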
// findBeaconAncestor tries to locate the common ancestor link of the local chain
// and the beacon chain just requested. In the general case when our node was in
// sync and on the correct chain, checking the top N links should already get us
// a match. In the rare scenario when we ended up on a long reorganisation (i.e.
// none of the head links match), we do a binary search to find the ancestor.
func (d *Downloader) findBeaconAncestor() (uint64, error) {
	// Figure out the current local head position
	var chainHead *types.Header

	switch d.getMode() {
	case FullSync:
		chainHead = d.blockchain.CurrentBlock().Header()
	case SnapSync:
		chainHead = d.blockchain.CurrentFastBlock().Header()
	default:
		chainHead = d.lightchain.CurrentHeader()
	}
	number := chainHead.Number.Uint64()

	// Retrieve the skeleton bounds and ensure they are linked to the local chain
	beaconHead, beaconTail, err := d.skeleton.Bounds()
	if err != nil {
		// This is a programming error. The chain backfiller was called with an
		// invalid beacon sync state. Ideally we would panic here, but erroring
		// gives us at least a remote chance to recover. It's still a big fault!
		log.Error("Failed to retrieve beacon bounds", "err", err)
		return 0, err
	}
	var linked bool
	switch d.getMode() {
	case FullSync:
		linked = d.blockchain.HasBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
	case SnapSync:
		linked = d.blockchain.HasFastBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
	default:
		linked = d.blockchain.HasHeader(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
	}
	if !linked {
		// This is a programming error. The chain backfiller was called with a
		// tail that's not linked to the local chain. Whilst this should never
		// happen, there might be some weirdnesses if beacon sync backfilling
		// races with the user (or beacon client) calling setHead. Whilst panic
		// would be the ideal thing to do, it is safer long term to attempt a
		// recovery and fix any noticed issue after the fact.
		log.Error("Beacon sync linkup unavailable", "number", beaconTail.Number.Uint64()-1, "hash", beaconTail.ParentHash)
		return 0, fmt.Errorf("beacon linkup unavailable locally: %d [%x]", beaconTail.Number.Uint64()-1, beaconTail.ParentHash)
	}
	// Binary search to find the ancestor
	start, end := beaconTail.Number.Uint64()-1, number
	if number := beaconHead.Number.Uint64(); end > number {
		// This shouldn't really happen in a healthy network, but if the consensus
		// client feeds us a shorter chain as the canonical, we should not attempt
		// to access non-existent skeleton items.
		log.Warn("Beacon head lower than local chain", "beacon", number, "local", end)
		end = number
	}
	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		h := d.skeleton.Header(check)
		n := h.Number.Uint64()

		var known bool
		switch d.getMode() {
		case FullSync:
			known = d.blockchain.HasBlock(h.Hash(), n)
		case SnapSync:
			known = d.blockchain.HasFastBlock(h.Hash(), n)
		default:
			known = d.lightchain.HasHeader(h.Hash(), n)
		}
		if !known {
			end = check
			continue
		}
		start = check
	}
	return start, nil
}

// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling
// until sync errors or is finished.
func (d *Downloader) fetchBeaconHeaders(from uint64) error {
	head, _, err := d.skeleton.Bounds()
	if err != nil {
		return err
	}
	for {
		// Retrieve a batch of headers and feed it to the header processor
		var (
			headers = make([]*types.Header, 0, maxHeadersProcess)
			hashes  = make([]common.Hash, 0, maxHeadersProcess)
		)
		for i := 0; i < maxHeadersProcess && from <= head.Number.Uint64(); i++ {
			headers = append(headers, d.skeleton.Header(from))
			hashes = append(hashes, headers[i].Hash())
			from++
		}
		if len(headers) > 0 {
			log.Trace("Scheduling new beacon headers", "count", len(headers), "from", from-uint64(len(headers)))
			select {
			case d.headerProcCh <- &headerTask{
				headers: headers,
				hashes:  hashes,
			}:
			case <-d.cancelCh:
				return errCanceled
			}
		}
		// If we still have headers to import, loop and keep pushing them
		if from <= head.Number.Uint64() {
			continue
		}
		// If the pivot block is committed, signal header sync termination
		if atomic.LoadInt32(&d.committed) == 1 {
			select {
			case d.headerProcCh <- nil:
				return nil
			case <-d.cancelCh:
				return errCanceled
			}
		}
		// State sync still going, wait a bit for new headers and retry
		log.Trace("Pivot not yet committed, waiting...")
		select {
		case <-time.After(fsHeaderContCheck):
		case <-d.cancelCh:
			return errCanceled
		}
		head, _, err = d.skeleton.Bounds()
		if err != nil {
			return err
		}
	}
}
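
// A standalone sketch (not part of the original file) of the binary-search
// invariant that findBeaconAncestor maintains above: start always points at
// a header known locally and end at one that is not (or at the exclusive
// upper bound), so the loop converges on the highest locally known skeleton
// header, i.e. the common ancestor. knownLocally is a hypothetical stand-in
// for the mode-dependent HasBlock/HasFastBlock/HasHeader checks:
//
//	start, end := tail, head // known at start, unknown (or bound) at end
//	for start+1 < end {
//		mid := (start + end) / 2
//		if knownLocally(mid) {
//			start = mid // ancestor is at or above mid
//		} else {
//			end = mid // ancestor is strictly below mid
//		}
//	}
//	// start now holds the number of the highest locally known header.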