github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/core/chain_indexer.go

// Copyright 2017 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"encoding/binary"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/core/types"
	"github.com/SmartMeshFoundation/Spectrum/ethdb"
	"github.com/SmartMeshFoundation/Spectrum/event"
	"github.com/SmartMeshFoundation/Spectrum/log"
)

// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
// used to create filter blooms or CHTs.
type ChainIndexerBackend interface {
	// Reset initiates the processing of a new chain segment, potentially terminating
	// any partially completed operations (in case of a reorg).
	Reset(section uint64, prevHead common.Hash) error

	// Process crunches through the next header in the chain segment. The caller
	// will ensure a sequential order of headers.
	Process(header *types.Header)

	// Commit finalizes the section metadata and stores it into the database.
	Commit() error
}

// ChainIndexerChain interface is used for connecting the indexer to a blockchain
type ChainIndexerChain interface {
	// CurrentHeader retrieves the latest locally known header.
	CurrentHeader() *types.Header

	// SubscribeChainEvent subscribes to new head header notifications.
	SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription
}
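// The following type is an illustrative sketch, not part of the original file,
// of how the ChainIndexerBackend contract above is meant to be satisfied: Reset
// opens a fresh section (discarding partial work after a reorg), Process receives
// the section's headers strictly in order, and Commit persists the derived data.
// The countingBackend name and its "example-count" key layout are assumptions
// chosen for demonstration only; real backends build bloom-bit or CHT indexes.
type countingBackend struct {
	db      ethdb.Database // index database to commit results into
	section uint64         // section currently being processed
	count   uint64         // number of headers seen in the current section
}

// Reset starts a new section, dropping any partially processed state.
func (b *countingBackend) Reset(section uint64, prevHead common.Hash) error {
	b.section, b.count = section, 0
	return nil
}

// Process counts one sequentially delivered header of the current section.
func (b *countingBackend) Process(header *types.Header) {
	b.count++
}

// Commit persists the per-section result under the illustrative key layout.
func (b *countingBackend) Commit() error {
	var key, value [8]byte
	binary.BigEndian.PutUint64(key[:], b.section)
	binary.BigEndian.PutUint64(value[:], b.count)
	return b.db.Put(append([]byte("example-count"), key[:]...), value[:])
}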
// ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
// ChainEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type ChainIndexer struct {
	chainDb  ethdb.Database      // Chain database to index the data from
	indexDb  ethdb.Database      // Prefixed table-view of the db to write index metadata into
	backend  ChainIndexerBackend // Background processor generating the index data content
	children []*ChainIndexer     // Child indexers to cascade chain updates to

	active uint32          // Flag whether the event loop was started
	update chan struct{}   // Notification channel that headers should be processed
	quit   chan chan error // Quit channel to tear down running goroutines

	sectionSize uint64 // Number of blocks in a single chain segment to process
	confirmsReq uint64 // Number of confirmations before processing a completed segment

	storedSections uint64 // Number of sections successfully indexed into the database
	knownSections  uint64 // Number of sections known to be complete (block wise)
	cascadedHead   uint64 // Block number of the last completed section cascaded to subindexers

	throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources

	log  log.Logger
	lock sync.RWMutex
}

// NewChainIndexer creates a new chain indexer to do background processing on
// chain segments of a given size after a certain number of confirmations have
// passed. The throttling parameter might be used to prevent database thrashing.
func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
	c := &ChainIndexer{
		chainDb:     chainDb,
		indexDb:     indexDb,
		backend:     backend,
		update:      make(chan struct{}, 1),
		quit:        make(chan chan error),
		sectionSize: section,
		confirmsReq: confirm,
		throttling:  throttling,
		log:         log.New("type", kind),
	}
	// Initialize database dependent fields and start the updater
	c.loadValidSections()
	go c.updateLoop()

	return c
}

// AddKnownSectionHead marks a new section head as known/processed if it is newer
// than the already known best section head.
func (c *ChainIndexer) AddKnownSectionHead(section uint64, shead common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if section < c.storedSections {
		return
	}
	c.setSectionHead(section, shead)
	c.setValidSections(section + 1)
}

// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started; they
// are notified about new events by their parents.
func (c *ChainIndexer) Start(chain ChainIndexerChain) {
	events := make(chan ChainEvent, 10)
	sub := chain.SubscribeChainEvent(events)

	go c.eventLoop(chain.CurrentHeader(), events, sub)
}

// Close tears down all goroutines belonging to the indexer and returns any error
// that might have occurred internally.
func (c *ChainIndexer) Close() error {
	var errs []error

	// Tear down the primary update loop
	errc := make(chan error)
	c.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// If needed, tear down the secondary event loop
	if atomic.LoadUint32(&c.active) != 0 {
		c.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close all children
	for _, child := range c.children {
		if err := child.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Return any failures
	switch {
	case len(errs) == 0:
		return nil

	case len(errs) == 1:
		return errs[0]

	default:
		return fmt.Errorf("%v", errs)
	}
}

// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainEvent, sub event.Subscription) {
	// Mark the chain indexer as active, requiring an additional teardown
	atomic.StoreUint32(&c.active, 1)

	defer sub.Unsubscribe()

	// Fire the initial new head event to start any outstanding processing
	c.newHead(currentHeader.Number.Uint64(), false)

	var (
		prevHeader = currentHeader
		prevHash   = currentHeader.Hash()
	)
	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case ev, ok := <-events:
			// Received a new event, ensure it's not nil (closing) and update
			if !ok {
				errc := <-c.quit
				errc <- nil
				return
			}
			header := ev.Block.Header()
			if header.ParentHash != prevHash {
				// Reorg to the common ancestor (might not exist in light sync mode, skip reorg then)
				// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
				if h := FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
					c.newHead(h.Number.Uint64(), true)
				}
			}
			c.newHead(header.Number.Uint64(), false)

			prevHeader, prevHash = header, header.Hash()
		}
	}
}

// newHead notifies the indexer about new chain heads and/or reorgs.
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If a reorg happened, invalidate all sections until that point
	if reorg {
		// Revert the known section number to the reorg point
		changed := head / c.sectionSize
		if changed < c.knownSections {
			c.knownSections = changed
		}
		// Revert the stored sections from the database to the reorg point
		if changed < c.storedSections {
			c.setValidSections(changed)
		}
		// Update the new head number to the finalized section end and notify children
		head = changed * c.sectionSize

		if head < c.cascadedHead {
			c.cascadedHead = head
			for _, child := range c.children {
				child.newHead(c.cascadedHead, true)
			}
		}
		return
	}
	// No reorg, calculate the number of newly known sections and update if high enough
	var sections uint64
	if head >= c.confirmsReq {
		sections = (head + 1 - c.confirmsReq) / c.sectionSize
		if sections > c.knownSections {
			c.knownSections = sections

			select {
			case c.update <- struct{}{}:
			default:
			}
		}
	}
}

// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
	var (
		updating bool
		updated  time.Time
	)

	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case <-c.update:
			// Section headers completed (or rolled back), update the index
			c.lock.Lock()
			if c.knownSections > c.storedSections {
				// Periodically print an upgrade log message to the user
				if time.Since(updated) > 8*time.Second {
					if c.knownSections > c.storedSections+1 {
						updating = true
						c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
					}
					updated = time.Now()
				}
				// Cache the current section count and head to allow unlocking the mutex
				section := c.storedSections
				var oldHead common.Hash
				if section > 0 {
					oldHead = c.SectionHead(section - 1)
				}
				// Process the newly defined section in the background
				c.lock.Unlock()
				newHead, err := c.processSection(section, oldHead)
				if err != nil {
					c.log.Error("Section processing failed", "error", err)
				}
				c.lock.Lock()

				// If processing succeeded and no reorgs occurred, mark the section completed
				if err == nil && oldHead == c.SectionHead(section-1) {
					c.setSectionHead(section, newHead)
					c.setValidSections(section + 1)
					if c.storedSections == c.knownSections && updating {
						updating = false
						c.log.Info("Finished upgrading chain index")
					}

					c.cascadedHead = c.storedSections*c.sectionSize - 1
					for _, child := range c.children {
						c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
						child.newHead(c.cascadedHead, false)
					}
				} else {
					// If processing failed, don't retry until further notification
					c.log.Debug("Chain index processing failed", "section", section, "err", err)
					c.knownSections = c.storedSections
				}
			}
			// If there are still further sections to process, reschedule
			if c.knownSections > c.storedSections {
				time.AfterFunc(c.throttling, func() {
					select {
					case c.update <- struct{}{}:
					default:
					}
				})
			}
			c.lock.Unlock()
		}
	}
}

// processSection processes an entire section by calling backend functions while
// ensuring the continuity of the passed headers. Since the chain mutex is not
// held while processing, the continuity can be broken by a long reorg, in which
// case the function returns with an error.
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
	c.log.Trace("Processing new chain section", "section", section)

	// Reset and partial processing
	if err := c.backend.Reset(section, lastHead); err != nil {
		c.setValidSections(0)
		return common.Hash{}, err
	}

	for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
		hash := GetCanonicalHash(c.chainDb, number)
		if hash == (common.Hash{}) {
			return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
		}
		header := GetHeader(c.chainDb, hash, number)
		if header == nil {
			return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
		} else if header.ParentHash != lastHead {
			return common.Hash{}, fmt.Errorf("chain reorged during section processing")
		}
		c.backend.Process(header)
		lastHead = header.Hash()
	}
	if err := c.backend.Commit(); err != nil {
		c.log.Error("Section commit failed", "error", err)
		return common.Hash{}, err
	}
	return lastHead, nil
}

// Sections returns the number of processed sections maintained by the indexer
// and also the information about the last header indexed for potential canonical
// verifications.
func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1)
}

// AddChildIndexer adds a child ChainIndexer that can use the output of this one.
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.children = append(c.children, indexer)

	// Cascade any pending updates to new children too
	if c.storedSections > 0 {
		indexer.newHead(c.storedSections*c.sectionSize-1, false)
	}
}

// loadValidSections reads the number of valid sections from the index database
// and caches it into the local state.
func (c *ChainIndexer) loadValidSections() {
	data, _ := c.indexDb.Get([]byte("count"))
	if len(data) == 8 {
		c.storedSections = binary.BigEndian.Uint64(data[:])
	}
}

// setValidSections writes the number of valid sections to the index database.
func (c *ChainIndexer) setValidSections(sections uint64) {
	// Set the current number of valid sections in the database
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], sections)
	c.indexDb.Put([]byte("count"), data[:])

	// Remove any reorged sections, caching the valid ones in the meantime
	for c.storedSections > sections {
		c.storedSections--
		c.removeSectionHead(c.storedSections)
	}
	c.storedSections = sections // needed if new > old
}

// SectionHead retrieves the last block hash of a processed section from the
// index database.
func (c *ChainIndexer) SectionHead(section uint64) common.Hash {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
	if len(hash) == len(common.Hash{}) {
		return common.BytesToHash(hash)
	}
	return common.Hash{}
}

// setSectionHead writes the last block hash of a processed section to the index
// database.
func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
}

// removeSectionHead removes the reference to a processed section from the index
// database.
func (c *ChainIndexer) removeSectionHead(section uint64) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Delete(append([]byte("shead"), data[:]...))
}
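// The function below is an illustrative usage sketch, not part of the original
// file, showing how the pieces above are typically wired together: build the
// indexer over a dedicated index database, then attach it to a running chain so
// head events drive background processing (children added via AddChildIndexer
// are driven by their parent and must not be started themselves). The section
// size, confirmation count, throttling value and "example" kind label are
// assumptions chosen purely for demonstration.
func exampleWireChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBackend, chain ChainIndexerChain) *ChainIndexer {
	// Process 4096-block sections once they are 256 blocks deep, pausing 100ms
	// between sections to keep the disk responsive during a long reindex.
	indexer := NewChainIndexer(chainDb, indexDb, backend, 4096, 256, 100*time.Millisecond, "example")

	// Start feeding chain head events into the indexer; call Close to tear it down.
	indexer.Start(chain)
	return indexer
}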