// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"encoding/binary"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/Gessiux/neatchain/chain/core/rawdb"

	"github.com/Gessiux/neatchain/chain/core/types"
	"github.com/Gessiux/neatchain/chain/log"
	"github.com/Gessiux/neatchain/neatdb"
	"github.com/Gessiux/neatchain/utilities/common"
	"github.com/Gessiux/neatchain/utilities/event"
)

// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
// used to create filter blooms or CHTs.
type ChainIndexerBackend interface {
	// Reset initiates the processing of a new chain segment, potentially terminating
	// any partially completed operations (in case of a reorg).
	Reset(section uint64, prevHead common.Hash) error

	// Process crunches through the next header in the chain segment. The caller
	// will ensure a sequential order of headers.
	Process(header *types.Header)

	// Commit finalizes the section metadata and stores it into the database.
	Commit() error
}

// ChainIndexerChain interface is used for connecting the indexer to a blockchain
type ChainIndexerChain interface {
	// CurrentHeader retrieves the latest locally known header.
	CurrentHeader() *types.Header

	// SubscribeChainEvent subscribes to new head header notifications.
	SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription
}

// ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
// ChainEventLoop in a goroutine.
//
// Further side ChainIndexers can be added which use the output of the parent
// section indexer. These side indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type ChainIndexer struct {
	chainDb neatdb.Database     // Chain database to index the data from
	indexDb neatdb.Database     // Prefixed table-view of the db to write index metadata into
	backend ChainIndexerBackend // Background processor generating the index data content
	sideren []*ChainIndexer     // Side indexers to cascade chain updates to

	active uint32          // Flag whether the event loop was started (set atomically by eventLoop)
	update chan struct{}   // Notification channel that headers should be processed
	quit   chan chan error // Quit channel to tear down running goroutines

	sectionSize uint64 // Number of blocks in a single chain segment to process
	confirmsReq uint64 // Number of confirmations before processing a completed segment

	storedSections uint64 // Number of sections successfully indexed into the database
	knownSections  uint64 // Number of sections known to be complete (block wise)
	cascadedHead   uint64 // Block number of the last completed section cascaded to subindexers

	throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources

	log  log.Logger
	lock sync.RWMutex // Guards the section counters and cascade state above
}

// NewChainIndexer creates a new chain indexer to do background processing on
// chain segments of a given size after certain number of confirmations passed.
// The throttling parameter might be used to prevent database thrashing.
func NewChainIndexer(chainDb, indexDb neatdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
	c := &ChainIndexer{
		chainDb:     chainDb,
		indexDb:     indexDb,
		backend:     backend,
		update:      make(chan struct{}, 1), // buffered so notifications coalesce without blocking
		quit:        make(chan chan error),
		sectionSize: section,
		confirmsReq: confirm,
		throttling:  throttling,
		log:         log.New("type", kind),
	}
	// Initialize database dependent fields and start the updater
	c.loadValidSections()
	go c.updateLoop()

	return c
}

// AddKnownSectionHead marks a new section head as known/processed if it is newer
// than the already known best section head
func (c *ChainIndexer) AddKnownSectionHead(section uint64, shead common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Ignore heads at or below the already stored section count
	if section < c.storedSections {
		return
	}
	c.setSectionHead(section, shead)
	c.setValidSections(section + 1)
}

// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started, they
// are notified about new events by their parents.
func (c *ChainIndexer) Start(chain ChainIndexerChain) {
	events := make(chan ChainEvent, 10)
	sub := chain.SubscribeChainEvent(events)

	go c.eventLoop(chain.CurrentHeader(), events, sub)
}

// Close tears down all goroutines belonging to the indexer and returns any error
// that might have occurred internally.
func (c *ChainIndexer) Close() error {
	var errs []error

	// Tear down the primary update loop; the error channel is reused for both
	// handshakes below.
	errc := make(chan error)
	c.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// If needed, tear down the secondary event loop (only running if Start was
	// called, which sets the active flag)
	if atomic.LoadUint32(&c.active) != 0 {
		c.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close all sideren
	for _, side := range c.sideren {
		if err := side.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Return any failures
	switch {
	case len(errs) == 0:
		return nil

	case len(errs) == 1:
		return errs[0]

	default:
		return fmt.Errorf("%v", errs)
	}
}

// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainEvent, sub event.Subscription) {
	// Mark the chain indexer as active, requiring an additional teardown
	atomic.StoreUint32(&c.active, 1)

	defer sub.Unsubscribe()

	// Fire the initial new head event to start any outstanding processing
	c.newHead(currentHeader.Number.Uint64(), false)

	var (
		prevHeader = currentHeader
		prevHash   = currentHeader.Hash()
	)
	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case ev, ok := <-events:
			// Received a new event, ensure it's not nil (closing) and update
			if !ok {
				// Subscription channel closed: wait for the teardown request
				// before exiting so Close does not block on its send.
				errc := <-c.quit
				errc <- nil
				return
			}
			header := ev.Block.Header()
			if header.ParentHash != prevHash {
				// Reorg to the common ancestor (might not exist in light sync mode, skip reorg then)
				// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?

				// Only treat this as a reorg if the previous head really left the
				// canonical chain; otherwise we simply skipped some headers.
				if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
					if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
						c.newHead(h.Number.Uint64(), true)
					}
				}
			}
			c.newHead(header.Number.Uint64(), false)

			prevHeader, prevHash = header, header.Hash()
		}
	}
}

// newHead notifies the indexer about new chain heads and/or reorgs.
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If a reorg happened, invalidate all sections until that point
	if reorg {
		// Revert the known section number to the reorg point
		changed := head / c.sectionSize
		if changed < c.knownSections {
			c.knownSections = changed
		}
		// Revert the stored sections from the database to the reorg point
		if changed < c.storedSections {
			c.setValidSections(changed)
		}
		// Update the new head number to the finalized section end and notify sideren
		head = changed * c.sectionSize

		if head < c.cascadedHead {
			c.cascadedHead = head
			for _, side := range c.sideren {
				side.newHead(c.cascadedHead, true)
			}
		}
		return
	}
	// No reorg, calculate the number of newly known sections and update if high enough
	var sections uint64
	if head >= c.confirmsReq {
		sections = (head + 1 - c.confirmsReq) / c.sectionSize
		if sections > c.knownSections {
			c.knownSections = sections

			// Non-blocking nudge: the update channel is buffered with size 1,
			// so a pending notification is enough.
			select {
			case c.update <- struct{}{}:
			default:
			}
		}
	}
}

// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
	var (
		updating bool      // whether an "Upgrading chain index" banner is in progress
		updated  time.Time // last time a progress log line was printed
	)

	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case <-c.update:
			// Section headers completed (or rolled back), update the index
			c.lock.Lock()
			if c.knownSections > c.storedSections {
				// Periodically print an upgrade log message to the user
				if time.Since(updated) > 8*time.Second {
					if c.knownSections > c.storedSections+1 {
						updating = true
						c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
					}
					updated = time.Now()
				}
				// Cache the current section count and head to allow unlocking the mutex
				section := c.storedSections
				var oldHead common.Hash
				if section > 0 {
					oldHead = c.SectionHead(section - 1)
				}
				// Process the newly defined section in the background
				c.lock.Unlock()
				newHead, err := c.processSection(section, oldHead)
				if err != nil {
					c.log.Error("Section processing failed", "error", err)
				}
				c.lock.Lock()

				// If processing succeeded and no reorgs occurred, mark the section completed.
				// NOTE(review): for section==0 the section-1 argument underflows to
				// MaxUint64; this appears to rely on SectionHead returning the zero
				// hash for a missing entry so the comparison still holds — confirm.
				if err == nil && oldHead == c.SectionHead(section-1) {
					c.setSectionHead(section, newHead)
					c.setValidSections(section + 1)
					if c.storedSections == c.knownSections && updating {
						updating = false
						c.log.Info("Finished upgrading chain index")
					}

					c.cascadedHead = c.storedSections*c.sectionSize - 1
					for _, side := range c.sideren {
						c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
						side.newHead(c.cascadedHead, false)
					}
				} else {
					// If processing failed, don't retry until further notification
					c.log.Debug("Chain index processing failed", "section", section, "err", err)
					c.knownSections = c.storedSections
				}
			}
			// If there are still further sections to process, reschedule
			if c.knownSections > c.storedSections {
				time.AfterFunc(c.throttling, func() {
					select {
					case c.update <- struct{}{}:
					default:
					}
				})
			}
			c.lock.Unlock()
		}
	}
}

// processSection processes an entire section by calling backend functions while
// ensuring the continuity of the passed headers. Since the chain mutex is not
// held while processing, the continuity can be broken by a long reorg, in which
// case the function returns with an error.
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
	c.log.Trace("Processing new chain section", "section", section)

	// Reset and partial processing

	if err := c.backend.Reset(section, lastHead); err != nil {
		// Backend could not restart cleanly: invalidate everything stored so
		// the index is rebuilt from scratch.
		c.setValidSections(0)
		return common.Hash{}, err
	}

	// Feed every canonical header of the section to the backend, verifying
	// parent linkage to detect a reorg racing with us.
	for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
		hash := rawdb.ReadCanonicalHash(c.chainDb, number)
		if hash == (common.Hash{}) {
			return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
		}
		header := rawdb.ReadHeader(c.chainDb, hash, number)
		if header == nil {
			return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
		} else if header.ParentHash != lastHead {
			return common.Hash{}, fmt.Errorf("chain reorged during section processing")
		}
		c.backend.Process(header)
		lastHead = header.Hash()
	}
	if err := c.backend.Commit(); err != nil {
		c.log.Error("Section commit failed", "error", err)
		return common.Hash{}, err
	}
	return lastHead, nil
}

// Sections returns the number of processed sections maintained by the indexer
// and also the information about the last header indexed for potential canonical
// verifications.
377 func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) { 378 c.lock.Lock() 379 defer c.lock.Unlock() 380 381 return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1) 382 } 383 384 // AddChildIndexer adds a side ChainIndexer that can use the output of this one 385 func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) { 386 c.lock.Lock() 387 defer c.lock.Unlock() 388 389 c.sideren = append(c.sideren, indexer) 390 391 // Cascade any pending updates to new sideren too 392 if c.storedSections > 0 { 393 indexer.newHead(c.storedSections*c.sectionSize-1, false) 394 } 395 } 396 397 // loadValidSections reads the number of valid sections from the index database 398 // and caches is into the local state. 399 func (c *ChainIndexer) loadValidSections() { 400 data, _ := c.indexDb.Get([]byte("count")) 401 if len(data) == 8 { 402 c.storedSections = binary.BigEndian.Uint64(data[:]) 403 } 404 } 405 406 // setValidSections writes the number of valid sections to the index database 407 func (c *ChainIndexer) setValidSections(sections uint64) { 408 // Set the current number of valid sections in the database 409 var data [8]byte 410 binary.BigEndian.PutUint64(data[:], sections) 411 c.indexDb.Put([]byte("count"), data[:]) 412 413 // Remove any reorged sections, caching the valids in the mean time 414 for c.storedSections > sections { 415 c.storedSections-- 416 c.removeSectionHead(c.storedSections) 417 } 418 c.storedSections = sections // needed if new > old 419 } 420 421 // SectionHead retrieves the last block hash of a processed section from the 422 // index database. 
423 func (c *ChainIndexer) SectionHead(section uint64) common.Hash { 424 var data [8]byte 425 binary.BigEndian.PutUint64(data[:], section) 426 427 hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...)) 428 if len(hash) == len(common.Hash{}) { 429 return common.BytesToHash(hash) 430 } 431 return common.Hash{} 432 } 433 434 // setSectionHead writes the last block hash of a processed section to the index 435 // database. 436 func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) { 437 var data [8]byte 438 binary.BigEndian.PutUint64(data[:], section) 439 440 c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes()) 441 } 442 443 // removeSectionHead removes the reference to a processed section from the index 444 // database. 445 func (c *ChainIndexer) removeSectionHead(section uint64) { 446 var data [8]byte 447 binary.BigEndian.PutUint64(data[:], section) 448 449 c.indexDb.Delete(append([]byte("shead"), data[:]...)) 450 }