github.com/fibonacci-chain/fbc@v0.0.0-20231124064014-c7636198c1e9/x/evm/types/indexer.go

package types

import (
	"encoding/binary"
	"path/filepath"
	"sync"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
	ethtypes "github.com/ethereum/go-ethereum/core/types"
	sdk "github.com/fibonacci-chain/fbc/libs/cosmos-sdk/types"
	tmtypes "github.com/fibonacci-chain/fbc/libs/tendermint/types"
	dbm "github.com/fibonacci-chain/fbc/libs/tm-db"
	"github.com/spf13/viper"
)

var (
	indexer           *Indexer
	enableBloomFilter bool
	once              sync.Once
)

type Keeper interface {
	GetBlockBloom(ctx sdk.Context, height int64) ethtypes.Bloom
	GetHeightHash(ctx sdk.Context, height uint64) common.Hash
}

func CloseIndexer() {
	if indexer != nil && indexer.backend.db != nil {
		indexer.backend.db.Close()
	}
}

func GetEnableBloomFilter() bool {
	once.Do(func() {
		enableBloomFilter = viper.GetBool(FlagEnableBloomFilter)
	})
	return enableBloomFilter
}

// Indexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). An Indexer is
// connected to the blockchain through the event system by starting a
// ChainHeadEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type Indexer struct {
	backend bloomIndexer // Background processor generating the index data content

	update chan sdk.Context // Notification channel that headers should be processed
	quit   chan struct{}    // Quit channel to tear down running goroutines

	storedSections uint64 // Number of sections successfully indexed into the database
	processing     uint32 // Atomic flag whether the indexer is processing or not
}

func InitIndexer(db dbm.DB) {
	if !enableBloomFilter {
		return
	}

	indexer = &Indexer{
		backend: initBloomIndexer(db),
		update:  make(chan sdk.Context),
		quit:    make(chan struct{}),
	}
	indexer.setValidSections(indexer.GetValidSections())
}

func BloomDb() dbm.DB {
	dataDir := filepath.Join(viper.GetString("home"), "data")
	db, err := sdk.NewDB(bloomDir, dataDir)
	if err != nil {
		panic(err)
	}
	return db
}

func GetIndexer() *Indexer {
	return indexer
}

func (i *Indexer) StoredSection() uint64 {
	if i != nil {
		return i.storedSections
	}
	return 0
}

func (i *Indexer) IsProcessing() bool {
	return atomic.LoadUint32(&i.processing) == 1
}

func (i *Indexer) ProcessSection(ctx sdk.Context, k Keeper, interval uint64, bloomData *[]*KV) {
	if atomic.SwapUint32(&i.processing, 1) == 1 {
		ctx.Logger().Error("matcher is already running")
		return
	}
	defer func() {
		if r := recover(); r != nil {
			ctx.Logger().Error("ProcessSection panic", "height", ctx.BlockHeight(), "err", r)
		}
	}()
	defer atomic.StoreUint32(&i.processing, 0)
	knownSection := interval / BloomBitsBlocks
	for i.storedSections < knownSection {
		section := i.storedSections
		var lastHead common.Hash
		if section > 0 {
			lastHead = i.sectionHead(section - 1)
		}
		ctx.Logger().Debug("Processing new chain section", "section", section)

		// Reset and partial processing
		if err := i.backend.Reset(section); err != nil {
			i.setValidSections(0)
			ctx.Logger().Error(err.Error())
			return
		}

		begin := section*BloomBitsBlocks + uint64(tmtypes.GetStartBlockHeight())
		end := (section+1)*BloomBitsBlocks + uint64(tmtypes.GetStartBlockHeight())

		for number := begin; number < end; number++ {
			var (
				bloom ethtypes.Bloom
				hash  common.Hash
			)
			ctx = i.updateCtx(ctx)
			// The initial block height here is 1, whereas on Ethereum it is 0,
			// so store an empty bloom and hash for the start block.
			if number == uint64(tmtypes.GetStartBlockHeight()) {
				bloom = ethtypes.Bloom{}
				hash = common.Hash{}
			} else {
				hash = k.GetHeightHash(ctx, number)
				if hash == (common.Hash{}) {
					ctx.Logger().Error("canonical block unknown", "number", number)
					return
				}
				bloom = k.GetBlockBloom(ctx, int64(number))
			}
			if err := i.backend.Process(hash, number, bloom); err != nil {
				ctx.Logger().Error(err.Error())
				return
			}
			lastHead = hash
		}

		bd, err := i.backend.Commit()
		if err != nil {
			ctx.Logger().Error(err.Error())
			return
		}
		i.setSectionHead(section, lastHead)
		i.setValidSections(section + 1)
		i.setBloomData(&bd, section, lastHead)
		*bloomData = bd
	}
}
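// As a worked example of the section arithmetic above, assuming
// BloomBitsBlocks is 4096 (the go-ethereum default) and a start block height
// of 1: section 0 covers blocks 1..4096, section 1 covers blocks 4097..8192,
// and knownSection = interval / BloomBitsBlocks, so a further section is only
// processed once another full BloomBitsBlocks worth of blocks has been
// committed.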
// GetDB gets the db of the bloomIndexer
func (b *Indexer) GetDB() dbm.DB {
	if b != nil {
		return b.backend.db
	}
	return nil
}

// setValidSections writes the number of valid sections to the index database
func (i *Indexer) setValidSections(sections uint64) {
	// Set the current number of valid sections in the database
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], sections)
	i.backend.db.Set([]byte("count"), data[:])

	// Remove any reorged sections, caching the valid ones in the meantime
	for i.storedSections > sections {
		i.storedSections--
		i.removeSectionHead(i.storedSections)
	}
	i.storedSections = sections // needed if new > old
}

// setBloomData puts the SectionHead and ValidSections entries into watcher.bloomData
func (i *Indexer) setBloomData(bloomData *[]*KV, section uint64, hash common.Hash) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)
	*bloomData = append(*bloomData, &KV{Key: append([]byte("shead"), data[:]...), Value: hash.Bytes()})
	*bloomData = append(*bloomData, &KV{Key: []byte("count"), Value: data[:]})
}

// GetValidSections reads the number of valid sections from the index database
// and caches it into the local state.
func (i *Indexer) GetValidSections() uint64 {
	data, _ := i.backend.db.Get([]byte("count"))
	if len(data) == 8 {
		return binary.BigEndian.Uint64(data)
	}
	return 0
}

// sectionHead retrieves the last block hash of a processed section from the
// index database.
func (i *Indexer) sectionHead(section uint64) common.Hash {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	hash, _ := i.backend.db.Get(append([]byte("shead"), data[:]...))
	if len(hash) == len(common.Hash{}) {
		return common.BytesToHash(hash)
	}
	return common.Hash{}
}

// setSectionHead writes the last block hash of a processed section to the index
// database.
func (i *Indexer) setSectionHead(section uint64, hash common.Hash) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	i.backend.db.Set(append([]byte("shead"), data[:]...), hash.Bytes())
}

// removeSectionHead removes the reference to a processed section from the index
// database.
func (i *Indexer) removeSectionHead(section uint64) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	i.backend.db.Delete(append([]byte("shead"), data[:]...))
}
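// For reference, the accessors above give the index database the following
// key layout:
//
//	"count"                               -> 8-byte big-endian number of valid sections
//	"shead" + 8-byte big-endian section   -> 32-byte hash of that section's last block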
func (i *Indexer) NotifyNewHeight(ctx sdk.Context) {
	i.update <- ctx
}

// updateCtx drains any contexts queued on the update channel without blocking
// and returns the most recent one, falling back to oldCtx if none is pending.
func (i *Indexer) updateCtx(oldCtx sdk.Context) sdk.Context {
	newCtx := oldCtx
	for {
		select {
		case newCtx = <-i.update:
		default:
			return newCtx
		}
	}
}
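// The function below is an illustrative sketch, not part of this package's
// API: it shows how a caller such as the evm keeper could wire the pieces in
// this file together. The function name is hypothetical, and passing the
// current block height as the interval is an assumption.
func exampleIndexerWiring(ctx sdk.Context, k Keeper) {
	// Once at start-up: honour the config flag and open the bloom index database.
	if GetEnableBloomFilter() {
		InitIndexer(BloomDb())
	}

	idx := GetIndexer()
	if idx == nil || idx.IsProcessing() {
		return
	}

	// On each committed block: index any sections that are now complete.
	// bloomData receives the "shead"/"count" key-value pairs produced by the run.
	var bloomData []*KV
	idx.ProcessSection(ctx, k, uint64(ctx.BlockHeight()), &bloomData)

	// While a run is in flight, fresher contexts can be handed to it through
	// NotifyNewHeight; ProcessSection picks them up via updateCtx.
	// On shutdown, CloseIndexer releases the underlying database.
}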