github.com/ebceco/ebc@v1.8.19-0.20190309150932-8cb0b9e06484/light/postprocess.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/ebceco/ebc/common"
	"github.com/ebceco/ebc/common/bitutil"
	"github.com/ebceco/ebc/core"
	"github.com/ebceco/ebc/core/rawdb"
	"github.com/ebceco/ebc/core/types"
	"github.com/ebceco/ebc/ethdb"
	"github.com/ebceco/ebc/log"
	"github.com/ebceco/ebc/params"
	"github.com/ebceco/ebc/rlp"
	"github.com/ebceco/ebc/trie"
)

// IndexerConfig includes a set of configs for chain indexers.
type IndexerConfig struct {
	// The block frequency for creating CHTs.
	ChtSize uint64

	// An auxiliary field holding the opposite side's CHT size: the client's CHT size
	// in a server config, and the server's CHT size in a client config.
	PairChtSize uint64

	// The number of confirmations needed to generate/accept a canonical hash trie.
	ChtConfirms uint64

	// The block frequency for creating new bloom bits.
	BloomSize uint64

	// The number of confirmations needed before a bloom section is considered probably final and its rotated bits
	// are calculated.
	BloomConfirms uint64

	// The block frequency for creating BloomTrie.
	BloomTrieSize uint64

	// The number of confirmations needed to generate/accept a bloom trie.
	BloomTrieConfirms uint64
}

var (
	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
	DefaultServerIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequencyServer,
		PairChtSize:       params.CHTFrequencyClient,
		ChtConfirms:       params.HelperTrieProcessConfirmations,
		BloomSize:         params.BloomBitsBlocks,
		BloomConfirms:     params.BloomConfirms,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
	}
	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
	DefaultClientIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequencyClient,
		PairChtSize:       params.CHTFrequencyServer,
		ChtConfirms:       params.HelperTrieConfirmations,
		BloomSize:         params.BloomBitsBlocksClient,
		BloomConfirms:     params.HelperTrieConfirmations,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieConfirmations,
	}
	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
	TestServerIndexerConfig = &IndexerConfig{
		ChtSize:           64,
		PairChtSize:       512,
		ChtConfirms:       4,
		BloomSize:         64,
		BloomConfirms:     4,
		BloomTrieSize:     512,
		BloomTrieConfirms: 4,
	}
	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
	TestClientIndexerConfig = &IndexerConfig{
		ChtSize:           512,
		PairChtSize:       64,
		ChtConfirms:       32,
		BloomSize:         512,
		BloomConfirms:     32,
		BloomTrieSize:     512,
		BloomTrieConfirms: 32,
	}
)
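// Illustrative sketch, not part of the original file: the hypothetical helper
// below only spells out the section arithmetic implied by ChtSize, which the
// indexers in this file rely on. Section n covers the block range
// [n*ChtSize, (n+1)*ChtSize-1].
func chtSectionRange(cfg *IndexerConfig, section uint64) (first, last uint64) {
	first = section * cfg.ChtSize
	last = (section+1)*cfg.ChtSize - 1
	return first, last
}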
// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{
	params.MainnetGenesisHash: params.MainnetTrustedCheckpoint,
	params.TestnetGenesisHash: params.TestnetTrustedCheckpoint,
	params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint,
}

var (
	ErrNoTrustedCht       = errors.New("no trusted canonical hash trie")
	ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
	ErrNoHeader           = errors.New("header not found")
	chtPrefix             = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
	ChtTablePrefix        = "cht-"
)

// ChtNode structures are stored in the Canonical Hash Trie in an RLP-encoded format
type ChtNode struct {
	Hash common.Hash
	Td   *big.Int
}

// GetChtRoot reads the CHT root associated with the given section from the database.
// Note that sectionIdx is specified according to LES/1 CHT section size.
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreChtRoot writes the CHT root associated with the given section into the database.
// Note that sectionIdx is specified according to LES/1 CHT section size.
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}
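// Illustrative sketch, not part of the original file: the hypothetical helper
// below only makes the database key layout used by GetChtRoot/StoreChtRoot
// explicit: chtPrefix ++ uint64 big-endian section index ++ section head hash.
func chtRootKey(sectionIdx uint64, sectionHead common.Hash) []byte {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	return append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...)
}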
// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
	diskdb, trieTable    ethdb.Database
	odr                  OdrBackend
	triedb               *trie.Database
	section, sectionSize uint64
	lastHash             common.Hash
	trie                 *trie.Trie
}

// NewChtIndexer creates a CHT chain indexer
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *core.ChainIndexer {
	trieTable := ethdb.NewTable(db, ChtTablePrefix)
	backend := &ChtIndexerBackend{
		diskdb:      db,
		odr:         odr,
		trieTable:   trieTable,
		triedb:      trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
		sectionSize: size,
	}
	return core.NewChainIndexer(db, ethdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
}

// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	batch := c.trieTable.NewBatch()
	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
	for {
		err := c.odr.Retrieve(ctx, r)
		switch err {
		case nil:
			r.Proof.Store(batch)
			return batch.Write()
		case ErrNoPeers:
			// if there are no peers to serve, retry later
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second * 10):
				// stay in the loop and try again
			}
		default:
			return err
		}
	}
}

// Reset implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
	}
	var err error
	c.trie, err = trie.New(root, c.triedb)

	if err != nil && c.odr != nil {
		err = c.fetchMissingNodes(ctx, section, root)
		if err == nil {
			c.trie, err = trie.New(root, c.triedb)
		}
	}

	c.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	hash, num := header.Hash(), header.Number.Uint64()
	c.lastHash = hash

	td := rawdb.ReadTd(c.diskdb, hash, num)
	if td == nil {
		panic(nil)
	}
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], num)
	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
	c.trie.Update(encNumber[:], data)
	return nil
}

// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
	root, err := c.trie.Commit(nil)
	if err != nil {
		return err
	}
	c.triedb.Commit(root, false)

	if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
		log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
	}
	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
	return nil
}
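// Illustrative sketch, not part of the original file: a CHT leaf maps the
// big-endian block number to the RLP encoding of ChtNode{Hash, Td}, as built
// in Process above. The hypothetical helper below shows how such a leaf value
// could be decoded again.
func decodeChtNode(enc []byte) (ChtNode, error) {
	var node ChtNode
	err := rlp.DecodeBytes(enc, &node)
	return node, err
}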
var (
	bloomTriePrefix      = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
	BloomTrieTablePrefix = "blt-"
)

// GetBloomTrieRoot reads the BloomTrie root associated with the given section from the database
func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreBloomTrieRoot writes the BloomTrie root associated with the given section into the database
func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// BloomTrieIndexerBackend implements core.ChainIndexerBackend
type BloomTrieIndexerBackend struct {
	diskdb, trieTable ethdb.Database
	triedb            *trie.Database
	odr               OdrBackend
	section           uint64
	parentSize        uint64
	size              uint64
	bloomTrieRatio    uint64
	trie              *trie.Trie
	sectionHeads      []common.Hash
}

// NewBloomTrieIndexer creates a BloomTrie chain indexer
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64) *core.ChainIndexer {
	trieTable := ethdb.NewTable(db, BloomTrieTablePrefix)
	backend := &BloomTrieIndexerBackend{
		diskdb:     db,
		odr:        odr,
		trieTable:  trieTable,
		triedb:     trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
		parentSize: parentSize,
		size:       size,
	}
	backend.bloomTrieRatio = size / parentSize
	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
	return core.NewChainIndexer(db, ethdb.NewTable(db, "bltIndex-"), backend, size, 0, time.Millisecond*100, "bloomtrie")
}

// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	indexCh := make(chan uint, types.BloomBitLength)
	type res struct {
		nodes *NodeSet
		err   error
	}
	resCh := make(chan res, types.BloomBitLength)
	for i := 0; i < 20; i++ {
		go func() {
			for bitIndex := range indexCh {
				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
				for {
					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
						// if there are no peers to serve, retry later
						select {
						case <-ctx.Done():
							resCh <- res{nil, ctx.Err()}
							return
						case <-time.After(time.Second * 10):
							// stay in the loop and try again
						}
					} else {
						resCh <- res{r.Proofs, err}
						break
					}
				}
			}
		}()
	}

	for i := uint(0); i < types.BloomBitLength; i++ {
		indexCh <- i
	}
	close(indexCh)
	batch := b.trieTable.NewBatch()
	for i := uint(0); i < types.BloomBitLength; i++ {
		res := <-resCh
		if res.err != nil {
			return res.err
		}
		res.nodes.Store(batch)
	}
	return batch.Write()
}
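// Illustrative sketch, not part of the original file: a bloom trie section of
// `size` blocks aggregates size/parentSize bloom bit sections. The hypothetical
// helper below lists the bloom bit section indexes covered by one bloom trie
// section, mirroring the inner loop of Commit below.
func bloomBitSections(bloomTrieSection, bloomTrieRatio uint64) []uint64 {
	sections := make([]uint64, 0, bloomTrieRatio)
	for j := uint64(0); j < bloomTrieRatio; j++ {
		sections = append(sections, bloomTrieSection*bloomTrieRatio+j)
	}
	return sections
}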
// Reset implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
	}
	var err error
	b.trie, err = trie.New(root, b.triedb)
	if err != nil && b.odr != nil {
		err = b.fetchMissingNodes(ctx, section, root)
		if err == nil {
			b.trie, err = trie.New(root, b.triedb)
		}
	}
	b.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	num := header.Number.Uint64() - b.section*b.size
	if (num+1)%b.parentSize == 0 {
		b.sectionHeads[num/b.parentSize] = header.Hash()
	}
	return nil
}

// Commit implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Commit() error {
	var compSize, decompSize uint64

	for i := uint(0); i < types.BloomBitLength; i++ {
		var encKey [10]byte
		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
		binary.BigEndian.PutUint64(encKey[2:10], b.section)
		var decomp []byte
		for j := uint64(0); j < b.bloomTrieRatio; j++ {
			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
			if err != nil {
				return err
			}
			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
			if err2 != nil {
				return err2
			}
			decomp = append(decomp, decompData...)
		}
		comp := bitutil.CompressBytes(decomp)

		decompSize += uint64(len(decomp))
		compSize += uint64(len(comp))
		if len(comp) > 0 {
			b.trie.Update(encKey[:], comp)
		} else {
			b.trie.Delete(encKey[:])
		}
	}
	root, err := b.trie.Commit(nil)
	if err != nil {
		return err
	}
	b.triedb.Commit(root, false)

	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
	return nil
}
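// Illustrative sketch, not part of the original file: bloom trie leaves are
// keyed by a 2-byte big-endian bit index followed by an 8-byte big-endian
// section index, exactly as encoded in Commit above. The hypothetical helper
// below only makes that key layout explicit.
func bloomTrieKey(bitIndex uint, section uint64) []byte {
	var encKey [10]byte
	binary.BigEndian.PutUint16(encKey[0:2], uint16(bitIndex))
	binary.BigEndian.PutUint64(encKey[2:10], section)
	return encKey[:]
}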