github.com/puffscoin/go-puffscoin@v0.0.0-20190701205704-e48ad5c90fa1/light/postprocess.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-puffscoin library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/puffscoin/go-puffscoin/common"
	"github.com/puffscoin/go-puffscoin/common/bitutil"
	"github.com/puffscoin/go-puffscoin/core"
	"github.com/puffscoin/go-puffscoin/core/rawdb"
	"github.com/puffscoin/go-puffscoin/core/types"
	"github.com/puffscoin/go-puffscoin/ethdb"
	"github.com/puffscoin/go-puffscoin/log"
	"github.com/puffscoin/go-puffscoin/params"
	"github.com/puffscoin/go-puffscoin/rlp"
	"github.com/puffscoin/go-puffscoin/trie"
)

// IndexerConfig includes a set of configs for chain indexers.
type IndexerConfig struct {
	// The block frequency for creating CHTs.
	ChtSize uint64

	// The number of confirmations needed to generate/accept a canonical hash helper trie.
	ChtConfirms uint64

	// The block frequency for creating new bloom bits.
	BloomSize uint64

	// The number of confirmations needed before a bloom section is considered probably final
	// and its rotated bits are calculated.
	BloomConfirms uint64

	// The block frequency for creating BloomTries.
	BloomTrieSize uint64

	// The number of confirmations needed to generate/accept a bloom trie.
	BloomTrieConfirms uint64
}

var (
	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for the server side.
	DefaultServerIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequency,
		ChtConfirms:       params.HelperTrieProcessConfirmations,
		BloomSize:         params.BloomBitsBlocks,
		BloomConfirms:     params.BloomConfirms,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
	}
	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for the client side.
	DefaultClientIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequency,
		ChtConfirms:       params.HelperTrieConfirmations,
		BloomSize:         params.BloomBitsBlocksClient,
		BloomConfirms:     params.HelperTrieConfirmations,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieConfirmations,
	}
	// TestServerIndexerConfig wraps a set of configs as a test indexer config for the server side.
	TestServerIndexerConfig = &IndexerConfig{
		ChtSize:           512,
		ChtConfirms:       4,
		BloomSize:         64,
		BloomConfirms:     4,
		BloomTrieSize:     512,
		BloomTrieConfirms: 4,
	}
	// TestClientIndexerConfig wraps a set of configs as a test indexer config for the client side.
	TestClientIndexerConfig = &IndexerConfig{
		ChtSize:           512,
		ChtConfirms:       32,
		BloomSize:         512,
		BloomConfirms:     32,
		BloomTrieSize:     512,
		BloomTrieConfirms: 32,
	}
)
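// Both helper tries built in this file follow the same pattern: canonical
// headers are processed in fixed-size sections and, once a section is buried
// under the configured number of confirmations, the accumulated trie is
// committed and its root is stored in the database keyed by section index and
// section head hash. Light clients can then prove individual entries against
// a trusted root using Merkle proofs retrieved through the ODR backend.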
var (
	ErrNoTrustedCht       = errors.New("no trusted canonical hash trie")
	ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
	ErrNoHeader           = errors.New("header not found")
	chtPrefix             = []byte("chtRootV2-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
	ChtTablePrefix        = "cht-"
)

// ChtNode structures are stored in the Canonical Hash Trie in an RLP-encoded format
type ChtNode struct {
	Hash common.Hash
	Td   *big.Int
}

// GetChtRoot reads the CHT root associated with the given section from the database
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreChtRoot writes the CHT root associated with the given section into the database
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
	diskdb, trieTable    ethdb.Database
	odr                  OdrBackend
	triedb               *trie.Database
	section, sectionSize uint64
	lastHash             common.Hash
	trie                 *trie.Trie
}

// NewChtIndexer creates a CHT chain indexer
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, ChtTablePrefix)
	backend := &ChtIndexerBackend{
		diskdb:      db,
		odr:         odr,
		trieTable:   trieTable,
		triedb:      trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
		sectionSize: size,
	}
	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
}

// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	batch := c.trieTable.NewBatch()
	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
	for {
		err := c.odr.Retrieve(ctx, r)
		switch err {
		case nil:
			r.Proof.Store(batch)
			return batch.Write()
		case ErrNoPeers:
			// if there are no peers to serve, retry later
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second * 10):
				// stay in the loop and try again
			}
		default:
			return err
		}
	}
}

// Reset implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
	}
	var err error
	c.trie, err = trie.New(root, c.triedb)

	if err != nil && c.odr != nil {
		// The locally stored node set is incomplete; fetch the missing trie nodes over ODR and retry.
		err = c.fetchMissingNodes(ctx, section, root)
		if err == nil {
			c.trie, err = trie.New(root, c.triedb)
		}
	}

	c.section = section
	return err
}
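// Each CHT entry maps a block number to the canonical hash and total difficulty
// of that block: the trie key is the block number as an 8-byte big-endian
// integer and the value is the RLP encoding of ChtNode{Hash, Td}. As an
// illustration, block 4,000,000 is stored under the key 0x00000000003d0900.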
// Process implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	hash, num := header.Hash(), header.Number.Uint64()
	c.lastHash = hash

	td := rawdb.ReadTd(c.diskdb, hash, num)
	if td == nil {
		// The total difficulty must be present for every canonical header being indexed.
		panic(nil)
	}
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], num)
	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
	c.trie.Update(encNumber[:], data)
	return nil
}

// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
	root, err := c.trie.Commit(nil)
	if err != nil {
		return err
	}
	c.triedb.Commit(root, false)

	log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
	return nil
}

var (
	bloomTriePrefix      = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
	BloomTrieTablePrefix = "blt-"
)

// GetBloomTrieRoot reads the BloomTrie root associated with the given section from the database
func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	data, _ := db.Get(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...))
	return common.BytesToHash(data)
}

// StoreBloomTrieRoot writes the BloomTrie root associated with the given section into the database
func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	db.Put(append(append(bloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
}

// BloomTrieIndexerBackend implements core.ChainIndexerBackend
type BloomTrieIndexerBackend struct {
	diskdb, trieTable ethdb.Database
	triedb            *trie.Database
	odr               OdrBackend
	section           uint64
	parentSize        uint64
	size              uint64
	bloomTrieRatio    uint64
	trie              *trie.Trie
	sectionHeads      []common.Hash
}

// NewBloomTrieIndexer creates a BloomTrie chain indexer
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, BloomTrieTablePrefix)
	backend := &BloomTrieIndexerBackend{
		diskdb:     db,
		odr:        odr,
		trieTable:  trieTable,
		triedb:     trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
		parentSize: parentSize,
		size:       size,
	}
	backend.bloomTrieRatio = size / parentSize
	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
	return core.NewChainIndexer(db, rawdb.NewTable(db, "bltIndex-"), backend, size, 0, time.Millisecond*100, "bloomtrie")
}

// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	indexCh := make(chan uint, types.BloomBitLength)
	type res struct {
		nodes *NodeSet
		err   error
	}
	resCh := make(chan res, types.BloomBitLength)
	// Spawn a bounded pool of workers to fetch the per-bit proofs in parallel.
	for i := 0; i < 20; i++ {
		go func() {
			for bitIndex := range indexCh {
				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
				for {
					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
						// if there are no peers to serve, retry later
						select {
						case <-ctx.Done():
							resCh <- res{nil, ctx.Err()}
							return
						case <-time.After(time.Second * 10):
							// stay in the loop and try again
						}
					} else {
						resCh <- res{r.Proofs, err}
						break
					}
				}
			}
		}()
	}

	for i := uint(0); i < types.BloomBitLength; i++ {
		indexCh <- i
	}
	close(indexCh)
	batch := b.trieTable.NewBatch()
	for i := uint(0); i < types.BloomBitLength; i++ {
		res := <-resCh
		if res.err != nil {
			return res.err
		}
		res.nodes.Store(batch)
	}
	return batch.Write()
}

// Reset implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
	var root common.Hash
	if section > 0 {
		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
	}
	var err error
	b.trie, err = trie.New(root, b.triedb)
	if err != nil && b.odr != nil {
		err = b.fetchMissingNodes(ctx, section, root)
		if err == nil {
			b.trie, err = trie.New(root, b.triedb)
		}
	}
	b.section = section
	return err
}

// Process implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	num := header.Number.Uint64() - b.section*b.size
	if (num+1)%b.parentSize == 0 {
		// Remember the head hash of each fully processed parent (bloom bits) section.
		b.sectionHeads[num/b.parentSize] = header.Hash()
	}
	return nil
}
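// Each BloomTrie entry covers a single bloom bit index for an entire BloomTrie
// section: the trie key is the 2-byte big-endian bit index followed by the
// 8-byte big-endian section index, and the value is the compressed
// concatenation of that bit's vectors from the bloomTrieRatio underlying
// bloom bits sections. A zero-length compressed value (an all-zero bit
// vector) causes the key to be deleted rather than stored.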
// Commit implements core.ChainIndexerBackend
func (b *BloomTrieIndexerBackend) Commit() error {
	var compSize, decompSize uint64

	for i := uint(0); i < types.BloomBitLength; i++ {
		var encKey [10]byte
		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
		binary.BigEndian.PutUint64(encKey[2:10], b.section)
		var decomp []byte
		for j := uint64(0); j < b.bloomTrieRatio; j++ {
			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
			if err != nil {
				return err
			}
			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
			if err2 != nil {
				return err2
			}
			decomp = append(decomp, decompData...)
		}
		comp := bitutil.CompressBytes(decomp)

		decompSize += uint64(len(decomp))
		compSize += uint64(len(comp))
		if len(comp) > 0 {
			b.trie.Update(encKey[:], comp)
		} else {
			b.trie.Delete(encKey[:])
		}
	}
	root, err := b.trie.Commit(nil)
	if err != nil {
		return err
	}
	b.triedb.Commit(root, false)

	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
	return nil
}
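// Wiring sketch (assumptions: db is an ethdb.Database and odr is an OdrBackend,
// or nil when all chain data is available locally): both indexers are typically
// constructed from an IndexerConfig, e.g.
//
//	cfg := DefaultClientIndexerConfig
//	chtIndexer := NewChtIndexer(db, odr, cfg.ChtSize, cfg.ChtConfirms)
//	bloomTrieIndexer := NewBloomTrieIndexer(db, odr, cfg.BloomSize, cfg.BloomTrieSize)
//
// When odr is nil, Reset never falls back to fetchMissingNodes and the tries
// are rebuilt purely from local data.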