github.com/shrimpyuk/bor@v0.2.15-0.20220224151350-fb4ec6020bae/eth/filters/bench_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/node"
)

func BenchmarkBloomBits512(b *testing.B) {
	benchmarkBloomBits(b, 512)
}

func BenchmarkBloomBits1k(b *testing.B) {
	benchmarkBloomBits(b, 1024)
}

func BenchmarkBloomBits2k(b *testing.B) {
	benchmarkBloomBits(b, 2048)
}

func BenchmarkBloomBits4k(b *testing.B) {
	benchmarkBloomBits(b, 4096)
}

func BenchmarkBloomBits8k(b *testing.B) {
	benchmarkBloomBits(b, 8192)
}

func BenchmarkBloomBits16k(b *testing.B) {
	benchmarkBloomBits(b, 16384)
}

func BenchmarkBloomBits32k(b *testing.B) {
	benchmarkBloomBits(b, 32768)
}

const benchFilterCnt = 2000

// benchmarkBloomBits regenerates the bloombits index for the chain found in the
// default Geth data directory using the given section size, then benchmarks
// address filters over the full block range.
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
	benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
	b.Log("Running bloombits benchmark section size:", sectionSize)

	db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
	if err != nil {
		b.Fatalf("error opening database at %v: %v", benchDataDir, err)
	}
	head := rawdb.ReadHeadBlockHash(db)
	if head == (common.Hash{}) {
		b.Fatalf("chain data not found at %v", benchDataDir)
	}

	clearBloomBits(db)
	b.Log("Generating bloombits data...")
	headNum := rawdb.ReadHeaderNumber(db, head)
	if headNum == nil || *headNum < sectionSize+512 {
		b.Fatalf("not enough blocks for running a benchmark")
	}

	start := time.Now()
	cnt := (*headNum - 512) / sectionSize
	var dataSize, compSize uint64
	for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
		bc, err := bloombits.NewGenerator(uint(sectionSize))
		if err != nil {
			b.Fatalf("failed to create generator: %v", err)
		}
		var header *types.Header
		for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
			hash := rawdb.ReadCanonicalHash(db, i)
			header = rawdb.ReadHeader(db, hash, i)
			if header == nil {
				b.Fatalf("Error creating bloomBits data")
			}
			bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
		}
		sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
		for i := 0; i < types.BloomBitLength; i++ {
			data, err := bc.Bitset(uint(i))
			if err != nil {
				b.Fatalf("failed to retrieve bitset: %v", err)
			}
			comp := bitutil.CompressBytes(data)
			dataSize += uint64(len(data))
			compSize += uint64(len(comp))
			rawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
		}
		//if sectionIdx%50 == 0 {
		//	b.Log(" section", sectionIdx, "/", cnt)
		//}
	}

	d := time.Since(start)
	b.Log("Finished generating bloombits data")
	b.Log(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block")
	b.Log(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize))

	b.Log("Running filter benchmarks...")
	start = time.Now()
	var backend *testBackend

	for i := 0; i < benchFilterCnt; i++ {
		if i%20 == 0 {
			db.Close()
			db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
			backend = &testBackend{db: db, sections: cnt}
		}
		var addr common.Address
		addr[0] = byte(i)
		addr[1] = byte(i / 256)
		filter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
		if _, err := filter.Logs(context.Background()); err != nil {
			b.Error("filter.Find error:", err)
		}
	}
	d = time.Since(start)
	b.Log("Finished running filter benchmarks")
	b.Log(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks")
	db.Close()
}

var bloomBitsPrefix = []byte("bloomBits-")

// clearBloomBits removes any previously generated bloombits data so that each
// benchmark run starts from a clean slate.
func clearBloomBits(db ethdb.Database) {
	fmt.Println("Clearing bloombits data...")
	it := db.NewIterator(bloomBitsPrefix, nil)
	for it.Next() {
		db.Delete(it.Key())
	}
	it.Release()
}

// BenchmarkNoBloomBits runs a single full-range address filter without any
// bloombits index, as a baseline for the benchmarks above.
func BenchmarkNoBloomBits(b *testing.B) {
	benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
	b.Log("Running benchmark without bloombits")
	db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
	if err != nil {
		b.Fatalf("error opening database at %v: %v", benchDataDir, err)
	}
	head := rawdb.ReadHeadBlockHash(db)
	if head == (common.Hash{}) {
		b.Fatalf("chain data not found at %v", benchDataDir)
	}
	headNum := rawdb.ReadHeaderNumber(db, head)

	clearBloomBits(db)

	b.Log("Running filter benchmarks...")
	start := time.Now()
	backend := &testBackend{db: db}
	filter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil)
	filter.Logs(context.Background())
	d := time.Since(start)
	b.Log("Finished running filter benchmarks")
	b.Log(" ", d, "total ", d*time.Duration(1000000)/time.Duration(*headNum+1), "per million blocks")
	db.Close()
}
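For reference, below is a minimal, self-contained sketch of the bloombits generation flow that benchmarkBloomBits exercises, using a synthetic bloom pattern instead of on-disk chain data. The section size and the 0xff pattern are arbitrary illustration values, not part of the benchmark file; the generator, bitset, and compression calls are the same go-ethereum APIs used above.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Section size chosen for illustration; it must be a multiple of 8.
	const sectionSize = 4096

	gen, err := bloombits.NewGenerator(sectionSize)
	if err != nil {
		log.Fatalf("failed to create generator: %v", err)
	}

	// Feed one bloom per block in the section. The benchmark above reads
	// header.Bloom from the chain database; here a synthetic pattern is used.
	for i := uint(0); i < sectionSize; i++ {
		var bloom types.Bloom
		bloom[i%types.BloomByteLength] = 0xff
		if err := gen.AddBloom(i, bloom); err != nil {
			log.Fatalf("failed to add bloom: %v", err)
		}
	}

	// Extract each of the 2048 rotated bit vectors and compress them; this is
	// the data that benchmarkBloomBits persists via rawdb.WriteBloomBits.
	var dataSize, compSize int
	for bit := 0; bit < types.BloomBitLength; bit++ {
		data, err := gen.Bitset(uint(bit))
		if err != nil {
			log.Fatalf("failed to retrieve bitset: %v", err)
		}
		comp := bitutil.CompressBytes(data)
		dataSize += len(data)
		compSize += len(comp)
	}
	fmt.Printf("raw: %d bytes, compressed: %d bytes\n", dataSize, compSize)
}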