github.com/bcnmy/go-ethereum@v1.10.27/eth/filters/bench_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/node"
)

func BenchmarkBloomBits512(b *testing.B) {
	benchmarkBloomBits(b, 512)
}

func BenchmarkBloomBits1k(b *testing.B) {
	benchmarkBloomBits(b, 1024)
}

func BenchmarkBloomBits2k(b *testing.B) {
	benchmarkBloomBits(b, 2048)
}

func BenchmarkBloomBits4k(b *testing.B) {
	benchmarkBloomBits(b, 4096)
}

func BenchmarkBloomBits8k(b *testing.B) {
	benchmarkBloomBits(b, 8192)
}

func BenchmarkBloomBits16k(b *testing.B) {
	benchmarkBloomBits(b, 16384)
}

func BenchmarkBloomBits32k(b *testing.B) {
	benchmarkBloomBits(b, 32768)
}

// benchFilterCnt is the number of per-address range filters run against the
// regenerated bloombits data.
const benchFilterCnt = 2000

// benchmarkBloomBits regenerates the bloombits index for an existing chain
// database using the given section size, then measures range-filter queries
// against the rebuilt index.
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
	b.Skip("test disabled: this test presumes (and modifies) an existing datadir.")
	benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
	b.Log("Running bloombits benchmark section size:", sectionSize)

	db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
	if err != nil {
		b.Fatalf("error opening database at %v: %v", benchDataDir, err)
	}
	head := rawdb.ReadHeadBlockHash(db)
	if head == (common.Hash{}) {
		b.Fatalf("chain data not found at %v", benchDataDir)
	}

	clearBloomBits(db)
	b.Log("Generating bloombits data...")
	headNum := rawdb.ReadHeaderNumber(db, head)
	if headNum == nil || *headNum < sectionSize+512 {
		b.Fatalf("not enough blocks for running a benchmark")
	}

	start := time.Now()
	cnt := (*headNum - 512) / sectionSize
	var dataSize, compSize uint64
	for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
		bc, err := bloombits.NewGenerator(uint(sectionSize))
		if err != nil {
			b.Fatalf("failed to create generator: %v", err)
		}
		var header *types.Header
		for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
			hash := rawdb.ReadCanonicalHash(db, i)
			if header = rawdb.ReadHeader(db, hash, i); header == nil {
				b.Fatalf("Error creating bloomBits data")
				return
			}
			bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
		}
		sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
		for i := 0; i < types.BloomBitLength; i++ {
			data, err := bc.Bitset(uint(i))
			if err != nil {
				b.Fatalf("failed to retrieve bitset: %v", err)
			}
			comp := bitutil.CompressBytes(data)
			dataSize += uint64(len(data))
			compSize += uint64(len(comp))
			rawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
		}
		//if sectionIdx%50 == 0 {
		//	b.Log(" section", sectionIdx, "/", cnt)
		//}
	}

	d := time.Since(start)
	b.Log("Finished generating bloombits data")
	b.Log(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block")
	b.Log(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize))

	b.Log("Running filter benchmarks...")
	start = time.Now()

	var (
		backend *testBackend
		sys     *FilterSystem
	)
	for i := 0; i < benchFilterCnt; i++ {
		if i%20 == 0 {
			db.Close()
			db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
			backend = &testBackend{db: db, sections: cnt}
			sys = NewFilterSystem(backend, Config{})
		}
		var addr common.Address
		addr[0] = byte(i)
		addr[1] = byte(i / 256)
		filter := sys.NewRangeFilter(0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
		if _, err := filter.Logs(context.Background()); err != nil {
			b.Error("filter.Logs error:", err)
		}
	}

	d = time.Since(start)
	b.Log("Finished running filter benchmarks")
	b.Log(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks")
	db.Close()
}

// clearBloomBits deletes any previously generated bloombits entries so each
// benchmark run starts from a clean index.
//
//nolint:unused
func clearBloomBits(db ethdb.Database) {
	var bloomBitsPrefix = []byte("bloomBits-")
	fmt.Println("Clearing bloombits data...")
	it := db.NewIterator(bloomBitsPrefix, nil)
	for it.Next() {
		db.Delete(it.Key())
	}
	it.Release()
}

// BenchmarkNoBloomBits runs a single range filter over the whole chain after
// clearing the bloombits index, forcing an unindexed, block-by-block scan.
func BenchmarkNoBloomBits(b *testing.B) {
	b.Skip("test disabled: this test presumes (and modifies) an existing datadir.")
	benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
	b.Log("Running benchmark without bloombits")
	db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
	if err != nil {
		b.Fatalf("error opening database at %v: %v", benchDataDir, err)
	}
	head := rawdb.ReadHeadBlockHash(db)
	if head == (common.Hash{}) {
		b.Fatalf("chain data not found at %v", benchDataDir)
	}
	headNum := rawdb.ReadHeaderNumber(db, head)

	clearBloomBits(db)

	_, sys := newTestFilterSystem(b, db, Config{})

	b.Log("Running filter benchmarks...")
	start := time.Now()
	filter := sys.NewRangeFilter(0, int64(*headNum), []common.Address{{}}, nil)
	filter.Logs(context.Background())
	d := time.Since(start)
	b.Log("Finished running filter benchmarks")
	b.Log(" ", d, "total ", d*time.Duration(1000000)/time.Duration(*headNum+1), "per million blocks")
	db.Close()
}
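
For context, the core of the generation loop in benchmarkBloomBits is a bloombits.Generator round-trip: feed one header bloom per block of a section, then extract and compress each of the 2048 bit-vectors before they are written with rawdb.WriteBloomBits. Below is a minimal, self-contained sketch of that round-trip, separate from the file above. It uses only packages the file already imports; the standalone main package, the tiny section size of 8, and the empty blooms are illustrative assumptions, not part of the benchmark.

	// Sketch: in-memory bloombits generation and compression,
	// mirroring the section loop in benchmarkBloomBits above.
	package main

	import (
		"fmt"
		"log"

		"github.com/ethereum/go-ethereum/common/bitutil"
		"github.com/ethereum/go-ethereum/core/bloombits"
		"github.com/ethereum/go-ethereum/core/types"
	)

	func main() {
		const sectionSize = 8 // illustrative; the generator expects a multiple of 8 blooms per section

		gen, err := bloombits.NewGenerator(sectionSize)
		if err != nil {
			log.Fatalf("failed to create generator: %v", err)
		}
		// Add one bloom per block in the section; the benchmark uses header.Bloom,
		// empty blooms stand in here.
		for i := uint(0); i < sectionSize; i++ {
			if err := gen.AddBloom(i, types.Bloom{}); err != nil {
				log.Fatalf("failed to add bloom: %v", err)
			}
		}
		// Rotate the section into per-bit vectors and compress each one,
		// as the benchmark does before persisting them.
		var dataSize, compSize int
		for bit := 0; bit < types.BloomBitLength; bit++ {
			data, err := gen.Bitset(uint(bit))
			if err != nil {
				log.Fatalf("failed to retrieve bitset: %v", err)
			}
			comp := bitutil.CompressBytes(data)
			dataSize += len(data)
			compSize += len(comp)
		}
		fmt.Println("raw bytes:", dataSize, "compressed bytes:", compSize)
	}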