github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/eth/filters/bench_test.go

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

package filters

import (
	"bytes"
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/Sberex/go-sberex/common"
	"github.com/Sberex/go-sberex/common/bitutil"
	"github.com/Sberex/go-sberex/core"
	"github.com/Sberex/go-sberex/core/bloombits"
	"github.com/Sberex/go-sberex/core/types"
	"github.com/Sberex/go-sberex/ethdb"
	"github.com/Sberex/go-sberex/event"
	"github.com/Sberex/go-sberex/node"
)

func BenchmarkBloomBits512(b *testing.B) {
	benchmarkBloomBits(b, 512)
}

func BenchmarkBloomBits1k(b *testing.B) {
	benchmarkBloomBits(b, 1024)
}

func BenchmarkBloomBits2k(b *testing.B) {
	benchmarkBloomBits(b, 2048)
}

func BenchmarkBloomBits4k(b *testing.B) {
	benchmarkBloomBits(b, 4096)
}

func BenchmarkBloomBits8k(b *testing.B) {
	benchmarkBloomBits(b, 8192)
}

func BenchmarkBloomBits16k(b *testing.B) {
	benchmarkBloomBits(b, 16384)
}

func BenchmarkBloomBits32k(b *testing.B) {
	benchmarkBloomBits(b, 32768)
}

// benchFilterCnt is the number of single-address filters run in each bloombits benchmark.
const benchFilterCnt = 2000

// benchmarkBloomBits regenerates the bloombits index for the chain database
// found under the default data directory using the given section size, then
// runs benchFilterCnt address filters over the indexed range and reports
// generation time, compression ratio and per-filter timings.
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
	benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
	fmt.Println("Running bloombits benchmark section size:", sectionSize)

	db, err := ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
	if err != nil {
		b.Fatalf("error opening database at %v: %v", benchDataDir, err)
	}
	head := core.GetHeadBlockHash(db)
	if head == (common.Hash{}) {
		b.Fatalf("chain data not found at %v", benchDataDir)
	}

	clearBloomBits(db)
	fmt.Println("Generating bloombits data...")
	headNum := core.GetBlockNumber(db, head)
	if headNum < sectionSize+512 {
		b.Fatalf("not enough blocks for running a benchmark")
	}

	start := time.Now()
	cnt := (headNum - 512) / sectionSize
	var dataSize, compSize uint64
	for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
		bc, err := bloombits.NewGenerator(uint(sectionSize))
		if err != nil {
			b.Fatalf("failed to create generator: %v", err)
		}
		var header *types.Header
		for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
			hash := core.GetCanonicalHash(db, i)
			header = core.GetHeader(db, hash, i)
			if header == nil {
				b.Fatalf("Error creating bloomBits data")
			}
			bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
		}
		sectionHead := core.GetCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
		for i := 0; i < types.BloomBitLength; i++ {
			data, err := bc.Bitset(uint(i))
			if err != nil {
				b.Fatalf("failed to retrieve bitset: %v", err)
			}
			comp := bitutil.CompressBytes(data)
			dataSize += uint64(len(data))
			compSize += uint64(len(comp))
			core.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
		}
		//if sectionIdx%50 == 0 {
		//	fmt.Println(" section", sectionIdx, "/", cnt)
		//}
	}

	d := time.Since(start)
	fmt.Println("Finished generating bloombits data")
	fmt.Println(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block")
	fmt.Println(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize))

	fmt.Println("Running filter benchmarks...")
	start = time.Now()
	mux := new(event.TypeMux)
	var backend *testBackend

	for i := 0; i < benchFilterCnt; i++ {
		if i%20 == 0 {
			db.Close()
			db, _ = ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
			backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
		}
		var addr common.Address
		addr[0] = byte(i)
		addr[1] = byte(i / 256)
		filter := New(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
		if _, err := filter.Logs(context.Background()); err != nil {
			b.Error("filter.Find error:", err)
		}
	}
	d = time.Since(start)
	fmt.Println("Finished running filter benchmarks")
	fmt.Println(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks")
	db.Close()
}

// forEachKey iterates the database starting at startPrefix and invokes fn for
// every key until one whose prefix sorts after endPrefix is reached.
func forEachKey(db ethdb.Database, startPrefix, endPrefix []byte, fn func(key []byte)) {
	it := db.(*ethdb.LDBDatabase).NewIterator()
	it.Seek(startPrefix)
	for it.Valid() {
		key := it.Key()
		cmpLen := len(key)
		if len(endPrefix) < cmpLen {
			cmpLen = len(endPrefix)
		}
		if bytes.Compare(key[:cmpLen], endPrefix) == 1 {
			break
		}
		fn(common.CopyBytes(key))
		it.Next()
	}
	it.Release()
}

var bloomBitsPrefix = []byte("bloomBits-")

// clearBloomBits deletes all previously generated bloombits data from the database.
func clearBloomBits(db ethdb.Database) {
	fmt.Println("Clearing bloombits data...")
	forEachKey(db, bloomBitsPrefix, bloomBitsPrefix, func(key []byte) {
		db.Delete(key)
	})
}

// BenchmarkNoBloomBits filters the entire header range for a single address
// without a bloombits index, as a baseline for the indexed benchmarks above.
func BenchmarkNoBloomBits(b *testing.B) {
	benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
	fmt.Println("Running benchmark without bloombits")
	db, err := ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
	if err != nil {
		b.Fatalf("error opening database at %v: %v", benchDataDir, err)
	}
	head := core.GetHeadBlockHash(db)
	if head == (common.Hash{}) {
		b.Fatalf("chain data not found at %v", benchDataDir)
	}
	headNum := core.GetBlockNumber(db, head)

	clearBloomBits(db)

	fmt.Println("Running filter benchmarks...")
	start := time.Now()
	mux := new(event.TypeMux)
	backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
	filter := New(backend, 0, int64(headNum), []common.Address{{}}, nil)
	filter.Logs(context.Background())
	d := time.Since(start)
	fmt.Println("Finished running filter benchmarks")
	fmt.Println(" ", d, "total ", d*time.Duration(1000000)/time.Duration(headNum+1), "per million blocks")
	db.Close()
}
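// Note: the benchmarks in this file are not self-contained. They expect an
// already synchronised chain database at node.DefaultDataDir()+"/geth/chaindata"
// and fail with "chain data not found" otherwise. Assuming such a database is
// present, an illustrative invocation from the repository root would be:
//
//	go test -run NONE -bench BloomBits ./eth/filters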