github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/ledger/complete/ledger_benchmark_test.go

package complete_test

import (
	"math"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/onflow/flow-go/ledger"
	"github.com/onflow/flow-go/ledger/common/pathfinder"
	"github.com/onflow/flow-go/ledger/common/testutils"
	"github.com/onflow/flow-go/ledger/complete"
	"github.com/onflow/flow-go/ledger/complete/wal"
	"github.com/onflow/flow-go/ledger/partial/ptrie"
	"github.com/onflow/flow-go/module/metrics"
)

// GENERAL COMMENT:
// running this test with
//
//	go test -bench=. -benchmem
//
// will also report heap allocations for the benchmarks
func BenchmarkStorage(b *testing.B) { benchmarkStorage(100, b) }

// benchmarkStorage benchmarks the performance of the storage layer
func benchmarkStorage(steps int, b *testing.B) {
	// assumption: 1000 key updates per collection
	const (
		numInsPerStep      = 1000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, steps+1, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, steps+1, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(steps+1), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	totalUpdateTimeMS := 0
	totalReadTimeMS := 0
	totalProofTimeMS := 0
	totalRegOperation := 0
	totalProofSize := 0
	totalPTrieConstTimeMS := 0

	state := led.InitialState()
	for i := 0; i < steps; i++ {

		keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
		values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

		totalRegOperation += len(keys)

		start := time.Now()
		update, err := ledger.NewUpdate(state, keys, values)
		if err != nil {
			b.Fatal(err)
		}

		newState, _, err := led.Set(update)
		if err != nil {
			b.Fatal(err)
		}

		elapsed := time.Since(start)
		totalUpdateTimeMS += int(elapsed / time.Millisecond)

		// read back the values just written
		start = time.Now()
		query, err := ledger.NewQuery(newState, keys)
		if err != nil {
			b.Fatal(err)
		}
		_, err = led.Get(query)
		if err != nil {
			b.Fatal(err)
		}
		elapsed = time.Since(start)
		totalReadTimeMS += int(elapsed / time.Millisecond)

		start = time.Now()
		// generate a batch proof for the keys just read
		proof, err := led.Prove(query)
		if err != nil {
			b.Fatal(err)
		}
		elapsed = time.Since(start)
		totalProofTimeMS += int(elapsed / time.Millisecond)

		totalProofSize += len(proof)

		start = time.Now()
		p, err := ledger.DecodeTrieBatchProof(proof)
		if err != nil {
			b.Fatal(err)
		}

		// construct a partial trie using the decoded proofs; this also
		// verifies the batch proof against the new state's root hash
		_, err = ptrie.NewPSMT(ledger.RootHash(newState), p)
		if err != nil {
			b.Fatalf("failed to create PSMT: %v", err)
		}
		elapsed = time.Since(start)
		totalPTrieConstTimeMS += int(elapsed / time.Millisecond)

		state = newState
	}

	b.ReportMetric(float64(totalUpdateTimeMS)/float64(steps), "update_time_(ms)")
	b.ReportMetric(float64(totalUpdateTimeMS)*1000000/float64(totalRegOperation), "update_time_per_reg_(ns)")

	b.ReportMetric(float64(totalReadTimeMS)/float64(steps), "read_time_(ms)")
	b.ReportMetric(float64(totalReadTimeMS)*1000000/float64(totalRegOperation), "read_time_per_reg_(ns)")

	b.ReportMetric(float64(totalProofTimeMS)/float64(steps), "read_w_proof_time_(ms)")
	b.ReportMetric(float64(totalProofTimeMS)*1000000/float64(totalRegOperation), "read_w_proof_time_per_reg_(ns)")

	// totalProofSize is accumulated in bytes; convert to MB to match the label.
	b.ReportMetric(float64(totalProofSize)/float64(steps)/1e6, "proof_size_(MB)")
	b.ReportMetric(float64(totalPTrieConstTimeMS)/float64(steps), "ptrie_const_time_(ms)")
}
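
// The custom metrics reported above appear as extra columns next to the
// standard ns/op and B/op columns in the benchmark output, e.g.
// (illustrative values, not a real run):
//
//	BenchmarkStorage-8   1   ...ns/op   12.0 update_time_(ms)   ...
//
// To run only this benchmark:
//
//	go test -bench=BenchmarkStorage -benchmem ./ledger/complete/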

// BenchmarkTrieUpdate benchmarks the performance of a trie update
func BenchmarkTrieUpdate(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 3
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, err := led.Set(update)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}
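
// Note that every iteration above applies the same update to the same parent
// state, so each led.Set builds a fresh trie version from an identical
// starting point. A variant that chains state across iterations (a sketch,
// not part of the original benchmark; it would also include NewUpdate
// construction in the measured time) could look like:
//
//	for i := 0; i < b.N; i++ {
//		update, err := ledger.NewUpdate(state, keys, values)
//		if err != nil {
//			b.Fatal(err)
//		}
//		state, _, err = led.Set(update)
//		if err != nil {
//			b.Fatal(err)
//		}
//	}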

// BenchmarkTrieRead benchmarks the performance of a trie read
func BenchmarkTrieRead(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	newState, _, err := led.Set(update)
	if err != nil {
		b.Fatal(err)
	}

	query, err := ledger.NewQuery(newState, keys)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err = led.Get(query)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}

// BenchmarkLedgerGetOneValue benchmarks reading a single value, via both the
// batch API and the dedicated single-value API
func BenchmarkLedgerGetOneValue(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	newState, _, err := led.Set(update)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("batch get", func(b *testing.B) {
		query, err := ledger.NewQuery(newState, []ledger.Key{keys[0]})
		if err != nil {
			b.Fatal(err)
		}

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err = led.Get(query)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("single get", func(b *testing.B) {
		query, err := ledger.NewQuerySingleValue(newState, keys[0])
		if err != nil {
			b.Fatal(err)
		}

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err = led.GetSingleValue(query)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
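
// The two sub-benchmarks above contrast the batch read path (led.Get with a
// one-element query) against the dedicated single-value path
// (led.GetSingleValue) for the same key on the same trie; any gap between
// them is presumably the per-query overhead of the batch API. Run them side
// by side with, for example:
//
//	go test -bench=BenchmarkLedgerGetOneValue -benchmem ./ledger/complete/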

// BenchmarkTrieProve benchmarks the performance of proof generation
func BenchmarkTrieProve(b *testing.B) {

	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	newState, _, err := led.Set(update)
	if err != nil {
		b.Fatal(err)
	}

	query, err := ledger.NewQuery(newState, keys)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := led.Prove(query)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}
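
// proveAndVerify is a hypothetical helper (not part of the original file)
// sketching how the proof produced by led.Prove can be checked: decode it
// into a batch proof and rebuild a partial trie against the state's root
// hash, mirroring the steps used in benchmarkStorage above.
func proveAndVerify(b *testing.B, led *complete.Ledger, state ledger.State, query *ledger.Query) {
	proof, err := led.Prove(query)
	if err != nil {
		b.Fatal(err)
	}
	// Decode the serialized batch proof.
	batchProof, err := ledger.DecodeTrieBatchProof(proof)
	if err != nil {
		b.Fatal(err)
	}
	// Constructing a partial trie from the proof succeeds only if the proof
	// is consistent with the given root hash, so this acts as verification.
	if _, err := ptrie.NewPSMT(ledger.RootHash(state), batchProof); err != nil {
		b.Fatal(err)
	}
}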