github.com/onflow/atree@v0.6.0/array_benchmark_test.go

/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright 2021 Dapper Labs, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
    "math/rand"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

// GENERAL COMMENT:
// Running these benchmarks with
//   go test -bench=. -benchmem
// also reports heap allocations for each benchmark.

const opCount = 100

func BenchmarkXSArray(b *testing.B) { benchmarkArray(b, 100, opCount) }

func BenchmarkSArray(b *testing.B) { benchmarkArray(b, 1000, opCount) }

func BenchmarkMArray(b *testing.B) { benchmarkArray(b, 10_000, opCount) }

func BenchmarkLArray(b *testing.B) { benchmarkArray(b, 100_000, opCount) }

func BenchmarkXLArray(b *testing.B) { benchmarkArray(b, 1_000_000, opCount) }

func BenchmarkXXLArray(b *testing.B) { benchmarkArray(b, 10_000_000, opCount) }

func BenchmarkXXXLArray(b *testing.B) { benchmarkArray(b, 100_000_000, opCount) }

// TODO: add nested arrays as a 5th case
func RandomValue(r *rand.Rand) Value {
    switch r.Intn(4) {
    case 0:
        return Uint8Value(r.Intn(255))
    case 1:
        return Uint16Value(r.Intn(65535))
    case 2:
        return Uint32Value(r.Intn(4294967295))
    case 3:
        return Uint64Value(r.Intn(1844674407370955161))
    default:
        return Uint8Value(r.Intn(255))
    }
}

// benchmarkArray benchmarks the performance of the atree array.
func benchmarkArray(b *testing.B, initialArraySize, numberOfElements int) {

    r := newRand(b)

    storage := newTestPersistentStorage(b)

    address := Address{1, 2, 3, 4, 5, 6, 7, 8}

    typeInfo := testTypeInfo{42}

    array, err := NewArray(storage, address, typeInfo)

    require.NoError(b, err)

    // array := NewBasicArray(storage)

    var start time.Time
    var totalRawDataSize uint32
    var totalAppendTime time.Duration
    var totalRemoveTime time.Duration
    var totalInsertTime time.Duration
    var totalLookupTime time.Duration

    // setup
    for i := 0; i < initialArraySize; i++ {
        v := RandomValue(r)
        storable, err := v.Storable(storage, array.Address(), MaxInlineArrayElementSize)
        require.NoError(b, err)
        totalRawDataSize += storable.ByteSize()
        err = array.Append(v)
        require.NoError(b, err)
    }
    require.NoError(b, storage.Commit())
    b.ResetTimer()

    arrayID := array.StorageID()

    // append
    storage.DropCache()
    start = time.Now()
    array, err = NewArrayWithRootID(storage, arrayID)
    // array, err := NewBasicArrayWithRootID(storage, arrayID)
    require.NoError(b, err)
    for i := 0; i < numberOfElements; i++ {
        v := RandomValue(r)

        storable, err := v.Storable(storage, array.Address(), MaxInlineArrayElementSize)
        require.NoError(b, err)

        totalRawDataSize += storable.ByteSize()

        err = array.Append(v)
        require.NoError(b, err)
    }
    require.NoError(b, storage.Commit())
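    // Each op phase (append above; remove, insert, and lookup below) is timed from
    // reloading the array out of storage (after DropCache) through the final
    // storage.Commit(), so slab decoding and flushing are part of the measured time.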
    totalAppendTime = time.Since(start)

    // remove
    storage.DropCache()
    start = time.Now()
    array, err = NewArrayWithRootID(storage, arrayID)
    // array, err = NewBasicArrayWithRootID(storage, arrayID)
    require.NoError(b, err)

    for i := 0; i < numberOfElements; i++ {
        ind := r.Intn(int(array.Count()))
        storable, err := array.Remove(uint64(ind))
        require.NoError(b, err)
        totalRawDataSize -= storable.ByteSize()
    }
    require.NoError(b, storage.Commit())
    totalRemoveTime = time.Since(start)

    // insert
    storage.DropCache()
    start = time.Now()
    array, err = NewArrayWithRootID(storage, arrayID)
    // array, err = NewBasicArrayWithRootID(storage, arrayID)
    require.NoError(b, err)

    for i := 0; i < numberOfElements; i++ {
        ind := r.Intn(int(array.Count()))
        v := RandomValue(r)

        storable, err := v.Storable(storage, array.Address(), MaxInlineArrayElementSize)
        require.NoError(b, err)

        totalRawDataSize += storable.ByteSize()

        err = array.Insert(uint64(ind), v)
        require.NoError(b, err)
    }
    require.NoError(b, storage.Commit())
    totalInsertTime = time.Since(start)

    // lookup
    storage.DropCache()
    start = time.Now()
    array, err = NewArrayWithRootID(storage, arrayID)
    // array, err = NewBasicArrayWithRootID(storage, arrayID)
    require.NoError(b, err)

    for i := 0; i < numberOfElements; i++ {
        ind := r.Intn(int(array.Count()))
        _, err := array.Get(uint64(ind))
        require.NoError(b, err)
    }
    require.NoError(b, storage.Commit())
    totalLookupTime = time.Since(start)

    // random lookup
    storage.baseStorage.ResetReporter()
    storage.DropCache()
    array, err = NewArrayWithRootID(storage, arrayID)
    // array, err = NewBasicArrayWithRootID(storage, arrayID)
    require.NoError(b, err)

    ind := r.Intn(int(array.Count()))
    _, err = array.Get(uint64(ind))
    require.NoError(b, err)
    storageOverheadRatio := float64(storage.baseStorage.Size()) / float64(totalRawDataSize)
    b.ReportMetric(float64(storage.baseStorage.SegmentsTouched()), "segments_touched")
    b.ReportMetric(float64(storage.baseStorage.SegmentCounts()), "segments_total")
    b.ReportMetric(float64(totalRawDataSize), "storage_raw_data_size")
    b.ReportMetric(float64(storage.baseStorage.Size()), "storage_stored_data_size")
    b.ReportMetric(storageOverheadRatio, "storage_overhead_ratio")
    b.ReportMetric(float64(storage.baseStorage.BytesRetrieved()), "storage_bytes_loaded_for_lookup")
    // b.ReportMetric(float64(array.Count()), "number_of_elements")
    b.ReportMetric(float64(totalAppendTime), "append_100_time_(ns)")
    b.ReportMetric(float64(totalRemoveTime), "remove_100_time_(ns)")
    b.ReportMetric(float64(totalInsertTime), "insert_100_time_(ns)")
    b.ReportMetric(float64(totalLookupTime), "lookup_100_time_(ns)")
}

func BenchmarkLArrayMemoryImpact(b *testing.B) { benchmarkLongTermImpactOnMemory(b, 10_000, 1_000_000) }

// benchmarkLongTermImpactOnMemory benchmarks the long-term storage impact of a
// random mix of remove and insert operations on the atree array.
func benchmarkLongTermImpactOnMemory(b *testing.B, initialArraySize, numberOfOps int) {

    r := newRand(b)

    storage := newTestPersistentStorage(b)

    address := Address{1, 2, 3, 4, 5, 6, 7, 8}

    typeInfo := testTypeInfo{42}

    array, err := NewArray(storage, address, typeInfo)

    require.NoError(b, err)

    var totalRawDataSize uint32

    // setup
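    // Populate the array with initialArraySize random values before the op mix;
    // b.ResetTimer below excludes this setup (and its Commit) from the benchmark timing,
    // while totalRawDataSize accumulates the raw element bytes used for the overhead ratio.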
    for i := 0; i < initialArraySize; i++ {
        v := RandomValue(r)

        storable, err := v.Storable(storage, array.Address(), MaxInlineArrayElementSize)
        require.NoError(b, err)

        totalRawDataSize += storable.ByteSize()

        err = array.Append(v)
        require.NoError(b, err)
    }
    require.NoError(b, storage.Commit())
    b.ResetTimer()

    for i := 0; i < numberOfOps; i++ {
        ind := r.Intn(int(array.Count()))
        // select op
        switch r.Intn(2) {
        case 0: // remove
            storable, err := array.Remove(uint64(ind))
            require.NoError(b, err)
            totalRawDataSize -= storable.ByteSize()
        case 1: // insert
            v := RandomValue(r)

            storable, err := v.Storable(storage, array.Address(), MaxInlineArrayElementSize)
            require.NoError(b, err)

            totalRawDataSize += storable.ByteSize()

            err = array.Insert(uint64(ind), v)
            require.NoError(b, err)
        }
    }
    require.NoError(b, storage.Commit())

    storageOverheadRatio := float64(storage.baseStorage.Size()) / float64(totalRawDataSize)
    b.ReportMetric(float64(storage.baseStorage.SegmentsTouched()), "segments_touched")
    b.ReportMetric(float64(storage.baseStorage.SegmentCounts()), "segments_total")
    b.ReportMetric(float64(totalRawDataSize), "storage_raw_data_size")
    b.ReportMetric(float64(storage.baseStorage.Size()), "storage_stored_data_size")
    b.ReportMetric(storageOverheadRatio, "storage_overhead_ratio")
}
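
// The helper below is an illustrative sketch, not part of the original benchmark file:
// it shows how the same test helpers used above (newRand, newTestPersistentStorage,
// RandomValue) could be reused to time a single operation, here appends only, in
// isolation. The name benchmarkAppendOnly and its parameter choice are hypothetical.
func benchmarkAppendOnly(b *testing.B, numberOfElements int) {
    r := newRand(b)
    storage := newTestPersistentStorage(b)

    array, err := NewArray(storage, Address{1, 2, 3, 4, 5, 6, 7, 8}, testTypeInfo{42})
    require.NoError(b, err)

    b.ResetTimer()
    for i := 0; i < numberOfElements; i++ {
        // Append random values; any error fails the benchmark immediately.
        err = array.Append(RandomValue(r))
        require.NoError(b, err)
    }
    // Commit so that encoding and flushing slabs is included in the measurement,
    // matching the pattern of the benchmarks above.
    require.NoError(b, storage.Commit())
}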