github.com/blong14/gache@v0.0.0-20240124023949-89416fd8bbfa/internal/db/memtable/map_test.go

package memtable_test

import (
	"fmt"
	"reflect"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	gskl "github.com/blong14/gache/internal/db/memtable"
)

// test bundles the optional setup/teardown hooks and the required run body
// for a single table-driven skip list test.
type test struct {
	setup    func(*testing.T, *gskl.SkipList)
	run      func(t *testing.T, m *gskl.SkipList)
	teardown func(*testing.T, *gskl.SkipList)
}

// testMap runs a test case against a fresh skip list in a parallel subtest.
// Teardown is registered via t.Cleanup before run so it executes even when
// run fails fatally.
func testMap(t *testing.T, name string, test test) {
	t.Run(fmt.Sprintf("skip list test %s", name), func(t *testing.T) {
		t.Parallel()
		m := gskl.NewSkipList()
		if test.setup != nil {
			test.setup(t, m)
		}
		if test.teardown != nil {
			t.Cleanup(func() {
				test.teardown(t, m)
			})
		}
		test.run(t, m)
	})
}

func TestCount(t *testing.T) {
	expected := 100
	testMap(t, "count", test{
		setup: func(t *testing.T, m *gskl.SkipList) {
			for i := 0; i < expected; i++ {
				err := m.Set(
					[]byte(fmt.Sprintf("key_%d", i)), []byte(fmt.Sprintf("value__%d", i)))
				if err != nil {
					t.Fatal(err)
				}
			}
		},
		run: func(t *testing.T, m *gskl.SkipList) {
			actual := m.Count()
			if actual != uint64(expected) {
				t.Errorf("want %d, got %d", expected, actual)
			}
		},
	})
}

func TestGetAndSet(t *testing.T) {
	count := 50_000
	testMap(t, "get and set", test{
		run: func(t *testing.T, m *gskl.SkipList) {
			start := time.Now()
			var wg sync.WaitGroup
			// Write all keys from concurrent goroutines...
			for i := 0; i < count; i++ {
				wg.Add(1)
				go func(indx int) {
					defer wg.Done()
					k := []byte(fmt.Sprintf("key-%d", indx))
					err := m.Set(k, []byte(fmt.Sprintf("value__%d", indx)))
					if err != nil {
						t.Error(err)
					}
				}(i)
			}
			wg.Wait()
			t.Logf("%s", time.Since(start))
			// ...then read them all back, again concurrently.
			for i := 0; i < count; i++ {
				wg.Add(1)
				go func(indx int) {
					defer wg.Done()
					k := []byte(fmt.Sprintf("key-%d", indx))
					if _, ok := m.Get(k); !ok {
						t.Errorf("missing key %d", indx)
					}
				}(i)
			}
			wg.Wait()
			t.Logf("%s", time.Since(start))
		},
	})
}

func TestRange(t *testing.T) {
	t.Skip("skipping...")
	expected := [][]byte{[]byte("first"), []byte("second"), []byte("third")}
	testMap(t, "range", test{
		setup: func(t *testing.T, m *gskl.SkipList) {
			for _, i := range expected {
				if err := m.Set(i, i); err != nil {
					t.Fatal(err)
				}
			}
		},
		run: func(t *testing.T, m *gskl.SkipList) {
			actual := make([][]byte, 0)
			m.Range(func(k, _ []byte) bool {
				actual = append(actual, k)
				return true
			})
			if !reflect.DeepEqual(actual, expected) {
				t.Errorf("want %v, got %v", expected, actual)
			}
		},
	})
}

func TestScan(t *testing.T) {
	expected := [][]byte{[]byte("aaaa"), []byte("bbbb"), []byte("cccc"), []byte("dddd"), []byte("eeee")}
	testMap(t, "scan", test{
		setup: func(t *testing.T, m *gskl.SkipList) {
			for _, i := range expected {
				if err := m.Set(i, i); err != nil {
					t.Fatal(err)
				}
			}
		},
		run: func(t *testing.T, m *gskl.SkipList) {
			// A bounded scan is inclusive of both the start and end keys.
			actual := make([][]byte, 0)
			m.Scan(expected[1], expected[4], func(k, _ []byte) bool {
				actual = append(actual, k)
				return true
			})
			if !reflect.DeepEqual(actual, expected[1:]) {
				t.Errorf("want %v, got %v", expected[1:], actual)
			}
			// A nil end key scans from the start key to the end of the list.
			actual = make([][]byte, 0)
			m.Scan(expected[0], nil, func(k, _ []byte) bool {
				actual = append(actual, k)
				return true
			})
			if !reflect.DeepEqual(actual, expected) {
				t.Errorf("want %v, got %v", expected, actual)
			}
		},
	})
}
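
// The benchmarks below mirror the shape of the sync.Map benchmark suite in
// the Go standard library: a shared harness drives per-goroutine workloads
// through testing.PB. Both the tests and the benchmarks run with the
// standard tooling, e.g.:
//
//	go test -run 'Test' ./internal/db/memtable/
//	go test -bench 'BenchmarkSkiplist' -benchmem ./internal/db/memtable/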

// bench bundles the optional setup/teardown hooks and the required
// per-goroutine body for a single skip list benchmark.
type bench struct {
	setup    func(*testing.B, *gskl.SkipList)
	perG     func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList)
	teardown func(*testing.B, *gskl.SkipList)
}

// benchMap runs a benchmark against a fresh skip list. Each parallel
// goroutine receives a disjoint starting index (id*b.N) so that workloads
// deriving keys from i do not collide across goroutines.
func benchMap(b *testing.B, bench bench) {
	b.Run("skip list benchmark", func(b *testing.B) {
		m := gskl.NewSkipList()
		if bench.setup != nil {
			bench.setup(b, m)
		}
		if bench.teardown != nil {
			b.Cleanup(func() {
				bench.teardown(b, m)
			})
		}
		b.ReportAllocs()
		b.ResetTimer()
		var i int64
		b.RunParallel(func(pb *testing.PB) {
			id := int(atomic.AddInt64(&i, 1) - 1)
			bench.perG(b, pb, id*b.N, m)
		})
	})
}

func BenchmarkSkiplist_LoadMostlyHits(b *testing.B) {
	const hits, misses = 1023, 1
	benchMap(b, bench{
		setup: func(b *testing.B, m *gskl.SkipList) {
			for i := 0; i < hits; i++ {
				key := []byte(strconv.Itoa(i))
				if err := m.Set(key, key); err != nil {
					b.Fatal(err)
				}
			}
		},
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			for ; pb.Next(); i++ {
				m.Get([]byte(strconv.Itoa(i % (hits + misses))))
			}
		},
	})
}

func BenchmarkSkiplist_LoadMostlyMisses(b *testing.B) {
	const hits, misses = 1, 1023
	benchMap(b, bench{
		setup: func(b *testing.B, m *gskl.SkipList) {
			for i := 0; i < hits; i++ {
				key := []byte(strconv.Itoa(i))
				if err := m.Set(key, key); err != nil {
					b.Fatal(err)
				}
			}
		},
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			for ; pb.Next(); i++ {
				m.Get([]byte(strconv.Itoa(i % (hits + misses))))
			}
		},
	})
}

func BenchmarkSkiplist_LoadOrStoreBalanced(b *testing.B) {
	const hits, misses = 1023, 1023
	value := []byte("value")
	benchMap(b, bench{
		setup: func(b *testing.B, m *gskl.SkipList) {
			for i := 0; i < hits; i++ {
				key := []byte(strconv.Itoa(i))
				if err := m.Set(key, value); err != nil {
					b.Fatal(err)
				}
			}
		},
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			for ; pb.Next(); i++ {
				j := i % (hits + misses)
				key := []byte(strconv.Itoa(j))
				if j < hits {
					// Pre-seeded keys must hit.
					if _, ok := m.Get(key); !ok {
						b.Fatalf("unexpected miss for %v", j)
					}
				} else {
					if err := m.Set(key, value); err != nil {
						b.Error(err)
					}
				}
			}
		},
	})
}

func BenchmarkSkiplist_LoadOrStoreUnique(b *testing.B) {
	const hits = 1023
	value := []byte("value")
	benchMap(b, bench{
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			for ; pb.Next(); i++ {
				j := i % hits
				key := []byte(strconv.Itoa(j))
				if _, ok := m.Get(key); !ok {
					if err := m.Set(key, value); err != nil {
						b.Error(err)
					}
				}
			}
		},
	})
}

func BenchmarkSkiplist_LoadOrStoreCollision(b *testing.B) {
	const hits = 1023
	value := []byte("value")
	benchMap(b, bench{
		setup: func(b *testing.B, m *gskl.SkipList) {
			for i := 0; i < hits; i++ {
				key := []byte(strconv.Itoa(i))
				if err := m.Set(key, value); err != nil {
					b.Fatal(err)
				}
			}
		},
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			for ; pb.Next(); i++ {
				j := i % hits
				key := []byte(strconv.Itoa(j))
				if _, ok := m.Get(key); ok {
					if err := m.Set(key, value); err != nil {
						b.Error(err)
					}
				} else {
					b.Errorf("unexpected miss %s", key)
				}
			}
		},
	})
}
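
// BenchmarkSkiplist_AdversarialAlloc stresses allocation behavior: each
// goroutine mostly issues Gets for fresh (missing) keys and inserts a new
// key only after a progressively longer run of loads, so the interval
// between stores grows as the benchmark proceeds.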
func BenchmarkSkiplist_AdversarialAlloc(b *testing.B) {
	value := []byte("value")
	benchMap(b, bench{
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			var stores, loadsSinceStore int64
			for ; pb.Next(); i++ {
				key := []byte(strconv.Itoa(i))
				m.Get(key)
				if loadsSinceStore++; loadsSinceStore > stores {
					if _, ok := m.Get(key); !ok {
						if err := m.Set(key, value); err != nil {
							b.Error(err)
						}
					}
					loadsSinceStore = 0
					stores++
				}
			}
		},
	})
}

func BenchmarkSkiplist_Range(b *testing.B) {
	const mapSize = 1 << 10
	value := []byte("")
	benchMap(b, bench{
		setup: func(b *testing.B, m *gskl.SkipList) {
			for i := 0; i < mapSize; i++ {
				key := []byte(strconv.Itoa(i))
				if err := m.Set(key, value); err != nil {
					b.Fatal(err)
				}
			}
		},
		perG: func(b *testing.B, pb *testing.PB, i int, m *gskl.SkipList) {
			for ; pb.Next(); i++ {
				m.Range(func(_, _ []byte) bool { return true })
			}
		},
	})
}
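
// ExampleSkipList is a minimal usage sketch of the API exercised by the
// tests above. It assumes Get returns the stored value as a []byte along
// with an ok flag and that Count reports the number of entries (as the
// tests suggest); adjust if the concrete signatures differ.
func ExampleSkipList() {
	m := gskl.NewSkipList()
	if err := m.Set([]byte("hello"), []byte("world")); err != nil {
		panic(err)
	}
	if v, ok := m.Get([]byte("hello")); ok {
		fmt.Printf("%s\n", v)
	}
	fmt.Println(m.Count())
	// Output:
	// world
	// 1
}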