github.com/petermattis/pebble@v0.0.0-20190905164901-ab51a2166067/level_iter_test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/petermattis/pebble/cache"
	"github.com/petermattis/pebble/internal/base"
	"github.com/petermattis/pebble/internal/datadriven"
	"github.com/petermattis/pebble/internal/rangedel"
	"github.com/petermattis/pebble/sstable"
	"github.com/petermattis/pebble/vfs"
	"golang.org/x/exp/rand"
)

// TestLevelIter exercises levelIter over in-memory fakeIters defined by the
// data-driven test cases in testdata/level_iter.
func TestLevelIter(t *testing.T) {
	var iters []*fakeIter
	var files []fileMetadata

	newIters := func(
		meta *fileMetadata, opts *IterOptions, bytesIterated *uint64,
	) (internalIterator, internalIterator, error) {
		f := *iters[meta.FileNum]
		return &f, nil, nil
	}

	datadriven.RunTest(t, "testdata/level_iter", func(d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			iters = nil
			files = nil

			for _, line := range strings.Split(d.Input, "\n") {
				f := &fakeIter{}
				for _, key := range strings.Fields(line) {
					j := strings.Index(key, ":")
					f.keys = append(f.keys, base.ParseInternalKey(key[:j]))
					f.vals = append(f.vals, []byte(key[j+1:]))
				}
				iters = append(iters, f)

				meta := fileMetadata{
					FileNum: uint64(len(files)),
				}
				meta.Smallest = f.keys[0]
				meta.Largest = f.keys[len(f.keys)-1]
				files = append(files, meta)
			}

			return ""

		case "iter":
			var opts IterOptions
			for _, arg := range d.CmdArgs {
				if len(arg.Vals) != 1 {
					return fmt.Sprintf("%s: %s=<value>", d.Cmd, arg.Key)
				}
				switch arg.Key {
				case "lower":
					opts.LowerBound = []byte(arg.Vals[0])
				case "upper":
					opts.UpperBound = []byte(arg.Vals[0])
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}

			iter := newLevelIter(&opts, DefaultComparer.Compare, newIters, files, nil)
			defer iter.Close()
			return runInternalIterCmd(d, iter)

		case "load":
			// The "load" command allows testing the iterator options passed to load
			// sstables.
			//
			// load <key> [lower=<key>] [upper=<key>]
			var opts IterOptions
			var key string
			for _, arg := range d.CmdArgs {
				if len(arg.Vals) == 0 {
					key = arg.Key
					continue
				}
				if len(arg.Vals) != 1 {
					return fmt.Sprintf("%s: %s=<value>", d.Cmd, arg.Key)
				}
				switch arg.Key {
				case "lower":
					opts.LowerBound = []byte(arg.Vals[0])
				case "upper":
					opts.UpperBound = []byte(arg.Vals[0])
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}

			var tableOpts *IterOptions
			newIters2 := func(
				meta *fileMetadata, opts *IterOptions, bytesIterated *uint64,
			) (internalIterator, internalIterator, error) {
				tableOpts = opts
				return newIters(meta, opts, nil)
			}

			iter := newLevelIter(&opts, DefaultComparer.Compare, newIters2, files, nil)
			iter.SeekGE([]byte(key))
			lower, upper := tableOpts.GetLowerBound(), tableOpts.GetUpperBound()
			return fmt.Sprintf("[%s,%s]\n", lower, upper)

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// TestLevelIterBoundaries builds real sstables in an in-memory filesystem and
// exercises levelIter across their boundaries, driven by the test cases in
// testdata/level_iter_boundaries.
func TestLevelIterBoundaries(t *testing.T) {
	cmp := DefaultComparer.Compare
	mem := vfs.NewMem()
	var readers []*sstable.Reader
	var files []fileMetadata

	newIters := func(
		meta *fileMetadata, _ *IterOptions, _ *uint64,
	) (internalIterator, internalIterator, error) {
		return readers[meta.FileNum].NewIter(nil /* lower */, nil /* upper */), nil, nil
	}

	datadriven.RunTest(t, "testdata/level_iter_boundaries", func(d *datadriven.TestData) string {
		switch d.Cmd {
		case "clear":
			mem = vfs.NewMem()
			readers = nil
			files = nil
			return ""

		case "build":
			fileNum := uint64(len(readers))
			name := fmt.Sprint(fileNum)
			f0, err := mem.Create(name)
			if err != nil {
				return err.Error()
			}

			w := sstable.NewWriter(f0, nil, LevelOptions{})
			var tombstones []rangedel.Tombstone
			f := rangedel.Fragmenter{
				Cmp: cmp,
				Emit: func(fragmented []rangedel.Tombstone) {
					tombstones = append(tombstones, fragmented...)
				},
			}
			for _, key := range strings.Split(d.Input, "\n") {
				j := strings.Index(key, ":")
				ikey := base.ParseInternalKey(key[:j])
				value := []byte(key[j+1:])
				switch ikey.Kind() {
				case InternalKeyKindRangeDelete:
					f.Add(ikey, value)
				default:
					if err := w.Add(ikey, value); err != nil {
						return err.Error()
					}
				}
			}
			f.Finish()
			for _, v := range tombstones {
				if err := w.Add(v.Start, v.End); err != nil {
					return err.Error()
				}
			}
			if err := w.Close(); err != nil {
				return err.Error()
			}
			meta, err := w.Metadata()
			if err != nil {
				return err.Error()
			}

			f1, err := mem.Open(name)
			if err != nil {
				return err.Error()
			}
			r, err := sstable.NewReader(f1, 0, 0, nil)
			if err != nil {
				return err.Error()
			}
			readers = append(readers, r)
			files = append(files, fileMetadata{
				FileNum:  fileNum,
				Smallest: meta.Smallest(cmp),
				Largest:  meta.Largest(cmp),
			})

			var buf bytes.Buffer
			for _, f := range files {
				fmt.Fprintf(&buf, "%d: %s-%s\n", f.FileNum, f.Smallest, f.Largest)
			}
			return buf.String()

		case "iter":
			iter := newLevelIter(nil, DefaultComparer.Compare, newIters, files, nil)
			defer iter.Close()
			// Fake up the range deletion initialization.
			iter.initRangeDel(new(internalIterator))
			return runInternalIterCmd(d, iter, iterCmdVerboseKey)

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// buildLevelIterTables writes count in-memory sstables filled with ascending
// keys and returns their readers, file metadata, and the keys written, for use
// by the levelIter benchmarks below.
func buildLevelIterTables(
	b *testing.B, blockSize, restartInterval, count int,
) ([]*sstable.Reader, []fileMetadata, [][]byte) {
	mem := vfs.NewMem()
	files := make([]vfs.File, count)
	for i := range files {
		f, err := mem.Create(fmt.Sprintf("bench%d", i))
		if err != nil {
			b.Fatal(err)
		}
		defer f.Close()
		files[i] = f
	}

	writers := make([]*sstable.Writer, len(files))
	for i := range files {
		writers[i] = sstable.NewWriter(files[i], nil, LevelOptions{
			BlockRestartInterval: restartInterval,
			BlockSize:            blockSize,
			Compression:          NoCompression,
		})
	}

	var keys [][]byte
	var i int
	const targetSize = 2 << 20
	for _, w := range writers {
		for ; w.EstimatedSize() < targetSize; i++ {
			key := []byte(fmt.Sprintf("%08d", i))
			keys = append(keys, key)
			ikey := base.MakeInternalKey(key, 0, InternalKeyKindSet)
			w.Add(ikey, nil)
		}
		if err := w.Close(); err != nil {
			b.Fatal(err)
		}
	}

	cache := cache.New(128 << 20)
	readers := make([]*sstable.Reader, len(files))
	for i := range files {
		f, err := mem.Open(fmt.Sprintf("bench%d", i))
		if err != nil {
			b.Fatal(err)
		}
		readers[i], err = sstable.NewReader(f, 0, uint64(i), &Options{
			Cache: cache,
		})
		if err != nil {
			b.Fatal(err)
		}
	}

	meta := make([]fileMetadata, len(readers))
	for i := range readers {
		iter := readers[i].NewIter(nil /* lower */, nil /* upper */)
		key, _ := iter.First()
		meta[i].FileNum = uint64(i)
		meta[i].Smallest = *key
		key, _ = iter.Last()
		meta[i].Largest = *key
	}
	return readers, meta, keys
}

func BenchmarkLevelIterSeekGE(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, files, keys := buildLevelIterTables(b, blockSize, restartInterval, count)
							newIters := func(
								meta *fileMetadata, _ *IterOptions, _ *uint64,
							) (internalIterator, internalIterator, error) {
								return readers[meta.FileNum].NewIter(nil /* lower */, nil /* upper */), nil, nil
							}
							l := newLevelIter(nil, DefaultComparer.Compare, newIters, files, nil)
							rng := rand.New(rand.NewSource(uint64(time.Now().UnixNano())))

							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								l.SeekGE(keys[rng.Intn(len(keys))])
							}
						})
				}
			})
	}
}

func BenchmarkLevelIterNext(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, files, _ := buildLevelIterTables(b, blockSize, restartInterval, count)
							newIters := func(
								meta *fileMetadata, _ *IterOptions, _ *uint64,
							) (internalIterator, internalIterator, error) {
								return readers[meta.FileNum].NewIter(nil /* lower */, nil /* upper */), nil, nil
							}
							l := newLevelIter(nil, DefaultComparer.Compare, newIters, files, nil)

							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								if !l.Valid() {
									l.First()
								}
								l.Next()
							}
						})
				}
			})
	}
}

func BenchmarkLevelIterPrev(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, files, _ := buildLevelIterTables(b, blockSize, restartInterval, count)
							newIters := func(
								meta *fileMetadata, _ *IterOptions, _ *uint64,
							) (internalIterator, internalIterator, error) {
								return readers[meta.FileNum].NewIter(nil /* lower */, nil /* upper */), nil, nil
							}
							l := newLevelIter(nil, DefaultComparer.Compare, newIters, files, nil)

							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								if !l.Valid() {
									l.Last()
								}
								l.Prev()
							}
						})
				}
			})
	}
}