github.com/zuoyebang/bitalostable@v1.0.1-0.20240229032404-e3b99a834294/merging_iter_test.go (about) 1 // Copyright 2018 The LevelDB-Go and Pebble and Bitalostored Authors. All rights reserved. Use 2 // of this source code is governed by a BSD-style license that can be found in 3 // the LICENSE file. 4 5 package bitalostable 6 7 import ( 8 "fmt" 9 "strings" 10 "testing" 11 "time" 12 13 "github.com/stretchr/testify/require" 14 "github.com/zuoyebang/bitalostable/bloom" 15 "github.com/zuoyebang/bitalostable/internal/base" 16 "github.com/zuoyebang/bitalostable/internal/datadriven" 17 "github.com/zuoyebang/bitalostable/internal/keyspan" 18 "github.com/zuoyebang/bitalostable/internal/manifest" 19 "github.com/zuoyebang/bitalostable/internal/rangedel" 20 "github.com/zuoyebang/bitalostable/sstable" 21 "github.com/zuoyebang/bitalostable/vfs" 22 "golang.org/x/exp/rand" 23 ) 24 25 func TestMergingIter(t *testing.T) { 26 var stats base.InternalIteratorStats 27 newFunc := func(iters ...internalIterator) internalIterator { 28 return newMergingIter(nil /* logger */, &stats, DefaultComparer.Compare, 29 func(a []byte) int { return len(a) }, iters...) 30 } 31 testIterator(t, newFunc, func(r *rand.Rand) [][]string { 32 // Shuffle testKeyValuePairs into one or more splits. Each individual 33 // split is in increasing order, but different splits may overlap in 34 // range. Some of the splits may be empty. 35 splits := make([][]string, 1+r.Intn(2+len(testKeyValuePairs))) 36 for _, kv := range testKeyValuePairs { 37 j := r.Intn(len(splits)) 38 splits[j] = append(splits[j], kv) 39 } 40 return splits 41 }) 42 } 43 44 func TestMergingIterSeek(t *testing.T) { 45 var def string 46 datadriven.RunTest(t, "testdata/merging_iter_seek", func(d *datadriven.TestData) string { 47 switch d.Cmd { 48 case "define": 49 def = d.Input 50 return "" 51 52 case "iter": 53 var iters []internalIterator 54 for _, line := range strings.Split(def, "\n") { 55 f := &fakeIter{} 56 for _, key := range strings.Fields(line) { 57 j := strings.Index(key, ":") 58 f.keys = append(f.keys, base.ParseInternalKey(key[:j])) 59 f.vals = append(f.vals, []byte(key[j+1:])) 60 } 61 iters = append(iters, f) 62 } 63 64 var stats base.InternalIteratorStats 65 iter := newMergingIter(nil /* logger */, &stats, DefaultComparer.Compare, 66 func(a []byte) int { return len(a) }, iters...) 67 defer iter.Close() 68 return runInternalIterCmd(d, iter) 69 70 default: 71 return fmt.Sprintf("unknown command: %s", d.Cmd) 72 } 73 }) 74 } 75 76 func TestMergingIterNextPrev(t *testing.T) { 77 // The data is the same in each of these cases, but divided up amongst the 78 // iterators differently. This data must match the definition in 79 // testdata/internal_iter_next. 80 iterCases := [][]string{ 81 { 82 "a.SET.2:2 a.SET.1:1 b.SET.2:2 b.SET.1:1 c.SET.2:2 c.SET.1:1", 83 }, 84 { 85 "a.SET.2:2 b.SET.2:2 c.SET.2:2", 86 "a.SET.1:1 b.SET.1:1 c.SET.1:1", 87 }, 88 { 89 "a.SET.2:2 b.SET.2:2", 90 "a.SET.1:1 b.SET.1:1", 91 "c.SET.2:2 c.SET.1:1", 92 }, 93 { 94 "a.SET.2:2", 95 "a.SET.1:1", 96 "b.SET.2:2", 97 "b.SET.1:1", 98 "c.SET.2:2", 99 "c.SET.1:1", 100 }, 101 } 102 103 for _, c := range iterCases { 104 t.Run("", func(t *testing.T) { 105 datadriven.RunTest(t, "testdata/internal_iter_next", func(d *datadriven.TestData) string { 106 switch d.Cmd { 107 case "define": 108 // Ignore. We've defined the iterator data above. 109 return "" 110 111 case "iter": 112 iters := make([]internalIterator, len(c)) 113 for i := range c { 114 f := &fakeIter{} 115 iters[i] = f 116 for _, key := range strings.Fields(c[i]) { 117 j := strings.Index(key, ":") 118 f.keys = append(f.keys, base.ParseInternalKey(key[:j])) 119 f.vals = append(f.vals, []byte(key[j+1:])) 120 } 121 } 122 123 var stats base.InternalIteratorStats 124 iter := newMergingIter(nil /* logger */, &stats, DefaultComparer.Compare, 125 func(a []byte) int { return len(a) }, iters...) 126 defer iter.Close() 127 return runInternalIterCmd(d, iter) 128 129 default: 130 return fmt.Sprintf("unknown command: %s", d.Cmd) 131 } 132 }) 133 }) 134 } 135 } 136 137 func TestMergingIterCornerCases(t *testing.T) { 138 memFS := vfs.NewMem() 139 cmp := DefaultComparer.Compare 140 fmtKey := DefaultComparer.FormatKey 141 opts := (*Options)(nil).EnsureDefaults() 142 var v *version 143 144 // Indexed by fileNum. 145 var readers []*sstable.Reader 146 defer func() { 147 for _, r := range readers { 148 r.Close() 149 } 150 }() 151 152 var fileNum base.FileNum 153 newIters := 154 func(file *manifest.FileMetadata, opts *IterOptions, iio internalIterOpts) (internalIterator, keyspan.FragmentIterator, error) { 155 r := readers[file.FileNum] 156 rangeDelIter, err := r.NewRawRangeDelIter() 157 if err != nil { 158 return nil, nil, err 159 } 160 iter, err := r.NewIterWithBlockPropertyFilters(opts.GetLowerBound(), opts.GetUpperBound(), nil, true /* useFilterBlock */, iio.stats) 161 if err != nil { 162 return nil, nil, err 163 } 164 return iter, rangeDelIter, nil 165 } 166 167 datadriven.RunTest(t, "testdata/merging_iter", func(d *datadriven.TestData) string { 168 switch d.Cmd { 169 case "define": 170 lines := strings.Split(d.Input, "\n") 171 172 var files [numLevels][]*fileMetadata 173 var level int 174 for i := 0; i < len(lines); i++ { 175 line := lines[i] 176 line = strings.TrimSpace(line) 177 if line == "L" || line == "L0" { 178 // start next level 179 level++ 180 continue 181 } 182 keys := strings.Fields(line) 183 smallestKey := base.ParseInternalKey(keys[0]) 184 largestKey := base.ParseInternalKey(keys[1]) 185 m := (&fileMetadata{ 186 FileNum: fileNum, 187 }).ExtendPointKeyBounds(cmp, smallestKey, largestKey) 188 files[level] = append(files[level], m) 189 190 i++ 191 line = lines[i] 192 line = strings.TrimSpace(line) 193 name := fmt.Sprint(fileNum) 194 fileNum++ 195 f, err := memFS.Create(name) 196 if err != nil { 197 return err.Error() 198 } 199 w := sstable.NewWriter(f, sstable.WriterOptions{}) 200 var tombstones []keyspan.Span 201 frag := keyspan.Fragmenter{ 202 Cmp: cmp, 203 Format: fmtKey, 204 Emit: func(fragmented keyspan.Span) { 205 tombstones = append(tombstones, fragmented) 206 }, 207 } 208 keyvalues := strings.Fields(line) 209 for _, kv := range keyvalues { 210 j := strings.Index(kv, ":") 211 ikey := base.ParseInternalKey(kv[:j]) 212 value := []byte(kv[j+1:]) 213 switch ikey.Kind() { 214 case InternalKeyKindRangeDelete: 215 frag.Add(keyspan.Span{Start: ikey.UserKey, End: value, Keys: []keyspan.Key{{Trailer: ikey.Trailer}}}) 216 default: 217 if err := w.Add(ikey, value); err != nil { 218 return err.Error() 219 } 220 } 221 } 222 frag.Finish() 223 for _, v := range tombstones { 224 if err := rangedel.Encode(&v, w.Add); err != nil { 225 return err.Error() 226 } 227 } 228 if err := w.Close(); err != nil { 229 return err.Error() 230 } 231 f, err = memFS.Open(name) 232 if err != nil { 233 return err.Error() 234 } 235 r, err := sstable.NewReader(f, sstable.ReaderOptions{}) 236 if err != nil { 237 return err.Error() 238 } 239 readers = append(readers, r) 240 } 241 242 v = newVersion(opts, files) 243 return v.String() 244 case "iter": 245 levelIters := make([]mergingIterLevel, 0, len(v.Levels)) 246 var stats base.InternalIteratorStats 247 for i, l := range v.Levels { 248 slice := l.Slice() 249 if slice.Empty() { 250 continue 251 } 252 li := &levelIter{} 253 li.init(IterOptions{}, cmp, func(a []byte) int { return len(a) }, newIters, 254 slice.Iter(), manifest.Level(i), internalIterOpts{stats: &stats}) 255 i := len(levelIters) 256 levelIters = append(levelIters, mergingIterLevel{iter: li}) 257 li.initRangeDel(&levelIters[i].rangeDelIter) 258 li.initBoundaryContext(&levelIters[i].levelIterBoundaryContext) 259 } 260 miter := &mergingIter{} 261 miter.init(nil /* opts */, &stats, cmp, func(a []byte) int { return len(a) }, levelIters...) 262 defer miter.Close() 263 return runInternalIterCmd(d, miter, iterCmdVerboseKey, iterCmdStats(&stats)) 264 default: 265 return fmt.Sprintf("unknown command: %s", d.Cmd) 266 } 267 }) 268 } 269 270 func buildMergingIterTables( 271 b *testing.B, blockSize, restartInterval, count int, 272 ) ([]*sstable.Reader, [][]byte, func()) { 273 mem := vfs.NewMem() 274 files := make([]vfs.File, count) 275 for i := range files { 276 f, err := mem.Create(fmt.Sprintf("bench%d", i)) 277 if err != nil { 278 b.Fatal(err) 279 } 280 files[i] = f 281 } 282 283 writers := make([]*sstable.Writer, len(files)) 284 for i := range files { 285 writers[i] = sstable.NewWriter(files[i], sstable.WriterOptions{ 286 BlockRestartInterval: restartInterval, 287 BlockSize: blockSize, 288 Compression: NoCompression, 289 }) 290 } 291 292 estimatedSize := func() uint64 { 293 var sum uint64 294 for _, w := range writers { 295 sum += w.EstimatedSize() 296 } 297 return sum 298 } 299 300 var keys [][]byte 301 var ikey InternalKey 302 targetSize := uint64(count * (2 << 20)) 303 for i := 0; estimatedSize() < targetSize; i++ { 304 key := []byte(fmt.Sprintf("%08d", i)) 305 keys = append(keys, key) 306 ikey.UserKey = key 307 j := rand.Intn(len(writers)) 308 w := writers[j] 309 w.Add(ikey, nil) 310 } 311 312 for _, w := range writers { 313 if err := w.Close(); err != nil { 314 b.Fatal(err) 315 } 316 } 317 318 opts := sstable.ReaderOptions{Cache: NewCache(128 << 20)} 319 defer opts.Cache.Unref() 320 321 readers := make([]*sstable.Reader, len(files)) 322 for i := range files { 323 f, err := mem.Open(fmt.Sprintf("bench%d", i)) 324 if err != nil { 325 b.Fatal(err) 326 } 327 readers[i], err = sstable.NewReader(f, opts) 328 if err != nil { 329 b.Fatal(err) 330 } 331 } 332 return readers, keys, func() { 333 for _, r := range readers { 334 require.NoError(b, r.Close()) 335 } 336 } 337 } 338 339 func BenchmarkMergingIterSeekGE(b *testing.B) { 340 const blockSize = 32 << 10 341 342 for _, restartInterval := range []int{16} { 343 b.Run(fmt.Sprintf("restart=%d", restartInterval), 344 func(b *testing.B) { 345 for _, count := range []int{1, 2, 3, 4, 5} { 346 b.Run(fmt.Sprintf("count=%d", count), 347 func(b *testing.B) { 348 readers, keys, cleanup := buildMergingIterTables(b, blockSize, restartInterval, count) 349 defer cleanup() 350 iters := make([]internalIterator, len(readers)) 351 for i := range readers { 352 var err error 353 iters[i], err = readers[i].NewIter(nil /* lower */, nil /* upper */) 354 require.NoError(b, err) 355 } 356 var stats base.InternalIteratorStats 357 m := newMergingIter(nil /* logger */, &stats, DefaultComparer.Compare, 358 func(a []byte) int { return len(a) }, iters...) 359 rng := rand.New(rand.NewSource(uint64(time.Now().UnixNano()))) 360 361 b.ResetTimer() 362 for i := 0; i < b.N; i++ { 363 m.SeekGE(keys[rng.Intn(len(keys))], base.SeekGEFlagsNone) 364 } 365 m.Close() 366 }) 367 } 368 }) 369 } 370 } 371 372 func BenchmarkMergingIterNext(b *testing.B) { 373 const blockSize = 32 << 10 374 375 for _, restartInterval := range []int{16} { 376 b.Run(fmt.Sprintf("restart=%d", restartInterval), 377 func(b *testing.B) { 378 for _, count := range []int{1, 2, 3, 4, 5} { 379 b.Run(fmt.Sprintf("count=%d", count), 380 func(b *testing.B) { 381 readers, _, cleanup := buildMergingIterTables(b, blockSize, restartInterval, count) 382 defer cleanup() 383 iters := make([]internalIterator, len(readers)) 384 for i := range readers { 385 var err error 386 iters[i], err = readers[i].NewIter(nil /* lower */, nil /* upper */) 387 require.NoError(b, err) 388 } 389 var stats base.InternalIteratorStats 390 m := newMergingIter(nil /* logger */, &stats, DefaultComparer.Compare, 391 func(a []byte) int { return len(a) }, iters...) 392 393 b.ResetTimer() 394 for i := 0; i < b.N; i++ { 395 key, _ := m.Next() 396 if key == nil { 397 key, _ = m.First() 398 } 399 _ = key 400 } 401 m.Close() 402 }) 403 } 404 }) 405 } 406 } 407 408 func BenchmarkMergingIterPrev(b *testing.B) { 409 const blockSize = 32 << 10 410 411 for _, restartInterval := range []int{16} { 412 b.Run(fmt.Sprintf("restart=%d", restartInterval), 413 func(b *testing.B) { 414 for _, count := range []int{1, 2, 3, 4, 5} { 415 b.Run(fmt.Sprintf("count=%d", count), 416 func(b *testing.B) { 417 readers, _, cleanup := buildMergingIterTables(b, blockSize, restartInterval, count) 418 defer cleanup() 419 iters := make([]internalIterator, len(readers)) 420 for i := range readers { 421 var err error 422 iters[i], err = readers[i].NewIter(nil /* lower */, nil /* upper */) 423 require.NoError(b, err) 424 } 425 var stats base.InternalIteratorStats 426 m := newMergingIter(nil /* logger */, &stats, DefaultComparer.Compare, 427 func(a []byte) int { return len(a) }, iters...) 428 429 b.ResetTimer() 430 for i := 0; i < b.N; i++ { 431 key, _ := m.Prev() 432 if key == nil { 433 key, _ = m.Last() 434 } 435 _ = key 436 } 437 m.Close() 438 }) 439 } 440 }) 441 } 442 } 443 444 // Builds levels for BenchmarkMergingIterSeqSeekGEWithBounds. The lowest level, 445 // index 0 here, contains most of the data. Each level has 2 files, to allow for 446 // stepping into the second file if needed. The lowest level has all the keys in 447 // the file 0, and a single "lastIKey" in file 1. File 0 in all other levels have 448 // only the first and last key of file 0 of the aforementioned level -- this 449 // simulates sparseness of data, but not necessarily of file width, in higher 450 // levels. File 1 in other levels is similar to File 1 in the aforementioned level 451 // since it is only for stepping into. If writeRangeTombstoneToLowestLevel is 452 // true, a range tombstone is written to the first lowest level file that 453 // deletes all the keys in it, and no other levels should be written. 454 func buildLevelsForMergingIterSeqSeek( 455 b *testing.B, 456 blockSize, restartInterval, levelCount int, 457 keyOffset int, 458 writeRangeTombstoneToLowestLevel bool, 459 writeBloomFilters bool, 460 ) ([][]*sstable.Reader, []manifest.LevelSlice, [][]byte) { 461 mem := vfs.NewMem() 462 if writeRangeTombstoneToLowestLevel && levelCount != 1 { 463 panic("expect to write only 1 level") 464 } 465 files := make([][]vfs.File, levelCount) 466 for i := range files { 467 for j := 0; j < 2; j++ { 468 f, err := mem.Create(fmt.Sprintf("bench%d_%d", i, j)) 469 if err != nil { 470 b.Fatal(err) 471 } 472 files[i] = append(files[i], f) 473 } 474 } 475 476 writers := make([][]*sstable.Writer, levelCount) 477 // A policy unlikely to have false positives. 478 filterPolicy := bloom.FilterPolicy(100) 479 for i := range files { 480 for j := range files[i] { 481 writerOptions := sstable.WriterOptions{ 482 BlockRestartInterval: restartInterval, 483 BlockSize: blockSize, 484 Compression: NoCompression, 485 } 486 if writeBloomFilters { 487 writerOptions.FilterPolicy = filterPolicy 488 writerOptions.FilterType = base.TableFilter 489 } 490 writers[i] = append(writers[i], sstable.NewWriter(files[i][j], writerOptions)) 491 } 492 } 493 494 var keys [][]byte 495 i := keyOffset 496 const targetSize = 2 << 20 497 w := writers[0][0] 498 for ; w.EstimatedSize() < targetSize; i++ { 499 key := []byte(fmt.Sprintf("%08d", i)) 500 keys = append(keys, key) 501 ikey := base.MakeInternalKey(key, 0, InternalKeyKindSet) 502 w.Add(ikey, nil) 503 } 504 if writeRangeTombstoneToLowestLevel { 505 tombstoneKey := base.MakeInternalKey(keys[0], 1, InternalKeyKindRangeDelete) 506 w.Add(tombstoneKey, []byte(fmt.Sprintf("%08d", i))) 507 } 508 for j := 1; j < len(files); j++ { 509 for _, k := range []int{0, len(keys) - 1} { 510 ikey := base.MakeInternalKey(keys[k], uint64(j), InternalKeyKindSet) 511 writers[j][0].Add(ikey, nil) 512 } 513 } 514 lastKey := []byte(fmt.Sprintf("%08d", i)) 515 keys = append(keys, lastKey) 516 for j := 0; j < len(files); j++ { 517 lastIKey := base.MakeInternalKey(lastKey, uint64(j), InternalKeyKindSet) 518 writers[j][1].Add(lastIKey, nil) 519 } 520 for _, levelWriters := range writers { 521 for _, w := range levelWriters { 522 if err := w.Close(); err != nil { 523 b.Fatal(err) 524 } 525 } 526 } 527 528 opts := sstable.ReaderOptions{Cache: NewCache(128 << 20)} 529 if writeBloomFilters { 530 opts.Filters = make(map[string]FilterPolicy) 531 opts.Filters[filterPolicy.Name()] = filterPolicy 532 } 533 defer opts.Cache.Unref() 534 535 readers := make([][]*sstable.Reader, levelCount) 536 for i := range files { 537 for j := range files[i] { 538 f, err := mem.Open(fmt.Sprintf("bench%d_%d", i, j)) 539 if err != nil { 540 b.Fatal(err) 541 } 542 r, err := sstable.NewReader(f, opts) 543 if err != nil { 544 b.Fatal(err) 545 } 546 readers[i] = append(readers[i], r) 547 } 548 } 549 levelSlices := make([]manifest.LevelSlice, levelCount) 550 for i := range readers { 551 meta := make([]*fileMetadata, len(readers[i])) 552 for j := range readers[i] { 553 iter, err := readers[i][j].NewIter(nil /* lower */, nil /* upper */) 554 require.NoError(b, err) 555 smallest, _ := iter.First() 556 meta[j] = &fileMetadata{} 557 // The same FileNum is being reused across different levels, which 558 // is harmless for the benchmark since each level has its own iterator 559 // creation func. 560 meta[j].FileNum = FileNum(j) 561 largest, _ := iter.Last() 562 meta[j].ExtendPointKeyBounds(opts.Comparer.Compare, smallest.Clone(), largest.Clone()) 563 } 564 levelSlices[i] = manifest.NewLevelSliceSpecificOrder(meta) 565 } 566 return readers, levelSlices, keys 567 } 568 569 func buildMergingIter(readers [][]*sstable.Reader, levelSlices []manifest.LevelSlice) *mergingIter { 570 mils := make([]mergingIterLevel, len(levelSlices)) 571 for i := len(readers) - 1; i >= 0; i-- { 572 levelIndex := i 573 level := len(readers) - 1 - i 574 newIters := func( 575 file *manifest.FileMetadata, opts *IterOptions, _ internalIterOpts, 576 ) (internalIterator, keyspan.FragmentIterator, error) { 577 iter, err := readers[levelIndex][file.FileNum].NewIter( 578 opts.LowerBound, opts.UpperBound) 579 if err != nil { 580 return nil, nil, err 581 } 582 rdIter, err := readers[levelIndex][file.FileNum].NewRawRangeDelIter() 583 if err != nil { 584 iter.Close() 585 return nil, nil, err 586 } 587 return iter, rdIter, err 588 } 589 l := newLevelIter(IterOptions{}, DefaultComparer.Compare, 590 func(a []byte) int { return len(a) }, newIters, levelSlices[i].Iter(), 591 manifest.Level(level), nil) 592 l.initRangeDel(&mils[level].rangeDelIter) 593 l.initBoundaryContext(&mils[level].levelIterBoundaryContext) 594 mils[level].iter = l 595 } 596 var stats base.InternalIteratorStats 597 m := &mergingIter{} 598 m.init(nil /* logger */, &stats, DefaultComparer.Compare, 599 func(a []byte) int { return len(a) }, mils...) 600 return m 601 } 602 603 // A benchmark that simulates the behavior of a mergingIter where 604 // monotonically increasing narrow bounds are repeatedly set and used to Seek 605 // and then iterate over the keys within the bounds. This resembles MVCC 606 // scanning by CockroachDB when doing a lookup/index join with a large number 607 // of left rows, that are batched and reuse the same iterator, and which can 608 // have good locality of access. This results in the successive bounds being 609 // in the same file. 610 func BenchmarkMergingIterSeqSeekGEWithBounds(b *testing.B) { 611 const blockSize = 32 << 10 612 613 restartInterval := 16 614 for _, levelCount := range []int{5} { 615 b.Run(fmt.Sprintf("levelCount=%d", levelCount), 616 func(b *testing.B) { 617 readers, levelSlices, keys := buildLevelsForMergingIterSeqSeek( 618 b, blockSize, restartInterval, levelCount, 0 /* keyOffset */, false, false) 619 m := buildMergingIter(readers, levelSlices) 620 keyCount := len(keys) 621 b.ResetTimer() 622 for i := 0; i < b.N; i++ { 623 pos := i % (keyCount - 1) 624 m.SetBounds(keys[pos], keys[pos+1]) 625 // SeekGE will return keys[pos]. 626 k, _ := m.SeekGE(keys[pos], base.SeekGEFlagsNone) 627 for k != nil { 628 k, _ = m.Next() 629 } 630 } 631 m.Close() 632 for i := range readers { 633 for j := range readers[i] { 634 readers[i][j].Close() 635 } 636 } 637 }) 638 } 639 } 640 641 func BenchmarkMergingIterSeqSeekPrefixGE(b *testing.B) { 642 const blockSize = 32 << 10 643 const restartInterval = 16 644 const levelCount = 5 645 readers, levelSlices, keys := buildLevelsForMergingIterSeqSeek( 646 b, blockSize, restartInterval, levelCount, 0 /* keyOffset */, false, false) 647 648 for _, skip := range []int{1, 2, 4, 8, 16} { 649 for _, useNext := range []bool{false, true} { 650 b.Run(fmt.Sprintf("skip=%d/use-next=%t", skip, useNext), 651 func(b *testing.B) { 652 m := buildMergingIter(readers, levelSlices) 653 keyCount := len(keys) 654 pos := 0 655 656 m.SeekPrefixGE(keys[pos], keys[pos], base.SeekGEFlagsNone) 657 b.ResetTimer() 658 for i := 0; i < b.N; i++ { 659 pos += skip 660 var flags base.SeekGEFlags 661 if useNext { 662 flags = flags.EnableTrySeekUsingNext() 663 } 664 if pos >= keyCount { 665 pos = 0 666 flags = flags.DisableTrySeekUsingNext() 667 } 668 // SeekPrefixGE will return keys[pos]. 669 m.SeekPrefixGE(keys[pos], keys[pos], flags) 670 } 671 b.StopTimer() 672 m.Close() 673 }) 674 } 675 } 676 for i := range readers { 677 for j := range readers[i] { 678 readers[i][j].Close() 679 } 680 } 681 }