github.com/cockroachdb/pebble@v1.1.1-0.20240513155919-3622ade60459/data_test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	crand "crypto/rand"
	"fmt"
	"io"
	"math"
	"math/rand"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/internal/keyspan"
	"github.com/cockroachdb/pebble/internal/private"
	"github.com/cockroachdb/pebble/internal/rangedel"
	"github.com/cockroachdb/pebble/internal/rangekey"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/objstorage/remote"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

type iterCmdOpts struct {
	verboseKey bool
	stats      *base.InternalIteratorStats
}

type iterCmdOpt func(*iterCmdOpts)

func iterCmdVerboseKey(opts *iterCmdOpts) { opts.verboseKey = true }

func iterCmdStats(stats *base.InternalIteratorStats) iterCmdOpt {
	return func(opts *iterCmdOpts) {
		opts.stats = stats
	}
}

func runGetCmd(t testing.TB, td *datadriven.TestData, d *DB) string {
	snap := Snapshot{
		db:     d,
		seqNum: InternalKeySeqNumMax,
	}
	td.MaybeScanArgs(t, "seq", &snap.seqNum)

	var buf bytes.Buffer
	for _, data := range strings.Split(td.Input, "\n") {
		v, closer, err := snap.Get([]byte(data))
		if err != nil {
			fmt.Fprintf(&buf, "%s: %s\n", data, err)
		} else {
			fmt.Fprintf(&buf, "%s:%s\n", data, v)
			closer.Close()
		}
	}
	return buf.String()
}

func runIterCmd(d *datadriven.TestData, iter *Iterator, closeIter bool) string {
	if closeIter {
		defer func() {
			if iter != nil {
				iter.Close()
			}
		}()
	}
	var b bytes.Buffer
	for _, line := range strings.Split(d.Input, "\n") {
		parts := strings.Fields(line)
		if len(parts) == 0 {
			continue
		}
		printValidityState := false
		var valid bool
		var validityState IterValidityState
		switch parts[0] {
		case "seek-ge":
			if len(parts) != 2 {
				return "seek-ge <key>\n"
			}
			valid = iter.SeekGE([]byte(parts[1]))
		case "seek-prefix-ge":
			if len(parts) != 2 {
				return "seek-prefix-ge <key>\n"
			}
			valid = iter.SeekPrefixGE([]byte(parts[1]))
		case "seek-lt":
			if len(parts) != 2 {
				return "seek-lt <key>\n"
			}
			valid = iter.SeekLT([]byte(parts[1]))
		case "seek-ge-limit":
			if len(parts) != 3 {
				return "seek-ge-limit <key> <limit>\n"
			}
			validityState = iter.SeekGEWithLimit(
				[]byte(parts[1]), []byte(parts[2]))
			printValidityState = true
		case "seek-lt-limit":
			if len(parts) != 3 {
				return "seek-lt-limit <key> <limit>\n"
			}
			validityState = iter.SeekLTWithLimit(
				[]byte(parts[1]), []byte(parts[2]))
			printValidityState = true
		case "inspect":
			if len(parts) != 2 {
				return "inspect <field>\n"
			}
			field := parts[1]
			switch field {
			case "lastPositioningOp":
				op := "?"
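				// Map the iterator's internal lastPositioningOp value to a
				// stable, human-readable label for the test output.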
				switch iter.lastPositioningOp {
				case unknownLastPositionOp:
					op = "unknown"
				case seekPrefixGELastPositioningOp:
					op = "seekprefixge"
				case seekGELastPositioningOp:
					op = "seekge"
				case seekLTLastPositioningOp:
					op = "seeklt"
				case invalidatedLastPositionOp:
					op = "invalidate"
				}
				fmt.Fprintf(&b, "%s=%q\n", field, op)
			default:
				return fmt.Sprintf("unrecognized inspect field %q\n", field)
			}
			continue
		case "next-limit":
			if len(parts) != 2 {
				return "next-limit <limit>\n"
			}
			validityState = iter.NextWithLimit([]byte(parts[1]))
			printValidityState = true
		case "internal-next":
			validity, keyKind := iter.internalNext()
			switch validity {
			case internalNextError:
				fmt.Fprintf(&b, "err: %s\n", iter.Error())
			case internalNextExhausted:
				fmt.Fprint(&b, ".\n")
			case internalNextValid:
				fmt.Fprintf(&b, "%s\n", keyKind)
			default:
				panic("unreachable")
			}
			continue
		case "can-deterministically-single-delete":
			ok, err := CanDeterministicallySingleDelete(iter)
			if err != nil {
				fmt.Fprintf(&b, "err: %s\n", err)
			} else {
				fmt.Fprintf(&b, "%t\n", ok)
			}
			continue
		case "prev-limit":
			if len(parts) != 2 {
				return "prev-limit <limit>\n"
			}
			validityState = iter.PrevWithLimit([]byte(parts[1]))
			printValidityState = true
		case "first":
			valid = iter.First()
		case "last":
			valid = iter.Last()
		case "next":
			valid = iter.Next()
		case "next-prefix":
			valid = iter.NextPrefix()
		case "prev":
			valid = iter.Prev()
		case "set-bounds":
			if len(parts) <= 1 || len(parts) > 3 {
				return "set-bounds lower=<lower> upper=<upper>\n"
			}
			var lower []byte
			var upper []byte
			for _, part := range parts[1:] {
				arg := strings.Split(part, "=")
				switch arg[0] {
				case "lower":
					lower = []byte(arg[1])
				case "upper":
					upper = []byte(arg[1])
				default:
					return fmt.Sprintf("set-bounds: unknown arg: %s", arg)
				}
			}
			iter.SetBounds(lower, upper)
			valid = iter.Valid()
		case "set-options":
			opts := iter.opts
			if _, err := parseIterOptions(&opts, &iter.opts, parts[1:]); err != nil {
				return fmt.Sprintf("set-options: %s", err.Error())
			}
			iter.SetOptions(&opts)
			valid = iter.Valid()
		case "stats":
			stats := iter.Stats()
			// The timing is non-deterministic, so set to 0.
			stats.InternalStats.BlockReadDuration = 0
			fmt.Fprintf(&b, "stats: %s\n", stats.String())
			continue
		case "clone":
			var opts CloneOptions
			if len(parts) > 1 {
				var iterOpts IterOptions
				if foundAny, err := parseIterOptions(&iterOpts, &iter.opts, parts[1:]); err != nil {
					return fmt.Sprintf("clone: %s", err.Error())
				} else if foundAny {
					opts.IterOptions = &iterOpts
				}
				for _, part := range parts[1:] {
					if arg := strings.Split(part, "="); len(arg) == 2 && arg[0] == "refresh-batch" {
						var err error
						opts.RefreshBatchView, err = strconv.ParseBool(arg[1])
						if err != nil {
							return fmt.Sprintf("clone: refresh-batch: %s", err.Error())
						}
					}
				}
			}
			clonedIter, err := iter.Clone(opts)
			if err != nil {
				fmt.Fprintf(&b, "error in clone, skipping rest of input: err=%v\n", err)
				return b.String()
			}
			if err = iter.Close(); err != nil {
				fmt.Fprintf(&b, "err=%v\n", err)
			}
			iter = clonedIter
		case "is-using-combined":
			if iter.opts.KeyTypes != IterKeyTypePointsAndRanges {
				fmt.Fprintln(&b, "not configured for combined iteration")
			} else if iter.lazyCombinedIter.combinedIterState.initialized {
				fmt.Fprintln(&b, "using combined (non-lazy) iterator")
			} else {
				fmt.Fprintln(&b, "using lazy iterator")
			}
			continue
		default:
			return fmt.Sprintf("unknown op: %s", parts[0])
		}

		valid = valid || validityState == IterValid
		if valid != iter.Valid() {
			fmt.Fprintf(&b, "mismatched valid states: %t vs %t\n", valid, iter.Valid())
		}
		hasPoint, hasRange := iter.HasPointAndRange()
		hasEither := hasPoint || hasRange
		if hasEither != valid {
			fmt.Fprintf(&b, "mismatched valid/HasPointAndRange states: valid=%t HasPointAndRange=(%t,%t)\n", valid, hasPoint, hasRange)
		}

		if valid {
			validityState = IterValid
		}
		printIterState(&b, iter, validityState, printValidityState)
	}
	return b.String()
}

func parseIterOptions(
	opts *IterOptions, ref *IterOptions, parts []string,
) (foundAny bool, err error) {
	const usageString = "[lower=<lower>] [upper=<upper>] [key-types=point|range|both] [mask-suffix=<suffix>] [mask-filter=<bool>] [only-durable=<bool>] [table-filter=reuse|none] [point-filters=reuse|none]\n"
	for _, part := range parts {
		arg := strings.SplitN(part, "=", 2)
		if len(arg) != 2 {
			return false, errors.Newf(usageString)
		}
		switch arg[0] {
		case "point-filters":
			switch arg[1] {
			case "reuse":
				opts.PointKeyFilters = ref.PointKeyFilters
			case "none":
				opts.PointKeyFilters = nil
			default:
				return false, errors.Newf("unknown arg point-filter=%q:\n%s", arg[1], usageString)
			}
		case "lower":
			opts.LowerBound = []byte(arg[1])
		case "upper":
			opts.UpperBound = []byte(arg[1])
		case "key-types":
			switch arg[1] {
			case "point":
				opts.KeyTypes = IterKeyTypePointsOnly
			case "range":
				opts.KeyTypes = IterKeyTypeRangesOnly
			case "both":
				opts.KeyTypes = IterKeyTypePointsAndRanges
			default:
				return false, errors.Newf("unknown key-type %q:\n%s", arg[1], usageString)
			}
		case "mask-suffix":
			opts.RangeKeyMasking.Suffix = []byte(arg[1])
		case "mask-filter":
			opts.RangeKeyMasking.Filter = func() BlockPropertyFilterMask {
				return sstable.NewTestKeysMaskingFilter()
			}
		case "table-filter":
			switch arg[1] {
			case "reuse":
				opts.TableFilter = ref.TableFilter
			case "none":
				opts.TableFilter = nil
			default:
				return false, errors.Newf("unknown arg table-filter=%q:\n%s", arg[1], usageString)
			}
		case "only-durable":
			var err error
			opts.OnlyReadGuaranteedDurable, err = strconv.ParseBool(arg[1])
			if err != nil {
				return false, errors.Newf("cannot parse only-durable=%q: %s", arg[1], err)
			}
		default:
			continue
		}
		foundAny = true
	}
	return foundAny, nil
}

func printIterState(
	b io.Writer, iter *Iterator, validity IterValidityState, printValidityState bool,
) {
	var validityStateStr string
	if printValidityState {
		switch validity {
		case IterExhausted:
			validityStateStr = " exhausted"
		case IterValid:
			validityStateStr = " valid"
		case IterAtLimit:
			validityStateStr = " at-limit"
		}
	}
	if err := iter.Error(); err != nil {
		fmt.Fprintf(b, "err=%v\n", err)
	} else if validity == IterValid {
		switch {
		case iter.opts.pointKeys():
			hasPoint, hasRange := iter.HasPointAndRange()
			fmt.Fprintf(b, "%s:%s (", iter.Key(), validityStateStr)
			if hasPoint {
				fmt.Fprintf(b, "%s, ", iter.Value())
			} else {
				fmt.Fprint(b, "., ")
			}
			if hasRange {
				start, end := iter.RangeBounds()
				fmt.Fprintf(b, "[%s-%s)", formatASCIIKey(start), formatASCIIKey(end))
				writeRangeKeys(b, iter)
			} else {
				fmt.Fprint(b, ".")
			}
			if iter.RangeKeyChanged() {
				fmt.Fprint(b, " UPDATED")
			}
			fmt.Fprint(b, ")")
		default:
			if iter.Valid() {
				hasPoint, hasRange := iter.HasPointAndRange()
				if hasPoint || !hasRange {
					panic(fmt.Sprintf("pebble: unexpected HasPointAndRange (%t, %t)", hasPoint, hasRange))
				}
				start, end := iter.RangeBounds()
				fmt.Fprintf(b, "%s [%s-%s)", iter.Key(), formatASCIIKey(start), formatASCIIKey(end))
				writeRangeKeys(b, iter)
			} else {
				fmt.Fprint(b, ".")
			}
			if iter.RangeKeyChanged() {
				fmt.Fprint(b, " UPDATED")
			}
		}
		fmt.Fprintln(b)
	} else {
		fmt.Fprintf(b, ".%s\n", validityStateStr)
	}
}

func formatASCIIKey(b []byte) string {
	if bytes.IndexFunc(b, func(r rune) bool { return r < 'A' || r > 'z' }) != -1 {
		// This key is not just ASCII letters. Quote it.
		return fmt.Sprintf("%q", b)
	}
	return string(b)
}

func writeRangeKeys(b io.Writer, iter *Iterator) {
	rangeKeys := iter.RangeKeys()
	for j := 0; j < len(rangeKeys); j++ {
		if j > 0 {
			fmt.Fprint(b, ",")
		}
		fmt.Fprintf(b, " %s=%s", rangeKeys[j].Suffix, rangeKeys[j].Value)
	}
}

func runInternalIterCmd(
	t *testing.T, d *datadriven.TestData, iter internalIterator, opts ...iterCmdOpt,
) string {
	var o iterCmdOpts
	for _, opt := range opts {
		opt(&o)
	}

	getKV := func(key *InternalKey, val LazyValue) (*InternalKey, []byte) {
		v, _, err := val.Value(nil)
		require.NoError(t, err)
		return key, v
	}
	var b bytes.Buffer
	var prefix []byte
	for _, line := range strings.Split(d.Input, "\n") {
		parts := strings.Fields(line)
		if len(parts) == 0 {
			continue
		}
		var key *InternalKey
		var value []byte
		switch parts[0] {
		case "seek-ge":
			if len(parts) < 2 || len(parts) > 3 {
				return "seek-ge <key> [<try-seek-using-next>]\n"
			}
			prefix = nil
			var flags base.SeekGEFlags
			if len(parts) == 3 {
				if trySeekUsingNext, err := strconv.ParseBool(parts[2]); err != nil {
					return err.Error()
				} else if trySeekUsingNext {
					flags = flags.EnableTrySeekUsingNext()
				}
			}
			key, value = getKV(iter.SeekGE([]byte(strings.TrimSpace(parts[1])), flags))
		case "seek-prefix-ge":
			if len(parts) != 2 && len(parts) != 3 {
				return "seek-prefix-ge <key> [<try-seek-using-next>]\n"
			}
			prefix = []byte(strings.TrimSpace(parts[1]))
			var flags base.SeekGEFlags
			if len(parts) == 3 {
				if trySeekUsingNext, err := strconv.ParseBool(parts[2]); err != nil {
					return err.Error()
				} else if trySeekUsingNext {
					flags = flags.EnableTrySeekUsingNext()
				}
			}
			key, value = getKV(iter.SeekPrefixGE(prefix, prefix /* key */, flags))
		case "seek-lt":
			if len(parts) != 2 {
				return "seek-lt <key>\n"
			}
			prefix = nil
			key, value = getKV(iter.SeekLT([]byte(strings.TrimSpace(parts[1])), base.SeekLTFlagsNone))
		case "first":
			prefix = nil
			key, value = getKV(iter.First())
		case "last":
			prefix = nil
			key, value = getKV(iter.Last())
		case "next":
			key, value = getKV(iter.Next())
		case "prev":
			key, value = getKV(iter.Prev())
		case "set-bounds":
			if len(parts) <= 1 || len(parts) > 3 {
				return "set-bounds lower=<lower> upper=<upper>\n"
			}
			var lower []byte
			var upper []byte
			for _, part := range parts[1:] {
				arg := strings.Split(strings.TrimSpace(part), "=")
				switch arg[0] {
				case "lower":
					lower = []byte(arg[1])
				case "upper":
					upper = []byte(arg[1])
				default:
					return fmt.Sprintf("set-bounds: unknown arg: %s", arg)
				}
			}
			iter.SetBounds(lower, upper)
			continue
		case "stats":
			if o.stats != nil {
				// The timing is non-deterministic, so set to 0.
				o.stats.BlockReadDuration = 0
				fmt.Fprintf(&b, "%+v\n", *o.stats)
			}
			continue
		case "reset-stats":
			if o.stats != nil {
				*o.stats = base.InternalIteratorStats{}
			}
			continue
		default:
			return fmt.Sprintf("unknown op: %s", parts[0])
		}
		if key != nil {
			if o.verboseKey {
				fmt.Fprintf(&b, "%s:%s\n", key, value)
			} else {
				fmt.Fprintf(&b, "%s:%s\n", key.UserKey, value)
			}
		} else if err := iter.Error(); err != nil {
			fmt.Fprintf(&b, "err=%v\n", err)
		} else {
			fmt.Fprintf(&b, ".\n")
		}
	}
	return b.String()
}

func runBatchDefineCmd(d *datadriven.TestData, b *Batch) error {
	for _, line := range strings.Split(d.Input, "\n") {
		parts := strings.Fields(line)
		if len(parts) == 0 {
			continue
		}
		if parts[1] == `<nil>` {
			parts[1] = ""
		}
		var err error
		switch parts[0] {
		case "set":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.Set([]byte(parts[1]), []byte(parts[2]), nil)
		case "del":
			if len(parts) != 2 {
				return errors.Errorf("%s expects 1 argument", parts[0])
			}
			err = b.Delete([]byte(parts[1]), nil)
		case "del-sized":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			var valSize uint64
			valSize, err = strconv.ParseUint(parts[2], 10, 32)
			if err != nil {
				return err
			}
			err = b.DeleteSized([]byte(parts[1]), uint32(valSize), nil)
		case "singledel":
			if len(parts) != 2 {
				return errors.Errorf("%s expects 1 argument", parts[0])
			}
			err = b.SingleDelete([]byte(parts[1]), nil)
		case "del-range":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.DeleteRange([]byte(parts[1]), []byte(parts[2]), nil)
		case "merge":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.Merge([]byte(parts[1]), []byte(parts[2]), nil)
		case "range-key-set":
			if len(parts) < 4 || len(parts) > 5 {
				return errors.Errorf("%s expects 3 or 4 arguments", parts[0])
			}
			var val []byte
			if len(parts) == 5 {
				val = []byte(parts[4])
			}
			err = b.RangeKeySet(
				[]byte(parts[1]),
				[]byte(parts[2]),
				[]byte(parts[3]),
				val,
				nil)
		case "range-key-unset":
			if len(parts) != 4 {
				return errors.Errorf("%s expects 3 arguments", parts[0])
			}
			err = b.RangeKeyUnset(
				[]byte(parts[1]),
				[]byte(parts[2]),
				[]byte(parts[3]),
				nil)
		case "range-key-del":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.RangeKeyDelete(
				[]byte(parts[1]),
				[]byte(parts[2]),
				nil)
		default:
			return errors.Errorf("unknown op: %s", parts[0])
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func runBuildRemoteCmd(td *datadriven.TestData, d *DB, storage remote.Storage) error {
	b := d.NewIndexedBatch()
	if err := runBatchDefineCmd(td, b); err != nil {
		return err
	}

	if len(td.CmdArgs) < 1 {
		return errors.New("build <path>: argument missing")
	}
	path := td.CmdArgs[0].String()

	// Override table format, if provided.
	tableFormat := d.opts.FormatMajorVersion.MaxTableFormat()
	for _, cmdArg := range td.CmdArgs[1:] {
		switch cmdArg.Key {
		case "format":
			switch cmdArg.Vals[0] {
			case "leveldb":
				tableFormat = sstable.TableFormatLevelDB
			case "rocksdbv2":
				tableFormat = sstable.TableFormatRocksDBv2
			case "pebblev1":
				tableFormat = sstable.TableFormatPebblev1
			case "pebblev2":
				tableFormat = sstable.TableFormatPebblev2
			case "pebblev3":
				tableFormat = sstable.TableFormatPebblev3
			case "pebblev4":
				tableFormat = sstable.TableFormatPebblev4
			default:
				return errors.Errorf("unknown format string %s", cmdArg.Vals[0])
			}
		}
	}

	writeOpts := d.opts.MakeWriterOptions(0 /* level */, tableFormat)

	f, err := storage.CreateObject(path)
	if err != nil {
		return err
	}
	w := sstable.NewWriter(objstorageprovider.NewRemoteWritable(f), writeOpts)
	iter := b.newInternalIter(nil)
	for key, val := iter.First(); key != nil; key, val = iter.Next() {
		tmp := *key
		tmp.SetSeqNum(0)
		if err := w.Add(tmp, val.InPlaceValue()); err != nil {
			return err
		}
	}
	if err := iter.Close(); err != nil {
		return err
	}

	if rdi := b.newRangeDelIter(nil, math.MaxUint64); rdi != nil {
		for s := rdi.First(); s != nil; s = rdi.Next() {
			err := rangedel.Encode(s, func(k base.InternalKey, v []byte) error {
				k.SetSeqNum(0)
				return w.Add(k, v)
			})
			if err != nil {
				return err
			}
		}
	}

	if rki := b.newRangeKeyIter(nil, math.MaxUint64); rki != nil {
		for s := rki.First(); s != nil; s = rki.Next() {
			for _, k := range s.Keys {
				var err error
				switch k.Kind() {
				case base.InternalKeyKindRangeKeySet:
					err = w.RangeKeySet(s.Start, s.End, k.Suffix, k.Value)
				case base.InternalKeyKindRangeKeyUnset:
					err = w.RangeKeyUnset(s.Start, s.End, k.Suffix)
				case base.InternalKeyKindRangeKeyDelete:
					err = w.RangeKeyDelete(s.Start, s.End)
				default:
					panic("not a range key")
				}
				if err != nil {
					return err
				}
			}
		}
	}

	return w.Close()
}

func runBuildCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error {
	b := d.NewIndexedBatch()
	if err := runBatchDefineCmd(td, b); err != nil {
		return err
	}

	if len(td.CmdArgs) < 1 {
		return errors.New("build <path>: argument missing")
	}
	path := td.CmdArgs[0].String()

	// Override table format, if provided.
	tableFormat := d.opts.FormatMajorVersion.MaxTableFormat()
	for _, cmdArg := range td.CmdArgs[1:] {
		switch cmdArg.Key {
		case "format":
			switch cmdArg.Vals[0] {
			case "leveldb":
				tableFormat = sstable.TableFormatLevelDB
			case "rocksdbv2":
				tableFormat = sstable.TableFormatRocksDBv2
			case "pebblev1":
				tableFormat = sstable.TableFormatPebblev1
			case "pebblev2":
				tableFormat = sstable.TableFormatPebblev2
			case "pebblev3":
				tableFormat = sstable.TableFormatPebblev3
			case "pebblev4":
				tableFormat = sstable.TableFormatPebblev4
			default:
				return errors.Errorf("unknown format string %s", cmdArg.Vals[0])
			}
		}
	}

	writeOpts := d.opts.MakeWriterOptions(0 /* level */, tableFormat)

	f, err := fs.Create(path)
	if err != nil {
		return err
	}
	w := sstable.NewWriter(objstorageprovider.NewFileWritable(f), writeOpts)
	iter := b.newInternalIter(nil)
	for key, val := iter.First(); key != nil; key, val = iter.Next() {
		tmp := *key
		tmp.SetSeqNum(0)
		if err := w.Add(tmp, val.InPlaceValue()); err != nil {
			return err
		}
	}
	if err := iter.Close(); err != nil {
		return err
	}

	if rdi := b.newRangeDelIter(nil, math.MaxUint64); rdi != nil {
		for s := rdi.First(); s != nil; s = rdi.Next() {
			err := rangedel.Encode(s, func(k base.InternalKey, v []byte) error {
				k.SetSeqNum(0)
				return w.Add(k, v)
			})
			if err != nil {
				return err
			}
		}
	}

	if rki := b.newRangeKeyIter(nil, math.MaxUint64); rki != nil {
		for s := rki.First(); s != nil; s = rki.Next() {
			for _, k := range s.Keys {
				var err error
				switch k.Kind() {
				case base.InternalKeyKindRangeKeySet:
					err = w.RangeKeySet(s.Start, s.End, k.Suffix, k.Value)
				case base.InternalKeyKindRangeKeyUnset:
					err = w.RangeKeyUnset(s.Start, s.End, k.Suffix)
				case base.InternalKeyKindRangeKeyDelete:
					err = w.RangeKeyDelete(s.Start, s.End)
				default:
					panic("not a range key")
				}
				if err != nil {
					return err
				}
			}
		}
	}

	return w.Close()
}

func runCompactCmd(td *datadriven.TestData, d *DB) error {
	if len(td.CmdArgs) > 4 {
		return errors.Errorf("%s expects at most four arguments", td.Cmd)
	}
	parts := strings.Split(td.CmdArgs[0].Key, "-")
	if len(parts) != 2 {
		return errors.Errorf("expected <begin>-<end>: %s", td.Input)
	}
	parallelize := td.HasArg("parallel")
	if len(td.CmdArgs) >= 2 && strings.HasPrefix(td.CmdArgs[1].Key, "L") {
		levelString := td.CmdArgs[1].String()
		iStart := base.MakeInternalKey([]byte(parts[0]), InternalKeySeqNumMax, InternalKeyKindMax)
		iEnd := base.MakeInternalKey([]byte(parts[1]), 0, 0)
		if levelString[0] != 'L' {
			return errors.Errorf("expected L<n>: %s", levelString)
		}
		level, err := strconv.Atoi(levelString[1:])
		if err != nil {
			return err
		}
		return d.manualCompact(iStart.UserKey, iEnd.UserKey, level, parallelize)
	}
	return d.Compact([]byte(parts[0]), []byte(parts[1]), parallelize)
}

// runDBDefineCmd prepares a database state, returning the opened
// database with the initialized state.
//
// The command accepts input describing memtables and sstables to
// construct. Each new table is indicated by a line containing the
// level of the next table to build (e.g., "L6"), or "mem" to build
// a memtable. Each subsequent line contains a new key-value pair.
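//
// For example, a hypothetical input (illustrative only, not taken from an
// existing test) that builds one memtable and one L6 sstable:
//
//	mem
//	  a.SET.11:a
//	L6
//	  b.SET.5:foo
//	  c.DEL.8: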
//
// Point keys and range deletions should be encoded as the
// InternalKey's string representation, as understood by
// ParseInternalKey, followed by a colon and the corresponding value.
//
//	b.SET.50:foo
//	c.DEL.20
//
// Range keys may be encoded by prefixing the line with `rangekey:`,
// followed by the keyspan.Span string representation, as understood
// by keyspan.ParseSpan.
//
//	rangekey:b-d:{(#5,RANGEKEYSET,@2,foo)}
//
// # Mechanics
//
// runDBDefineCmd works by simulating a flush for every file written.
// Keys are written to a memtable. When a file is complete, the table
// is flushed to physical files through manually invoking runCompaction.
// The resulting version edit is then manipulated to write the files
// to the indicated level.
//
// Because of its low-level manipulation, runDBDefineCmd does allow the
// creation of invalid database states. If opts.DebugCheck is set, the
// level checker should detect the invalid state.
func runDBDefineCmd(td *datadriven.TestData, opts *Options) (*DB, error) {
	opts = opts.EnsureDefaults()
	opts.FS = vfs.NewMem()

	var snapshots []uint64
	var levelMaxBytes map[int]int64
	for _, arg := range td.CmdArgs {
		switch arg.Key {
		case "target-file-sizes":
			opts.Levels = make([]LevelOptions, len(arg.Vals))
			for i := range arg.Vals {
				size, err := strconv.ParseInt(arg.Vals[i], 10, 64)
				if err != nil {
					return nil, err
				}
				opts.Levels[i].TargetFileSize = size
			}
		case "snapshots":
			snapshots = make([]uint64, len(arg.Vals))
			for i := range arg.Vals {
				seqNum, err := strconv.ParseUint(arg.Vals[i], 10, 64)
				if err != nil {
					return nil, err
				}
				snapshots[i] = seqNum
				if i > 0 && snapshots[i] < snapshots[i-1] {
					return nil, errors.New("Snapshots must be in ascending order")
				}
			}
		case "lbase-max-bytes":
			lbaseMaxBytes, err := strconv.ParseInt(arg.Vals[0], 10, 64)
			if err != nil {
				return nil, err
			}
			opts.LBaseMaxBytes = lbaseMaxBytes
		case "level-max-bytes":
			levelMaxBytes = map[int]int64{}
			for i := range arg.Vals {
				j := strings.Index(arg.Vals[i], ":")
				levelStr := strings.TrimSpace(arg.Vals[i][:j])
				level, err := strconv.Atoi(levelStr[1:])
				if err != nil {
					return nil, err
				}
				size, err := strconv.ParseInt(strings.TrimSpace(arg.Vals[i][j+1:]), 10, 64)
				if err != nil {
					return nil, err
				}
				levelMaxBytes[level] = size
			}
		case "auto-compactions":
			switch arg.Vals[0] {
			case "off":
				opts.DisableAutomaticCompactions = true
			case "on":
				opts.DisableAutomaticCompactions = false
			default:
				return nil, errors.Errorf("Unrecognized %q %q arg value: %q", td.Cmd, arg.Key, arg.Vals[0])
			}
		case "enable-table-stats":
			enable, err := strconv.ParseBool(arg.Vals[0])
			if err != nil {
				return nil, errors.Errorf("%s: could not parse %q as bool: %s", td.Cmd, arg.Vals[0], err)
			}
			opts.private.disableTableStats = !enable
		case "block-size":
			size, err := strconv.Atoi(arg.Vals[0])
			if err != nil {
				return nil, err
			}
			for _, levelOpts := range opts.Levels {
				levelOpts.BlockSize = size
			}
		case "format-major-version":
			fmv, err := strconv.Atoi(arg.Vals[0])
			if err != nil {
				return nil, err
			}
			opts.FormatMajorVersion = FormatMajorVersion(fmv)
		case "disable-multi-level":
			opts.Experimental.MultiLevelCompactionHeuristic = NoMultiLevel{}
		}
	}

	// This is placed after the argument parsing above, because the arguments
	// to define should be parsed even if td.Input is empty.
	if td.Input == "" {
		// Empty LSM.
		d, err := Open("", opts)
		if err != nil {
			return nil, err
		}
		d.mu.Lock()
		for i := range snapshots {
			s := &Snapshot{db: d}
			s.seqNum = snapshots[i]
			d.mu.snapshots.pushBack(s)
		}
		for l, maxBytes := range levelMaxBytes {
			d.mu.versions.picker.(*compactionPickerByScore).levelMaxBytes[l] = maxBytes
		}
		d.mu.Unlock()
		return d, nil
	}

	d, err := Open("", opts)
	if err != nil {
		return nil, err
	}
	d.mu.Lock()
	d.mu.versions.dynamicBaseLevel = false
	for i := range snapshots {
		s := &Snapshot{db: d}
		s.seqNum = snapshots[i]
		d.mu.snapshots.pushBack(s)
	}
	defer d.mu.Unlock()

	var mem *memTable
	var start, end *base.InternalKey
	ve := &versionEdit{}
	level := -1

	maybeFlush := func() error {
		if level < 0 {
			return nil
		}

		toFlush := flushableList{{
			flushable: mem,
			flushed:   make(chan struct{}),
		}}
		c := newFlush(d.opts, d.mu.versions.currentVersion(),
			d.mu.versions.picker.getBaseLevel(), toFlush, time.Now())
		c.disableSpanElision = true
		// NB: define allows the test to exactly specify which keys go
		// into which sstables. If the test has a small target file
		// size to test grandparent limits, etc, the maxOutputFileSize
		// can cause splitting /within/ the bounds specified to the
		// test. Ignore the target size here, and split only according
		// to the user-defined boundaries.
		c.maxOutputFileSize = math.MaxUint64

		newVE, _, _, err := d.runCompaction(0, c)
		if err != nil {
			return err
		}
		largestSeqNum := d.mu.versions.logSeqNum.Load()
		for _, f := range newVE.NewFiles {
			if start != nil {
				f.Meta.SmallestPointKey = *start
				f.Meta.Smallest = *start
			}
			if end != nil {
				f.Meta.LargestPointKey = *end
				f.Meta.Largest = *end
			}
			if largestSeqNum <= f.Meta.LargestSeqNum {
				largestSeqNum = f.Meta.LargestSeqNum + 1
			}
			ve.NewFiles = append(ve.NewFiles, newFileEntry{
				Level: level,
				Meta:  f.Meta,
			})
		}
		// The committed keys were never written to the WAL, so neither
		// the logSeqNum nor the commit pipeline's visibleSeqNum have
		// been ratcheted. Manually ratchet them to the largest sequence
		// number committed to ensure iterators opened from the database
		// correctly observe the committed keys.
		if d.mu.versions.logSeqNum.Load() < largestSeqNum {
			d.mu.versions.logSeqNum.Store(largestSeqNum)
		}
		if d.mu.versions.visibleSeqNum.Load() < largestSeqNum {
			d.mu.versions.visibleSeqNum.Store(largestSeqNum)
		}
		level = -1
		return nil
	}

	// Example, a-c.
	parseMeta := func(s string) (*fileMetadata, error) {
		parts := strings.Split(s, "-")
		if len(parts) != 2 {
			return nil, errors.Errorf("malformed table spec: %s", s)
		}
		m := (&fileMetadata{}).ExtendPointKeyBounds(
			opts.Comparer.Compare,
			InternalKey{UserKey: []byte(parts[0])},
			InternalKey{UserKey: []byte(parts[1])},
		)
		m.InitPhysicalBacking()
		return m, nil
	}

	// Example, compact: a-c.
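	// parseCompaction parses a "compact:<start>-<end>" spec into a placeholder
	// compaction over those bounds with the given output level; the caller
	// registers it in d.mu.compact.inProgress below.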
	parseCompaction := func(outputLevel int, s string) (*compaction, error) {
		m, err := parseMeta(s[len("compact:"):])
		if err != nil {
			return nil, err
		}
		c := &compaction{
			inputs:   []compactionLevel{{}, {level: outputLevel}},
			smallest: m.Smallest,
			largest:  m.Largest,
		}
		c.startLevel, c.outputLevel = &c.inputs[0], &c.inputs[1]
		return c, nil
	}

	for _, line := range strings.Split(td.Input, "\n") {
		fields := strings.Fields(line)
		if len(fields) > 0 {
			switch fields[0] {
			case "mem":
				if err := maybeFlush(); err != nil {
					return nil, err
				}
				// Add a memtable layer.
				if !d.mu.mem.mutable.empty() {
					d.mu.mem.mutable = newMemTable(memTableOptions{Options: d.opts})
					entry := d.newFlushableEntry(d.mu.mem.mutable, 0, 0)
					entry.readerRefs.Add(1)
					d.mu.mem.queue = append(d.mu.mem.queue, entry)
					d.updateReadStateLocked(nil)
				}
				mem = d.mu.mem.mutable
				start, end = nil, nil
				fields = fields[1:]
			case "L0", "L1", "L2", "L3", "L4", "L5", "L6":
				if err := maybeFlush(); err != nil {
					return nil, err
				}
				var err error
				if level, err = strconv.Atoi(fields[0][1:]); err != nil {
					return nil, err
				}
				fields = fields[1:]
				start, end = nil, nil
				boundFields := 0
				for _, field := range fields {
					toBreak := false
					switch {
					case strings.HasPrefix(field, "start="):
						ikey := base.ParseInternalKey(strings.TrimPrefix(field, "start="))
						start = &ikey
						boundFields++
					case strings.HasPrefix(field, "end="):
						ikey := base.ParseInternalKey(strings.TrimPrefix(field, "end="))
						end = &ikey
						boundFields++
					default:
						toBreak = true
					}
					if toBreak {
						break
					}
				}
				fields = fields[boundFields:]
				mem = newMemTable(memTableOptions{Options: d.opts})
			}
		}

		for _, data := range fields {
			i := strings.Index(data, ":")
			// Define in-progress compactions.
			if data[:i] == "compact" {
				c, err := parseCompaction(level, data)
				if err != nil {
					return nil, err
				}
				d.mu.compact.inProgress[c] = struct{}{}
				continue
			}
			if data[:i] == "rangekey" {
				span := keyspan.ParseSpan(data[i:])
				err := rangekey.Encode(&span, func(k base.InternalKey, v []byte) error {
					return mem.set(k, v)
				})
				if err != nil {
					return nil, err
				}
				continue
			}
			key := base.ParseInternalKey(data[:i])
			valueStr := data[i+1:]
			value := []byte(valueStr)
			var randBytes int
			if n, err := fmt.Sscanf(valueStr, "<rand-bytes=%d>", &randBytes); err == nil && n == 1 {
				value = make([]byte, randBytes)
				rnd := rand.New(rand.NewSource(int64(key.SeqNum())))
				if _, err := rnd.Read(value[:]); err != nil {
					return nil, err
				}
			}
			if err := mem.set(key, value); err != nil {
				return nil, err
			}
		}
	}

	if err := maybeFlush(); err != nil {
		return nil, err
	}

	if len(ve.NewFiles) > 0 {
		jobID := d.mu.nextJobID
		d.mu.nextJobID++
		d.mu.versions.logLock()
		if err := d.mu.versions.logAndApply(jobID, ve, newFileMetrics(ve.NewFiles), false, func() []compactionInfo {
			return nil
		}); err != nil {
			return nil, err
		}
		d.updateReadStateLocked(nil)
		d.updateTableStatsLocked(ve.NewFiles)
	}

	for l, maxBytes := range levelMaxBytes {
		d.mu.versions.picker.(*compactionPickerByScore).levelMaxBytes[l] = maxBytes
	}

	return d, nil
}

func runTableStatsCmd(td *datadriven.TestData, d *DB) string {
	u, err := strconv.ParseUint(strings.TrimSpace(td.Input), 10, 64)
	if err != nil {
		return err.Error()
	}
	fileNum := base.FileNum(u)

	d.mu.Lock()
	defer d.mu.Unlock()
	v := d.mu.versions.currentVersion()
	for _, levelMetadata := range v.Levels {
		iter := levelMetadata.Iter()
		for f := iter.First(); f != nil; f = iter.Next() {
			if f.FileNum != fileNum {
				continue
			}

			if !f.StatsValid() {
				d.waitTableStats()
			}

			var b bytes.Buffer
			fmt.Fprintf(&b, "num-entries: %d\n", f.Stats.NumEntries)
			fmt.Fprintf(&b, "num-deletions: %d\n", f.Stats.NumDeletions)
			fmt.Fprintf(&b, "num-range-key-sets: %d\n", f.Stats.NumRangeKeySets)
			fmt.Fprintf(&b, "point-deletions-bytes-estimate: %d\n", f.Stats.PointDeletionsBytesEstimate)
			fmt.Fprintf(&b, "range-deletions-bytes-estimate: %d\n", f.Stats.RangeDeletionsBytesEstimate)
			return b.String()
		}
	}
	return "(not found)"
}

func runTableFileSizesCmd(td *datadriven.TestData, d *DB) string {
	d.mu.Lock()
	defer d.mu.Unlock()
	return runVersionFileSizes(d.mu.versions.currentVersion())
}

func runVersionFileSizes(v *version) string {
	var buf bytes.Buffer
	for l, levelMetadata := range v.Levels {
		if levelMetadata.Empty() {
			continue
		}
		fmt.Fprintf(&buf, "L%d:\n", l)
		iter := levelMetadata.Iter()
		for f := iter.First(); f != nil; f = iter.Next() {
			fmt.Fprintf(&buf, "  %s: %d bytes (%s)", f, f.Size, humanize.Bytes.Uint64(f.Size))
			if f.IsCompacting() {
				fmt.Fprintf(&buf, " (IsCompacting)")
			}
			fmt.Fprintln(&buf)
		}
	}
	return buf.String()
}

// Prints some metadata about some sstable which is currently in the latest
// version.
func runMetadataCommand(t *testing.T, td *datadriven.TestData, d *DB) string {
	var file int
	td.ScanArgs(t, "file", &file)
	var m *fileMetadata
	d.mu.Lock()
	currVersion := d.mu.versions.currentVersion()
	for _, level := range currVersion.Levels {
		lIter := level.Iter()
		for f := lIter.First(); f != nil; f = lIter.Next() {
			if f.FileNum == base.FileNum(uint64(file)) {
				m = f
				break
			}
		}
	}
	d.mu.Unlock()
	var buf bytes.Buffer
	// Add more metadata as needed.
	fmt.Fprintf(&buf, "size: %d\n", m.Size)
	return buf.String()
}

func runSSTablePropertiesCmd(t *testing.T, td *datadriven.TestData, d *DB) string {
	var file int
	td.ScanArgs(t, "file", &file)

	// See if we can grab the FileMetadata associated with the file. This is needed
	// to easily construct virtual sstable properties.
	var m *fileMetadata
	d.mu.Lock()
	currVersion := d.mu.versions.currentVersion()
	for _, level := range currVersion.Levels {
		lIter := level.Iter()
		for f := lIter.First(); f != nil; f = lIter.Next() {
			if f.FileNum == base.FileNum(uint64(file)) {
				m = f
				break
			}
		}
	}
	d.mu.Unlock()

	// Note that m can be nil here if the sstable exists in the file system, but
	// not in the lsm. If m is nil just assume that file is not virtual.

	backingFileNum := base.FileNum(uint64(file)).DiskFileNum()
	if m != nil {
		backingFileNum = m.FileBacking.DiskFileNum
	}
	fileName := base.MakeFilename(fileTypeTable, backingFileNum)
	f, err := d.opts.FS.Open(fileName)
	if err != nil {
		return err.Error()
	}
	readable, err := sstable.NewSimpleReadable(f)
	if err != nil {
		return err.Error()
	}
	// TODO(bananabrick): cacheOpts is used to set the file number on a Reader,
	// and virtual sstables expect this file number to be set. Split out the
	// opts into fileNum opts, and cache opts.
	cacheOpts := private.SSTableCacheOpts(0, backingFileNum).(sstable.ReaderOption)
	r, err := sstable.NewReader(readable, d.opts.MakeReaderOptions(), cacheOpts)
	if err != nil {
		return err.Error()
	}
	defer r.Close()

	var v sstable.VirtualReader
	props := r.Properties.String()
	if m != nil && m.Virtual {
		v = sstable.MakeVirtualReader(r, m.VirtualMeta(), false /* isForeign */)
		props = v.Properties.String()
	}
	if len(td.Input) == 0 {
		return props
	}
	var buf bytes.Buffer
	propsSlice := strings.Split(props, "\n")
	for _, requestedProp := range strings.Split(td.Input, "\n") {
		fmt.Fprintf(&buf, "%s:\n", requestedProp)
		for _, prop := range propsSlice {
			if strings.Contains(prop, requestedProp) {
				fmt.Fprintf(&buf, " %s\n", prop)
			}
		}
	}
	return buf.String()
}

func runPopulateCmd(t *testing.T, td *datadriven.TestData, b *Batch) {
	var maxKeyLength, valLength int
	var timestamps []int
	td.ScanArgs(t, "keylen", &maxKeyLength)
	td.MaybeScanArgs(t, "timestamps", &timestamps)
	td.MaybeScanArgs(t, "vallen", &valLength)
	// Default to writing timestamps @1.
	if len(timestamps) == 0 {
		timestamps = append(timestamps, 1)
	}

	ks := testkeys.Alpha(maxKeyLength)
	buf := make([]byte, ks.MaxLen()+testkeys.MaxSuffixLen)
	vbuf := make([]byte, valLength)
	for i := int64(0); i < ks.Count(); i++ {
		for _, ts := range timestamps {
			n := testkeys.WriteKeyAt(buf, ks, i, int64(ts))

			// Default to using the key as the value, but if the user provided
			// the vallen argument, generate a random value of the specified
			// length.
			value := buf[:n]
			if valLength > 0 {
				_, err := crand.Read(vbuf)
				require.NoError(t, err)
				value = vbuf
			}
			require.NoError(t, b.Set(buf[:n], value, nil))
		}
	}
}

// waitTableStats waits until all new files' statistics have been loaded. It's
// used in tests. The d.mu mutex must be locked while calling this method.
func (d *DB) waitTableStats() {
	for d.mu.tableStats.loading || len(d.mu.tableStats.pending) > 0 {
		d.mu.tableStats.cond.Wait()
	}
}

func runIngestAndExciseCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error {
	var exciseSpan KeyRange
	paths := make([]string, 0, len(td.CmdArgs))
	for i, arg := range td.CmdArgs {
		switch td.CmdArgs[i].Key {
		case "excise":
			if len(td.CmdArgs[i].Vals) != 1 {
				return errors.New("expected 2 values for excise separated by -, eg. ingest-and-excise foo1 excise=\"start-end\"")
			}
			fields := strings.Split(td.CmdArgs[i].Vals[0], "-")
			if len(fields) != 2 {
				return errors.New("expected 2 values for excise separated by -, eg. ingest-and-excise foo1 excise=\"start-end\"")
			}
			exciseSpan.Start = []byte(fields[0])
			exciseSpan.End = []byte(fields[1])
		default:
			paths = append(paths, arg.String())
		}
	}

	if _, err := d.IngestAndExcise(paths, nil /* shared */, exciseSpan); err != nil {
		return err
	}
	return nil
}

func runIngestCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error {
	paths := make([]string, 0, len(td.CmdArgs))
	for _, arg := range td.CmdArgs {
		paths = append(paths, arg.String())
	}

	if err := d.Ingest(paths); err != nil {
		return err
	}
	return nil
}

func runIngestExternalCmd(td *datadriven.TestData, d *DB, locator string) error {
	external := make([]ExternalFile, 0)
	for _, arg := range strings.Split(td.Input, "\n") {
		fields := strings.Split(arg, ",")
		if len(fields) != 4 {
			return errors.New("usage: path,size,smallest,largest")
		}
		ef := ExternalFile{}
		ef.Locator = remote.Locator(locator)
		ef.ObjName = fields[0]
		sizeInt, err := strconv.Atoi(fields[1])
		if err != nil {
			return err
		}
		ef.Size = uint64(sizeInt)
		ef.SmallestUserKey = []byte(fields[2])
		ef.LargestUserKey = []byte(fields[3])
		ef.HasPointKey = true
		external = append(external, ef)
	}

	if _, err := d.IngestExternalFiles(external); err != nil {
		return err
	}
	return nil
}

func runForceIngestCmd(td *datadriven.TestData, d *DB) error {
	var paths []string
	var level int
	for _, arg := range td.CmdArgs {
		switch arg.Key {
		case "paths":
			paths = append(paths, arg.Vals...)
		case "level":
			var err error
			level, err = strconv.Atoi(arg.Vals[0])
			if err != nil {
				return err
			}
		}
	}
	_, err := d.ingest(paths, func(
		tableNewIters,
		keyspan.TableNewSpanIter,
		IterOptions,
		*Comparer,
		*version,
		int,
		map[*compaction]struct{},
		*fileMetadata,
		bool,
	) (int, *fileMetadata, error) {
		return level, nil, nil
	}, nil /* shared */, KeyRange{}, nil /* external */)
	return err
}

func runLSMCmd(td *datadriven.TestData, d *DB) string {
	d.mu.Lock()
	defer d.mu.Unlock()
	if td.HasArg("verbose") {
		return d.mu.versions.currentVersion().DebugString(d.opts.Comparer.FormatKey)
	}
	return d.mu.versions.currentVersion().String()
}
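
// The helpers above are typically glued together by a datadriven test. The
// following is a hypothetical sketch (TestExample and testdata/example do not
// exist in this package); it only illustrates how a test might dispatch
// datadriven commands to runDBDefineCmd, runLSMCmd, and runIterCmd.
func TestExample(t *testing.T) {
	var d *DB
	datadriven.RunTest(t, "testdata/example", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "define":
			// Rebuild the database state described by the command input.
			if d != nil {
				require.NoError(t, d.Close())
			}
			var err error
			if d, err = runDBDefineCmd(td, nil /* opts */); err != nil {
				return err.Error()
			}
			return runLSMCmd(td, d)
		case "iter":
			// Open an iterator and run the positioning commands in the input.
			iter, err := d.NewIter(nil)
			if err != nil {
				return err.Error()
			}
			return runIterCmd(td, iter, true /* closeIter */)
		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
	if d != nil {
		require.NoError(t, d.Close())
	}
}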