github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/data_test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	crand "crypto/rand"
	"fmt"
	"io"
	"math"
	"math/rand"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/internal/keyspan"
	"github.com/cockroachdb/pebble/internal/private"
	"github.com/cockroachdb/pebble/internal/rangedel"
	"github.com/cockroachdb/pebble/internal/rangekey"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/objstorage/remote"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/cockroachdb/pebble/vfs/errorfs"
	"github.com/stretchr/testify/require"
)

func runGetCmd(t testing.TB, td *datadriven.TestData, d *DB) string {
	snap := Snapshot{
		db:     d,
		seqNum: InternalKeySeqNumMax,
	}
	td.MaybeScanArgs(t, "seq", &snap.seqNum)

	var buf bytes.Buffer
	for _, data := range strings.Split(td.Input, "\n") {
		v, closer, err := snap.Get([]byte(data))
		if err != nil {
			fmt.Fprintf(&buf, "%s: %s\n", data, err)
		} else {
			fmt.Fprintf(&buf, "%s:%s\n", data, v)
			closer.Close()
		}
	}
	return buf.String()
}

func runIterCmd(d *datadriven.TestData, iter *Iterator, closeIter bool) string {
	if closeIter {
		defer func() {
			if iter != nil {
				iter.Close()
			}
		}()
	}
	var b bytes.Buffer
	for _, line := range strings.Split(d.Input, "\n") {
		parts := strings.Fields(line)
		if len(parts) == 0 {
			continue
		}
		printValidityState := false
		var valid bool
		var validityState IterValidityState
		switch parts[0] {
		case "seek-ge":
			if len(parts) != 2 {
				return "seek-ge <key>\n"
			}
			valid = iter.SeekGE([]byte(parts[1]))
		case "seek-prefix-ge":
			if len(parts) != 2 {
				return "seek-prefix-ge <key>\n"
			}
			valid = iter.SeekPrefixGE([]byte(parts[1]))
		case "seek-lt":
			if len(parts) != 2 {
				return "seek-lt <key>\n"
			}
			valid = iter.SeekLT([]byte(parts[1]))
		case "seek-ge-limit":
			if len(parts) != 3 {
				return "seek-ge-limit <key> <limit>\n"
			}
			validityState = iter.SeekGEWithLimit(
				[]byte(parts[1]), []byte(parts[2]))
			printValidityState = true
		case "seek-lt-limit":
			if len(parts) != 3 {
				return "seek-lt-limit <key> <limit>\n"
			}
			validityState = iter.SeekLTWithLimit(
				[]byte(parts[1]), []byte(parts[2]))
			printValidityState = true
		case "inspect":
			if len(parts) != 2 {
				return "inspect <field>\n"
			}
			field := parts[1]
			switch field {
			case "lastPositioningOp":
				op := "?"
				switch iter.lastPositioningOp {
				case unknownLastPositionOp:
					op = "unknown"
				case seekPrefixGELastPositioningOp:
					op = "seekprefixge"
				case seekGELastPositioningOp:
					op = "seekge"
				case seekLTLastPositioningOp:
					op = "seeklt"
				case invalidatedLastPositionOp:
					op = "invalidate"
				}
				fmt.Fprintf(&b, "%s=%q\n", field, op)
			default:
				return fmt.Sprintf("unrecognized inspect field %q\n", field)
			}
			continue
		case "next-limit":
			if len(parts) != 2 {
				return "next-limit <limit>\n"
			}
			validityState = iter.NextWithLimit([]byte(parts[1]))
			printValidityState = true
		case "internal-next":
			validity, keyKind := iter.internalNext()
			switch validity {
			case internalNextError:
				fmt.Fprintf(&b, "err: %s\n", iter.Error())
			case internalNextExhausted:
				fmt.Fprint(&b, ".\n")
			case internalNextValid:
				fmt.Fprintf(&b, "%s\n", keyKind)
			default:
				panic("unreachable")
			}
			continue
		case "can-deterministically-single-delete":
			ok, err := CanDeterministicallySingleDelete(iter)
			if err != nil {
				fmt.Fprintf(&b, "err: %s\n", err)
			} else {
				fmt.Fprintf(&b, "%t\n", ok)
			}
			continue
		case "prev-limit":
			if len(parts) != 2 {
				return "prev-limit <limit>\n"
			}
			validityState = iter.PrevWithLimit([]byte(parts[1]))
			printValidityState = true
		case "first":
			valid = iter.First()
		case "last":
			valid = iter.Last()
		case "next":
			valid = iter.Next()
		case "next-prefix":
			valid = iter.NextPrefix()
		case "prev":
			valid = iter.Prev()
		case "set-bounds":
			if len(parts) <= 1 || len(parts) > 3 {
				return "set-bounds lower=<lower> upper=<upper>\n"
			}
			var lower []byte
			var upper []byte
			for _, part := range parts[1:] {
				arg := strings.Split(part, "=")
				switch arg[0] {
				case "lower":
					lower = []byte(arg[1])
				case "upper":
					upper = []byte(arg[1])
				default:
					return fmt.Sprintf("set-bounds: unknown arg: %s", arg)
				}
			}
			iter.SetBounds(lower, upper)
			valid = iter.Valid()
		case "set-options":
			opts := iter.opts
			if _, err := parseIterOptions(&opts, &iter.opts, parts[1:]); err != nil {
				return fmt.Sprintf("set-options: %s", err.Error())
			}
			iter.SetOptions(&opts)
			valid = iter.Valid()
		case "stats":
			stats := iter.Stats()
			// The timing is non-deterministic, so set to 0.
			stats.InternalStats.BlockReadDuration = 0
			fmt.Fprintf(&b, "stats: %s\n", stats.String())
			continue
		case "clone":
			var opts CloneOptions
			if len(parts) > 1 {
				var iterOpts IterOptions
				if foundAny, err := parseIterOptions(&iterOpts, &iter.opts, parts[1:]); err != nil {
					return fmt.Sprintf("clone: %s", err.Error())
				} else if foundAny {
					opts.IterOptions = &iterOpts
				}
				for _, part := range parts[1:] {
					if arg := strings.Split(part, "="); len(arg) == 2 && arg[0] == "refresh-batch" {
						var err error
						opts.RefreshBatchView, err = strconv.ParseBool(arg[1])
						if err != nil {
							return fmt.Sprintf("clone: refresh-batch: %s", err.Error())
						}
					}
				}
			}
			clonedIter, err := iter.Clone(opts)
			if err != nil {
				fmt.Fprintf(&b, "error in clone, skipping rest of input: err=%v\n", err)
				return b.String()
			}
			if err = iter.Close(); err != nil {
				fmt.Fprintf(&b, "err=%v\n", err)
			}
			iter = clonedIter
		case "is-using-combined":
			if iter.opts.KeyTypes != IterKeyTypePointsAndRanges {
				fmt.Fprintln(&b, "not configured for combined iteration")
			} else if iter.lazyCombinedIter.combinedIterState.initialized {
				fmt.Fprintln(&b, "using combined (non-lazy) iterator")
			} else {
				fmt.Fprintln(&b, "using lazy iterator")
			}
			continue
		default:
			return fmt.Sprintf("unknown op: %s", parts[0])
		}

		valid = valid || validityState == IterValid
		if valid != iter.Valid() {
			fmt.Fprintf(&b, "mismatched valid states: %t vs %t\n", valid, iter.Valid())
		}
		hasPoint, hasRange := iter.HasPointAndRange()
		hasEither := hasPoint || hasRange
		if hasEither != valid {
			fmt.Fprintf(&b, "mismatched valid/HasPointAndRange states: valid=%t HasPointAndRange=(%t,%t)\n", valid, hasPoint, hasRange)
		}

		if valid {
			validityState = IterValid
		}
		printIterState(&b, iter, validityState, printValidityState)
	}
	return b.String()
}

func parseIterOptions(
	opts *IterOptions, ref *IterOptions, parts []string,
) (foundAny bool, err error) {
	const usageString = "[lower=<lower>] [upper=<upper>] [key-types=point|range|both] [mask-suffix=<suffix>] [mask-filter=<bool>] [only-durable=<bool>] [table-filter=reuse|none] [point-filters=reuse|none]\n"
	for _, part := range parts {
		arg := strings.SplitN(part, "=", 2)
		if len(arg) != 2 {
			return false, errors.Newf(usageString)
		}
		switch arg[0] {
		case "point-filters":
			switch arg[1] {
			case "reuse":
				opts.PointKeyFilters = ref.PointKeyFilters
			case "none":
				opts.PointKeyFilters = nil
			default:
				return false, errors.Newf("unknown arg point-filter=%q:\n%s", arg[1], usageString)
			}
		case "lower":
			opts.LowerBound = []byte(arg[1])
		case "upper":
			opts.UpperBound = []byte(arg[1])
		case "key-types":
			switch arg[1] {
			case "point":
				opts.KeyTypes = IterKeyTypePointsOnly
			case "range":
				opts.KeyTypes = IterKeyTypeRangesOnly
			case "both":
				opts.KeyTypes = IterKeyTypePointsAndRanges
			default:
				return false, errors.Newf("unknown key-type %q:\n%s", arg[1], usageString)
			}
		case "mask-suffix":
			opts.RangeKeyMasking.Suffix = []byte(arg[1])
		case "mask-filter":
			opts.RangeKeyMasking.Filter = func() BlockPropertyFilterMask {
				return sstable.NewTestKeysMaskingFilter()
			}
		case "table-filter":
			switch arg[1] {
			case "reuse":
				opts.TableFilter = ref.TableFilter
			case "none":
				opts.TableFilter = nil
			default:
				return false, errors.Newf("unknown arg table-filter=%q:\n%s", arg[1], usageString)
			}
		case "only-durable":
			var err error
			opts.OnlyReadGuaranteedDurable, err = strconv.ParseBool(arg[1])
			if err != nil {
				return false, errors.Newf("cannot parse only-durable=%q: %s", arg[1], err)
			}
		default:
			continue
		}
		foundAny = true
	}
	return foundAny, nil
}

func printIterState(
	b io.Writer, iter *Iterator, validity IterValidityState, printValidityState bool,
) {
	var validityStateStr string
	if printValidityState {
		switch validity {
		case IterExhausted:
			validityStateStr = " exhausted"
		case IterValid:
			validityStateStr = " valid"
		case IterAtLimit:
			validityStateStr = " at-limit"
		}
	}
	if err := iter.Error(); err != nil {
		fmt.Fprintf(b, "err=%v\n", err)
	} else if validity == IterValid {
		switch {
		case iter.opts.pointKeys():
			hasPoint, hasRange := iter.HasPointAndRange()
			fmt.Fprintf(b, "%s:%s (", iter.Key(), validityStateStr)
			if hasPoint {
				fmt.Fprintf(b, "%s, ", iter.Value())
			} else {
				fmt.Fprint(b, "., ")
			}
			if hasRange {
				start, end := iter.RangeBounds()
				fmt.Fprintf(b, "[%s-%s)", formatASCIIKey(start), formatASCIIKey(end))
				writeRangeKeys(b, iter)
			} else {
				fmt.Fprint(b, ".")
			}
			if iter.RangeKeyChanged() {
				fmt.Fprint(b, " UPDATED")
			}
			fmt.Fprint(b, ")")
		default:
			if iter.Valid() {
				hasPoint, hasRange := iter.HasPointAndRange()
				if hasPoint || !hasRange {
					panic(fmt.Sprintf("pebble: unexpected HasPointAndRange (%t, %t)", hasPoint, hasRange))
				}
				start, end := iter.RangeBounds()
				fmt.Fprintf(b, "%s [%s-%s)", iter.Key(), formatASCIIKey(start), formatASCIIKey(end))
				writeRangeKeys(b, iter)
			} else {
				fmt.Fprint(b, ".")
			}
			if iter.RangeKeyChanged() {
				fmt.Fprint(b, " UPDATED")
			}
		}
		fmt.Fprintln(b)
	} else {
		fmt.Fprintf(b, ".%s\n", validityStateStr)
	}
}

func formatASCIIKey(b []byte) string {
	if bytes.IndexFunc(b, func(r rune) bool { return r < 'A' || r > 'z' }) != -1 {
		// This key is not just ASCII letters. Quote it.
		return fmt.Sprintf("%q", b)
	}
	return string(b)
}

func writeRangeKeys(b io.Writer, iter *Iterator) {
	rangeKeys := iter.RangeKeys()
	for j := 0; j < len(rangeKeys); j++ {
		if j > 0 {
			fmt.Fprint(b, ",")
		}
		fmt.Fprintf(b, " %s=%s", rangeKeys[j].Suffix, rangeKeys[j].Value)
	}
}

func runBatchDefineCmd(d *datadriven.TestData, b *Batch) error {
	for _, line := range strings.Split(d.Input, "\n") {
		parts := strings.Fields(line)
		if len(parts) == 0 {
			continue
		}
		if parts[1] == `<nil>` {
			parts[1] = ""
		}
		var err error
		switch parts[0] {
		case "set":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.Set([]byte(parts[1]), []byte(parts[2]), nil)
		case "del":
			if len(parts) != 2 {
				return errors.Errorf("%s expects 1 argument", parts[0])
			}
			err = b.Delete([]byte(parts[1]), nil)
		case "del-sized":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			var valSize uint64
			valSize, err = strconv.ParseUint(parts[2], 10, 32)
			if err != nil {
				return err
			}
			err = b.DeleteSized([]byte(parts[1]), uint32(valSize), nil)
		case "singledel":
			if len(parts) != 2 {
				return errors.Errorf("%s expects 1 argument", parts[0])
			}
			err = b.SingleDelete([]byte(parts[1]), nil)
		case "del-range":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.DeleteRange([]byte(parts[1]), []byte(parts[2]), nil)
		case "merge":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.Merge([]byte(parts[1]), []byte(parts[2]), nil)
		case "range-key-set":
			if len(parts) < 4 || len(parts) > 5 {
				return errors.Errorf("%s expects 3 or 4 arguments", parts[0])
			}
			var val []byte
			if len(parts) == 5 {
				val = []byte(parts[4])
			}
			err = b.RangeKeySet(
				[]byte(parts[1]),
				[]byte(parts[2]),
				[]byte(parts[3]),
				val,
				nil)
		case "range-key-unset":
			if len(parts) != 4 {
				return errors.Errorf("%s expects 3 arguments", parts[0])
			}
			err = b.RangeKeyUnset(
				[]byte(parts[1]),
				[]byte(parts[2]),
				[]byte(parts[3]),
				nil)
		case "range-key-del":
			if len(parts) != 3 {
				return errors.Errorf("%s expects 2 arguments", parts[0])
			}
			err = b.RangeKeyDelete(
				[]byte(parts[1]),
				[]byte(parts[2]),
				nil)
		default:
			return errors.Errorf("unknown op: %s", parts[0])
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func runBuildRemoteCmd(td *datadriven.TestData, d *DB, storage remote.Storage) error {
	b := d.NewIndexedBatch()
	if err := runBatchDefineCmd(td, b); err != nil {
		return err
	}

	if len(td.CmdArgs) < 1 {
		return errors.New("build <path>: argument missing")
	}
	path := td.CmdArgs[0].String()

	// Override table format, if provided.
	tableFormat := d.opts.FormatMajorVersion.MaxTableFormat()
	for _, cmdArg := range td.CmdArgs[1:] {
		switch cmdArg.Key {
		case "format":
			switch cmdArg.Vals[0] {
			case "leveldb":
				tableFormat = sstable.TableFormatLevelDB
			case "rocksdbv2":
				tableFormat = sstable.TableFormatRocksDBv2
			case "pebblev1":
				tableFormat = sstable.TableFormatPebblev1
			case "pebblev2":
				tableFormat = sstable.TableFormatPebblev2
			case "pebblev3":
				tableFormat = sstable.TableFormatPebblev3
			case "pebblev4":
				tableFormat = sstable.TableFormatPebblev4
			default:
				return errors.Errorf("unknown format string %s", cmdArg.Vals[0])
			}
		}
	}

	writeOpts := d.opts.MakeWriterOptions(0 /* level */, tableFormat)

	f, err := storage.CreateObject(path)
	if err != nil {
		return err
	}
	w := sstable.NewWriter(objstorageprovider.NewRemoteWritable(f), writeOpts)
	iter := b.newInternalIter(nil)
	for key, val := iter.First(); key != nil; key, val = iter.Next() {
		tmp := *key
		tmp.SetSeqNum(0)
		if err := w.Add(tmp, val.InPlaceValue()); err != nil {
			return err
		}
	}
	if err := iter.Close(); err != nil {
		return err
	}

	if rdi := b.newRangeDelIter(nil, math.MaxUint64); rdi != nil {
		for s := rdi.First(); s != nil; s = rdi.Next() {
			err := rangedel.Encode(s, func(k base.InternalKey, v []byte) error {
				k.SetSeqNum(0)
				return w.Add(k, v)
			})
			if err != nil {
				return err
			}
		}
	}

	if rki := b.newRangeKeyIter(nil, math.MaxUint64); rki != nil {
		for s := rki.First(); s != nil; s = rki.Next() {
			for _, k := range s.Keys {
				var err error
				switch k.Kind() {
				case base.InternalKeyKindRangeKeySet:
					err = w.RangeKeySet(s.Start, s.End, k.Suffix, k.Value)
				case base.InternalKeyKindRangeKeyUnset:
					err = w.RangeKeyUnset(s.Start, s.End, k.Suffix)
				case base.InternalKeyKindRangeKeyDelete:
					err = w.RangeKeyDelete(s.Start, s.End)
				default:
					panic("not a range key")
				}
				if err != nil {
					return err
				}
			}
		}
	}

	return w.Close()
}

func runBuildCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error {
	b := newIndexedBatch(nil, d.opts.Comparer)
	if err := runBatchDefineCmd(td, b); err != nil {
		return err
	}

	if len(td.CmdArgs) < 1 {
		return errors.New("build <path>: argument missing")
	}
	path := td.CmdArgs[0].String()

	// Override table format, if provided.
	tableFormat := d.opts.FormatMajorVersion.MaxTableFormat()
	for _, cmdArg := range td.CmdArgs[1:] {
		switch cmdArg.Key {
		case "format":
			switch cmdArg.Vals[0] {
			case "leveldb":
				tableFormat = sstable.TableFormatLevelDB
			case "rocksdbv2":
				tableFormat = sstable.TableFormatRocksDBv2
			case "pebblev1":
				tableFormat = sstable.TableFormatPebblev1
			case "pebblev2":
				tableFormat = sstable.TableFormatPebblev2
			case "pebblev3":
				tableFormat = sstable.TableFormatPebblev3
			case "pebblev4":
				tableFormat = sstable.TableFormatPebblev4
			default:
				return errors.Errorf("unknown format string %s", cmdArg.Vals[0])
			}
		}
	}

	writeOpts := d.opts.MakeWriterOptions(0 /* level */, tableFormat)

	f, err := fs.Create(path)
	if err != nil {
		return err
	}
	w := sstable.NewWriter(objstorageprovider.NewFileWritable(f), writeOpts)
	iter := b.newInternalIter(nil)
	for key, val := iter.First(); key != nil; key, val = iter.Next() {
		tmp := *key
		tmp.SetSeqNum(0)
		if err := w.Add(tmp, val.InPlaceValue()); err != nil {
			return err
		}
	}
	if err := iter.Close(); err != nil {
		return err
	}

	if rdi := b.newRangeDelIter(nil, math.MaxUint64); rdi != nil {
		for s := rdi.First(); s != nil; s = rdi.Next() {
			err := rangedel.Encode(s, func(k base.InternalKey, v []byte) error {
				k.SetSeqNum(0)
				return w.Add(k, v)
			})
			if err != nil {
				return err
			}
		}
	}

	if rki := b.newRangeKeyIter(nil, math.MaxUint64); rki != nil {
		for s := rki.First(); s != nil; s = rki.Next() {
			for _, k := range s.Keys {
				var err error
				switch k.Kind() {
				case base.InternalKeyKindRangeKeySet:
					err = w.RangeKeySet(s.Start, s.End, k.Suffix, k.Value)
				case base.InternalKeyKindRangeKeyUnset:
					err = w.RangeKeyUnset(s.Start, s.End, k.Suffix)
				case base.InternalKeyKindRangeKeyDelete:
					err = w.RangeKeyDelete(s.Start, s.End)
				default:
					panic("not a range key")
				}
				if err != nil {
					return err
				}
			}
		}
	}

	return w.Close()
}

func runCompactCmd(td *datadriven.TestData, d *DB) error {
	if len(td.CmdArgs) > 4 {
		return errors.Errorf("%s expects at most four arguments", td.Cmd)
	}
	parts := strings.Split(td.CmdArgs[0].Key, "-")
	if len(parts) != 2 {
		return errors.Errorf("expected <begin>-<end>: %s", td.Input)
	}
	parallelize := td.HasArg("parallel")
	if len(td.CmdArgs) >= 2 && strings.HasPrefix(td.CmdArgs[1].Key, "L") {
		levelString := td.CmdArgs[1].String()
		iStart := base.MakeInternalKey([]byte(parts[0]), InternalKeySeqNumMax, InternalKeyKindMax)
		iEnd := base.MakeInternalKey([]byte(parts[1]), 0, 0)
		if levelString[0] != 'L' {
			return errors.Errorf("expected L<n>: %s", levelString)
		}
		level, err := strconv.Atoi(levelString[1:])
		if err != nil {
			return err
		}
		return d.manualCompact(iStart.UserKey, iEnd.UserKey, level, parallelize)
	}
	return d.Compact([]byte(parts[0]), []byte(parts[1]), parallelize)
}

// runDBDefineCmd prepares a database state, returning the opened
// database with the initialized state.
//
// The command accepts input describing memtables and sstables to
// construct. Each new table is indicated by a line containing the
// level of the next table to build (eg, "L6"), or "mem" to build
// a memtable. Each subsequent line contains a new key-value pair.
//
// Point keys and range deletions should be encoded as the
// InternalKey's string representation, as understood by
// ParseInternalKey, followed by a colon and the corresponding value.
//
//	b.SET.50:foo
//	c.DEL.20
//
// Range keys may be encoded by prefixing the line with `rangekey:`,
// followed by the keyspan.Span string representation, as understood
// by keyspan.ParseSpan.
//
//	rangekey:b-d:{(#5,RANGEKEYSET,@2,foo)}
//
// # Mechanics
//
// runDBDefineCmd works by simulating a flush for every file written.
// Keys are written to a memtable. When a file is complete, the table
// is flushed to physical files through manually invoking runCompaction.
// The resulting version edit is then manipulated to write the files
// to the indicated level.
//
// Because of its low-level manipulation, runDBDefineCmd does allow the
// creation of invalid database states. If opts.DebugCheck is set, the
// level checker should detect the invalid state.
func runDBDefineCmd(td *datadriven.TestData, opts *Options) (*DB, error) {
	opts = opts.EnsureDefaults()
	opts.FS = vfs.NewMem()

	var snapshots []uint64
	var levelMaxBytes map[int]int64
	for _, arg := range td.CmdArgs {
		switch arg.Key {
		case "target-file-sizes":
			opts.Levels = make([]LevelOptions, len(arg.Vals))
			for i := range arg.Vals {
				size, err := strconv.ParseInt(arg.Vals[i], 10, 64)
				if err != nil {
					return nil, err
				}
				opts.Levels[i].TargetFileSize = size
			}
		case "snapshots":
			snapshots = make([]uint64, len(arg.Vals))
			for i := range arg.Vals {
				seqNum, err := strconv.ParseUint(arg.Vals[i], 10, 64)
				if err != nil {
					return nil, err
				}
				snapshots[i] = seqNum
				if i > 0 && snapshots[i] < snapshots[i-1] {
					return nil, errors.New("Snapshots must be in ascending order")
				}
			}
		case "lbase-max-bytes":
			lbaseMaxBytes, err := strconv.ParseInt(arg.Vals[0], 10, 64)
			if err != nil {
				return nil, err
			}
			opts.LBaseMaxBytes = lbaseMaxBytes
		case "level-max-bytes":
			levelMaxBytes = map[int]int64{}
			for i := range arg.Vals {
				j := strings.Index(arg.Vals[i], ":")
				levelStr := strings.TrimSpace(arg.Vals[i][:j])
				level, err := strconv.Atoi(levelStr[1:])
				if err != nil {
					return nil, err
				}
				size, err := strconv.ParseInt(strings.TrimSpace(arg.Vals[i][j+1:]), 10, 64)
				if err != nil {
					return nil, err
				}
				levelMaxBytes[level] = size
			}
		case "auto-compactions":
			switch arg.Vals[0] {
			case "off":
				opts.DisableAutomaticCompactions = true
			case "on":
				opts.DisableAutomaticCompactions = false
			default:
				return nil, errors.Errorf("Unrecognized %q %q arg value: %q", td.Cmd, arg.Key, arg.Vals[0])
			}
		case "enable-table-stats":
			enable, err := strconv.ParseBool(arg.Vals[0])
			if err != nil {
				return nil, errors.Errorf("%s: could not parse %q as bool: %s", td.Cmd, arg.Vals[0], err)
			}
			opts.private.disableTableStats = !enable
		case "block-size":
			size, err := strconv.Atoi(arg.Vals[0])
			if err != nil {
				return nil, err
			}
			// Index into the slice so the assignment sticks; ranging by value
			// would mutate a copy of the LevelOptions struct.
			for i := range opts.Levels {
				opts.Levels[i].BlockSize = size
			}
		case "format-major-version":
			fmv, err := strconv.Atoi(arg.Vals[0])
			if err != nil {
				return nil, err
			}
			opts.FormatMajorVersion = FormatMajorVersion(fmv)
		case "disable-multi-level":
			opts.Experimental.MultiLevelCompactionHeuristic =
				NoMultiLevel{}
		}
	}

	// This is placed after the argument parsing above, because the arguments
	// to define should be parsed even if td.Input is empty.
	if td.Input == "" {
		// Empty LSM.
		d, err := Open("", opts)
		if err != nil {
			return nil, err
		}
		d.mu.Lock()
		for i := range snapshots {
			s := &Snapshot{db: d}
			s.seqNum = snapshots[i]
			d.mu.snapshots.pushBack(s)
		}
		for l, maxBytes := range levelMaxBytes {
			d.mu.versions.picker.(*compactionPickerByScore).levelMaxBytes[l] = maxBytes
		}
		d.mu.Unlock()
		return d, nil
	}

	d, err := Open("", opts)
	if err != nil {
		return nil, err
	}
	d.mu.Lock()
	d.mu.versions.dynamicBaseLevel = false
	for i := range snapshots {
		s := &Snapshot{db: d}
		s.seqNum = snapshots[i]
		d.mu.snapshots.pushBack(s)
	}
	defer d.mu.Unlock()

	var mem *memTable
	var start, end *base.InternalKey
	ve := &versionEdit{}
	level := -1

	maybeFlush := func() error {
		if level < 0 {
			return nil
		}

		toFlush := flushableList{{
			flushable: mem,
			flushed:   make(chan struct{}),
		}}
		c := newFlush(d.opts, d.mu.versions.currentVersion(),
			d.mu.versions.picker.getBaseLevel(), toFlush, time.Now())
		c.disableSpanElision = true
		// NB: define allows the test to exactly specify which keys go
		// into which sstables. If the test has a small target file
		// size to test grandparent limits, etc, the maxOutputFileSize
		// can cause splitting /within/ the bounds specified to the
		// test. Ignore the target size here, and split only according
		// to the user-defined boundaries.
		c.maxOutputFileSize = math.MaxUint64

		newVE, _, _, err := d.runCompaction(0, c)
		if err != nil {
			return err
		}
		largestSeqNum := d.mu.versions.logSeqNum.Load()
		for _, f := range newVE.NewFiles {
			if start != nil {
				f.Meta.SmallestPointKey = *start
				f.Meta.Smallest = *start
			}
			if end != nil {
				f.Meta.LargestPointKey = *end
				f.Meta.Largest = *end
			}
			if largestSeqNum <= f.Meta.LargestSeqNum {
				largestSeqNum = f.Meta.LargestSeqNum + 1
			}
			ve.NewFiles = append(ve.NewFiles, newFileEntry{
				Level: level,
				Meta:  f.Meta,
			})
		}
		// The committed keys were never written to the WAL, so neither
		// the logSeqNum nor the commit pipeline's visibleSeqNum have
		// been ratcheted. Manually ratchet them to the largest sequence
		// number committed to ensure iterators opened from the database
		// correctly observe the committed keys.
		if d.mu.versions.logSeqNum.Load() < largestSeqNum {
			d.mu.versions.logSeqNum.Store(largestSeqNum)
		}
		if d.mu.versions.visibleSeqNum.Load() < largestSeqNum {
			d.mu.versions.visibleSeqNum.Store(largestSeqNum)
		}
		level = -1
		return nil
	}

	// Example, a-c.
	parseMeta := func(s string) (*fileMetadata, error) {
		parts := strings.Split(s, "-")
		if len(parts) != 2 {
			return nil, errors.Errorf("malformed table spec: %s", s)
		}
		m := (&fileMetadata{}).ExtendPointKeyBounds(
			opts.Comparer.Compare,
			InternalKey{UserKey: []byte(parts[0])},
			InternalKey{UserKey: []byte(parts[1])},
		)
		m.InitPhysicalBacking()
		return m, nil
	}

	// Example, compact: a-c.
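	// parseCompaction parses a spec of that form into a *compaction whose
	// output level is outputLevel and whose smallest/largest bounds are taken
	// from the parsed span. The input loop below registers the result in
	// d.mu.compact.inProgress, letting tests define compactions that are
	// already in flight when the database state is constructed.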
	parseCompaction := func(outputLevel int, s string) (*compaction, error) {
		m, err := parseMeta(s[len("compact:"):])
		if err != nil {
			return nil, err
		}
		c := &compaction{
			inputs:   []compactionLevel{{}, {level: outputLevel}},
			smallest: m.Smallest,
			largest:  m.Largest,
		}
		c.startLevel, c.outputLevel = &c.inputs[0], &c.inputs[1]
		return c, nil
	}

	for _, line := range strings.Split(td.Input, "\n") {
		fields := strings.Fields(line)
		if len(fields) > 0 {
			switch fields[0] {
			case "mem":
				if err := maybeFlush(); err != nil {
					return nil, err
				}
				// Add a memtable layer.
				if !d.mu.mem.mutable.empty() {
					d.mu.mem.mutable = newMemTable(memTableOptions{Options: d.opts})
					entry := d.newFlushableEntry(d.mu.mem.mutable, 0, 0)
					entry.readerRefs.Add(1)
					d.mu.mem.queue = append(d.mu.mem.queue, entry)
					d.updateReadStateLocked(nil)
				}
				mem = d.mu.mem.mutable
				start, end = nil, nil
				fields = fields[1:]
			case "L0", "L1", "L2", "L3", "L4", "L5", "L6":
				if err := maybeFlush(); err != nil {
					return nil, err
				}
				var err error
				if level, err = strconv.Atoi(fields[0][1:]); err != nil {
					return nil, err
				}
				fields = fields[1:]
				start, end = nil, nil
				boundFields := 0
				for _, field := range fields {
					toBreak := false
					switch {
					case strings.HasPrefix(field, "start="):
						ikey := base.ParseInternalKey(strings.TrimPrefix(field, "start="))
						start = &ikey
						boundFields++
					case strings.HasPrefix(field, "end="):
						ikey := base.ParseInternalKey(strings.TrimPrefix(field, "end="))
						end = &ikey
						boundFields++
					default:
						toBreak = true
					}
					if toBreak {
						break
					}
				}
				fields = fields[boundFields:]
				mem = newMemTable(memTableOptions{Options: d.opts})
			}
		}

		for _, data := range fields {
			i := strings.Index(data, ":")
			// Define in-progress compactions.
			if data[:i] == "compact" {
				c, err := parseCompaction(level, data)
				if err != nil {
					return nil, err
				}
				d.mu.compact.inProgress[c] = struct{}{}
				continue
			}
			if data[:i] == "rangekey" {
				span := keyspan.ParseSpan(data[i:])
				err := rangekey.Encode(&span, func(k base.InternalKey, v []byte) error {
					return mem.set(k, v)
				})
				if err != nil {
					return nil, err
				}
				continue
			}
			key := base.ParseInternalKey(data[:i])
			valueStr := data[i+1:]
			value := []byte(valueStr)
			var randBytes int
			if n, err := fmt.Sscanf(valueStr, "<rand-bytes=%d>", &randBytes); err == nil && n == 1 {
				value = make([]byte, randBytes)
				rnd := rand.New(rand.NewSource(int64(key.SeqNum())))
				if _, err := rnd.Read(value[:]); err != nil {
					return nil, err
				}
			}
			if err := mem.set(key, value); err != nil {
				return nil, err
			}
		}
	}

	if err := maybeFlush(); err != nil {
		return nil, err
	}

	if len(ve.NewFiles) > 0 {
		jobID := d.mu.nextJobID
		d.mu.nextJobID++
		d.mu.versions.logLock()
		if err := d.mu.versions.logAndApply(jobID, ve, newFileMetrics(ve.NewFiles), false, func() []compactionInfo {
			return nil
		}); err != nil {
			return nil, err
		}
		d.updateReadStateLocked(nil)
		d.updateTableStatsLocked(ve.NewFiles)
	}

	for l, maxBytes := range levelMaxBytes {
		d.mu.versions.picker.(*compactionPickerByScore).levelMaxBytes[l] = maxBytes
	}

	return d, nil
}

func runTableStatsCmd(td *datadriven.TestData, d *DB) string {
	u, err := strconv.ParseUint(strings.TrimSpace(td.Input), 10, 64)
	if err != nil {
		return err.Error()
	}
	fileNum := base.FileNum(u)

	d.mu.Lock()
	defer d.mu.Unlock()
	v := d.mu.versions.currentVersion()
	for _, levelMetadata := range v.Levels {
		iter := levelMetadata.Iter()
		for f := iter.First(); f != nil; f = iter.Next() {
			if f.FileNum != fileNum {
				continue
			}

			if !f.StatsValid() {
				d.waitTableStats()
			}

			var b bytes.Buffer
			fmt.Fprintf(&b, "num-entries: %d\n", f.Stats.NumEntries)
			fmt.Fprintf(&b, "num-deletions: %d\n", f.Stats.NumDeletions)
			fmt.Fprintf(&b, "num-range-key-sets: %d\n", f.Stats.NumRangeKeySets)
			fmt.Fprintf(&b, "point-deletions-bytes-estimate: %d\n", f.Stats.PointDeletionsBytesEstimate)
			fmt.Fprintf(&b, "range-deletions-bytes-estimate: %d\n", f.Stats.RangeDeletionsBytesEstimate)
			return b.String()
		}
	}
	return "(not found)"
}

func runTableFileSizesCmd(td *datadriven.TestData, d *DB) string {
	d.mu.Lock()
	defer d.mu.Unlock()
	return runVersionFileSizes(d.mu.versions.currentVersion())
}

func runVersionFileSizes(v *version) string {
	var buf bytes.Buffer
	for l, levelMetadata := range v.Levels {
		if levelMetadata.Empty() {
			continue
		}
		fmt.Fprintf(&buf, "L%d:\n", l)
		iter := levelMetadata.Iter()
		for f := iter.First(); f != nil; f = iter.Next() {
			fmt.Fprintf(&buf, " %s: %d bytes (%s)", f, f.Size, humanize.Bytes.Uint64(f.Size))
			if f.IsCompacting() {
				fmt.Fprintf(&buf, " (IsCompacting)")
			}
			fmt.Fprintln(&buf)
		}
	}
	return buf.String()
}

// Prints some metadata about some sstable which is currently in the latest
// version.
func runMetadataCommand(t *testing.T, td *datadriven.TestData, d *DB) string {
	var file int
	td.ScanArgs(t, "file", &file)
	var m *fileMetadata
	d.mu.Lock()
	currVersion := d.mu.versions.currentVersion()
	for _, level := range currVersion.Levels {
		lIter := level.Iter()
		for f := lIter.First(); f != nil; f = lIter.Next() {
			if f.FileNum == base.FileNum(uint64(file)) {
				m = f
				break
			}
		}
	}
	d.mu.Unlock()
	var buf bytes.Buffer
	// Add more metadata as needed.
	fmt.Fprintf(&buf, "size: %d\n", m.Size)
	return buf.String()
}

func runSSTablePropertiesCmd(t *testing.T, td *datadriven.TestData, d *DB) string {
	var file int
	td.ScanArgs(t, "file", &file)

	// See if we can grab the FileMetadata associated with the file. This is needed
	// to easily construct virtual sstable properties.
	var m *fileMetadata
	d.mu.Lock()
	currVersion := d.mu.versions.currentVersion()
	for _, level := range currVersion.Levels {
		lIter := level.Iter()
		for f := lIter.First(); f != nil; f = lIter.Next() {
			if f.FileNum == base.FileNum(uint64(file)) {
				m = f
				break
			}
		}
	}
	d.mu.Unlock()

	// Note that m can be nil here if the sstable exists in the file system, but
	// not in the lsm. If m is nil just assume that file is not virtual.

	backingFileNum := base.FileNum(uint64(file)).DiskFileNum()
	if m != nil {
		backingFileNum = m.FileBacking.DiskFileNum
	}
	fileName := base.MakeFilename(fileTypeTable, backingFileNum)
	f, err := d.opts.FS.Open(fileName)
	if err != nil {
		return err.Error()
	}
	readable, err := sstable.NewSimpleReadable(f)
	if err != nil {
		return err.Error()
	}
	// TODO(bananabrick): cacheOpts is used to set the file number on a Reader,
	// and virtual sstables expect this file number to be set. Split out the
	// opts into fileNum opts, and cache opts.
	cacheOpts := private.SSTableCacheOpts(0, backingFileNum).(sstable.ReaderOption)
	r, err := sstable.NewReader(readable, d.opts.MakeReaderOptions(), cacheOpts)
	if err != nil {
		return err.Error()
	}
	defer r.Close()

	var v sstable.VirtualReader
	props := r.Properties.String()
	if m != nil && m.Virtual {
		v = sstable.MakeVirtualReader(r, m.VirtualMeta(), false /* isForeign */)
		props = v.Properties.String()
	}
	if len(td.Input) == 0 {
		return props
	}
	var buf bytes.Buffer
	propsSlice := strings.Split(props, "\n")
	for _, requestedProp := range strings.Split(td.Input, "\n") {
		fmt.Fprintf(&buf, "%s:\n", requestedProp)
		for _, prop := range propsSlice {
			if strings.Contains(prop, requestedProp) {
				fmt.Fprintf(&buf, " %s\n", prop)
			}
		}
	}
	return buf.String()
}

func runPopulateCmd(t *testing.T, td *datadriven.TestData, b *Batch) {
	var maxKeyLength, valLength int
	var timestamps []int
	td.ScanArgs(t, "keylen", &maxKeyLength)
	td.MaybeScanArgs(t, "timestamps", &timestamps)
	td.MaybeScanArgs(t, "vallen", &valLength)
	// Default to writing timestamps @1.
	if len(timestamps) == 0 {
		timestamps = append(timestamps, 1)
	}

	ks := testkeys.Alpha(maxKeyLength)
	buf := make([]byte, ks.MaxLen()+testkeys.MaxSuffixLen)
	vbuf := make([]byte, valLength)
	for i := int64(0); i < ks.Count(); i++ {
		for _, ts := range timestamps {
			n := testkeys.WriteKeyAt(buf, ks, i, int64(ts))

			// Default to using the key as the value, but if the user provided
			// the vallen argument, generate a random value of the specified
			// length.
			value := buf[:n]
			if valLength > 0 {
				_, err := crand.Read(vbuf)
				require.NoError(t, err)
				value = vbuf
			}
			require.NoError(t, b.Set(buf[:n], value, nil))
		}
	}
}

// waitTableStats waits until all new files' statistics have been loaded. It's
// used in tests. The d.mu mutex must be locked while calling this method.
func (d *DB) waitTableStats() {
	for d.mu.tableStats.loading || len(d.mu.tableStats.pending) > 0 {
		d.mu.tableStats.cond.Wait()
	}
}

func runIngestAndExciseCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error {
	var exciseSpan KeyRange
	paths := make([]string, 0, len(td.CmdArgs))
	for i, arg := range td.CmdArgs {
		switch td.CmdArgs[i].Key {
		case "excise":
			if len(td.CmdArgs[i].Vals) != 1 {
				return errors.New("expected 2 values for excise separated by -, eg. ingest-and-excise foo1 excise=\"start-end\"")
			}
			fields := strings.Split(td.CmdArgs[i].Vals[0], "-")
			if len(fields) != 2 {
				return errors.New("expected 2 values for excise separated by -, eg. ingest-and-excise foo1 excise=\"start-end\"")
			}
			exciseSpan.Start = []byte(fields[0])
			exciseSpan.End = []byte(fields[1])
		default:
			paths = append(paths, arg.String())
		}
	}

	if _, err := d.IngestAndExcise(paths, nil /* shared */, exciseSpan); err != nil {
		return err
	}
	return nil
}

func runIngestCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error {
	paths := make([]string, 0, len(td.CmdArgs))
	for _, arg := range td.CmdArgs {
		paths = append(paths, arg.String())
	}

	if err := d.Ingest(paths); err != nil {
		return err
	}
	return nil
}

func runIngestExternalCmd(td *datadriven.TestData, d *DB, locator string) error {
	external := make([]ExternalFile, 0)
	for _, arg := range strings.Split(td.Input, "\n") {
		fields := strings.Split(arg, ",")
		if len(fields) != 4 {
			return errors.New("usage: path,size,smallest,largest")
		}
		ef := ExternalFile{}
		ef.Locator = remote.Locator(locator)
		ef.ObjName = fields[0]
		sizeInt, err := strconv.Atoi(fields[1])
		if err != nil {
			return err
		}
		ef.Size = uint64(sizeInt)
		ef.SmallestUserKey = []byte(fields[2])
		ef.LargestUserKey = []byte(fields[3])
		ef.HasPointKey = true
		external = append(external, ef)
	}

	if _, err := d.IngestExternalFiles(external); err != nil {
		return err
	}
	return nil
}

func runForceIngestCmd(td *datadriven.TestData, d *DB) error {
	var paths []string
	var level int
	for _, arg := range td.CmdArgs {
		switch arg.Key {
		case "paths":
			paths = append(paths, arg.Vals...)
		case "level":
			var err error
			level, err = strconv.Atoi(arg.Vals[0])
			if err != nil {
				return err
			}
		}
	}
	_, err := d.ingest(paths, func(
		tableNewIters,
		keyspan.TableNewSpanIter,
		IterOptions,
		*Comparer,
		*version,
		int,
		map[*compaction]struct{},
		*fileMetadata,
		bool,
	) (int, *fileMetadata, error) {
		return level, nil, nil
	}, nil /* shared */, KeyRange{}, nil /* external */)
	return err
}

func runLSMCmd(td *datadriven.TestData, d *DB) string {
	d.mu.Lock()
	defer d.mu.Unlock()
	if td.HasArg("verbose") {
		return d.mu.versions.currentVersion().DebugString(d.opts.Comparer.FormatKey)
	}
	return d.mu.versions.currentVersion().String()
}

func parseDBOptionsArgs(opts *Options, args []datadriven.CmdArg) error {
	for _, cmdArg := range args {
		switch cmdArg.Key {
		case "auto-compactions":
			switch cmdArg.Vals[0] {
			case "off":
				opts.DisableAutomaticCompactions = true
			case "on":
				opts.DisableAutomaticCompactions = false
			default:
				return errors.Errorf("Unrecognized %q arg value: %q", cmdArg.Key, cmdArg.Vals[0])
			}
		case "inject-errors":
			injs := make([]errorfs.Injector, len(cmdArg.Vals))
			for i := 0; i < len(cmdArg.Vals); i++ {
				inj, err := errorfs.ParseDSL(cmdArg.Vals[i])
				if err != nil {
					return err
				}
				injs[i] = inj
			}
			opts.FS = errorfs.Wrap(opts.FS, errorfs.Any(injs...))
		case "enable-table-stats":
			enable, err := strconv.ParseBool(cmdArg.Vals[0])
			if err != nil {
				return errors.Errorf("%s: could not parse %q as bool: %s", cmdArg.Key, cmdArg.Vals[0], err)
			}
			opts.private.disableTableStats = !enable
		case "format-major-version":
			v, err := strconv.Atoi(cmdArg.Vals[0])
			if err != nil {
				return err
			}
			// Override the DB version.
			opts.FormatMajorVersion = FormatMajorVersion(v)
		case "block-size":
			v, err := strconv.Atoi(cmdArg.Vals[0])
			if err != nil {
				return err
			}
			for i := range opts.Levels {
				opts.Levels[i].BlockSize = v
			}
		case "index-block-size":
			v, err := strconv.Atoi(cmdArg.Vals[0])
			if err != nil {
				return err
			}
			for i := range opts.Levels {
				opts.Levels[i].IndexBlockSize = v
			}
		case "target-file-size":
			v, err := strconv.Atoi(cmdArg.Vals[0])
			if err != nil {
				return err
			}
			for i := range opts.Levels {
				opts.Levels[i].TargetFileSize = int64(v)
			}
		case "bloom-bits-per-key":
			v, err := strconv.Atoi(cmdArg.Vals[0])
			if err != nil {
				return err
			}
			fp := bloom.FilterPolicy(v)
			opts.Filters = map[string]FilterPolicy{fp.Name(): fp}
			for i := range opts.Levels {
				opts.Levels[i].FilterPolicy = fp
			}
		case "merger":
			switch cmdArg.Vals[0] {
			case "appender":
				opts.Merger = base.DefaultMerger
			default:
				return errors.Newf("unrecognized Merger %q\n", cmdArg.Vals[0])
			}
		}
	}
	return nil
}
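
// The helpers above are typically wired together by data-driven tests
// elsewhere in the package, which dispatch on td.Cmd and feed td.Input to
// the appropriate run*Cmd function. The sketch below is illustrative only
// and is not part of the original file: the test name, the testdata path,
// and the command set are assumptions, covering just a minimal subset of
// the commands the real tests dispatch on.
func TestDataDrivenHelpersExample(t *testing.T) {
	var d *DB
	defer func() {
		if d != nil {
			require.NoError(t, d.Close())
		}
	}()
	datadriven.RunTest(t, "testdata/example", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "define":
			// Rebuild the database from the memtable/sstable description in
			// td.Input and print the resulting LSM shape.
			if d != nil {
				if err := d.Close(); err != nil {
					return err.Error()
				}
			}
			var err error
			if d, err = runDBDefineCmd(td, nil /* opts */); err != nil {
				return err.Error()
			}
			return runLSMCmd(td, d)
		case "batch":
			// Apply the batch ops described by td.Input and commit them.
			b := d.NewBatch()
			if err := runBatchDefineCmd(td, b); err != nil {
				return err.Error()
			}
			if err := b.Commit(nil); err != nil {
				return err.Error()
			}
			return ""
		case "get":
			return runGetCmd(t, td, d)
		case "lsm":
			return runLSMCmd(td, d)
		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
}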