github.com/cockroachdb/pebble@v1.1.5/tool/db.go

// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package tool

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"strings"
	"text/tabwriter"

	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/errors/oserror"
	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/internal/manifest"
	"github.com/cockroachdb/pebble/objstorage"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/tool/logs"
	"github.com/spf13/cobra"
)

// dbT implements db-level tools, including both configuration state and the
// commands themselves.
type dbT struct {
	Root       *cobra.Command
	Check      *cobra.Command
	Upgrade    *cobra.Command
	Checkpoint *cobra.Command
	Get        *cobra.Command
	Logs       *cobra.Command
	LSM        *cobra.Command
	Properties *cobra.Command
	Scan       *cobra.Command
	Set        *cobra.Command
	Space      *cobra.Command
	IOBench    *cobra.Command

	// Configuration.
	opts            *pebble.Options
	comparers       sstable.Comparers
	mergers         sstable.Mergers
	openErrEnhancer func(error) error

	// Flags.
	comparerName  string
	mergerName    string
	fmtKey        keyFormatter
	fmtValue      valueFormatter
	start         key
	end           key
	count         int64
	allLevels     bool
	ioCount       int
	ioParallelism int
	ioSizes       string
	verbose       bool
	bypassPrompt  bool
}

func newDB(
	opts *pebble.Options,
	comparers sstable.Comparers,
	mergers sstable.Mergers,
	openErrEnhancer func(error) error,
) *dbT {
	d := &dbT{
		opts:            opts,
		comparers:       comparers,
		mergers:         mergers,
		openErrEnhancer: openErrEnhancer,
	}
	d.fmtKey.mustSet("quoted")
	d.fmtValue.mustSet("[%x]")

	d.Root = &cobra.Command{
		Use:     "db",
		Short:   "DB introspection tools",
		Version: fmt.Sprintf("supported Pebble format versions: %d-%d", pebble.FormatMostCompatible, pebble.FormatNewest),
	}
	d.Root.SetVersionTemplate(`{{printf "%s" .Short}}
{{printf "%s" .Version}}
`)
	d.Check = &cobra.Command{
		Use:   "check <dir>",
		Short: "verify checksums and metadata",
		Long: `
Verify sstable, manifest, and WAL checksums. Requires that the specified
database not be in use by another process.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runCheck,
	}
	d.Upgrade = &cobra.Command{
		Use:   "upgrade <dir>",
		Short: "upgrade the DB internal format version",
		Long: `
Upgrades the DB internal format version to the latest version.
It is recommended to make a backup copy of the DB directory before upgrading.
Requires that the specified database not be in use by another process.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runUpgrade,
	}
	d.Checkpoint = &cobra.Command{
		Use:   "checkpoint <src-dir> <dest-dir>",
		Short: "create a checkpoint",
		Long: `
Creates a Pebble checkpoint in the specified destination directory. A checkpoint
is a point-in-time snapshot of DB state. Requires that the specified
database not be in use by another process.
`,
		Args: cobra.ExactArgs(2),
		Run:  d.runCheckpoint,
	}
	d.Get = &cobra.Command{
		Use:   "get <dir> <key>",
		Short: "get value for a key",
		Long: `
Gets the value for a key, if it exists in the DB. Prints a "not found" error if
the key does not exist. Requires that the specified database not be in use by
another process.
`,
		Args: cobra.ExactArgs(2),
		Run:  d.runGet,
	}
	d.Logs = logs.NewCmd()
	d.LSM = &cobra.Command{
		Use:   "lsm <dir>",
		Short: "print LSM structure",
		Long: `
Print the structure of the LSM tree. Requires that the specified database not
be in use by another process.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runLSM,
	}
	d.Properties = &cobra.Command{
		Use:   "properties <dir>",
		Short: "print aggregated sstable properties",
		Long: `
Print SSTable properties, aggregated per level of the LSM.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runProperties,
	}
	d.Scan = &cobra.Command{
		Use:   "scan <dir>",
		Short: "print db records",
		Long: `
Print the records in the DB. Requires that the specified database not be in use
by another process.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runScan,
	}
	d.Set = &cobra.Command{
		Use:   "set <dir> <key> <value>",
		Short: "set a value for a key",
		Long: `
Adds a new key/value to the DB. Requires that the specified database
not be in use by another process.
`,
		Args: cobra.ExactArgs(3),
		Run:  d.runSet,
	}
	d.Space = &cobra.Command{
		Use:   "space <dir>",
		Short: "print filesystem space used",
		Long: `
Print the estimated filesystem space usage for the range specified by --start
and --end (both bounds inclusive). Requires that the specified database not be
in use by another process.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runSpace,
	}
	d.IOBench = &cobra.Command{
		Use:   "io-bench <dir>",
		Short: "perform sstable IO benchmark",
		Long: `
Run a random IO workload with various IO sizes against the sstables in the
specified database.
`,
		Args: cobra.ExactArgs(1),
		Run:  d.runIOBench,
	}

	d.Root.AddCommand(d.Check, d.Upgrade, d.Checkpoint, d.Get, d.Logs, d.LSM, d.Properties, d.Scan, d.Set, d.Space, d.IOBench)
	d.Root.PersistentFlags().BoolVarP(&d.verbose, "verbose", "v", false, "verbose output")

	for _, cmd := range []*cobra.Command{d.Check, d.Upgrade, d.Checkpoint, d.Get, d.LSM, d.Properties, d.Scan, d.Set, d.Space} {
		cmd.Flags().StringVar(
			&d.comparerName, "comparer", "", "comparer name (use default if empty)")
		cmd.Flags().StringVar(
			&d.mergerName, "merger", "", "merger name (use default if empty)")
	}

	for _, cmd := range []*cobra.Command{d.Scan, d.Space} {
		cmd.Flags().Var(
			&d.start, "start", "start key for the range")
		cmd.Flags().Var(
			&d.end, "end", "end key for the range")
	}

	d.Scan.Flags().Var(
		&d.fmtKey, "key", "key formatter")
	for _, cmd := range []*cobra.Command{d.Scan, d.Get} {
		cmd.Flags().Var(
			&d.fmtValue, "value", "value formatter")
	}
	for _, cmd := range []*cobra.Command{d.Upgrade} {
		cmd.Flags().BoolVarP(
			&d.bypassPrompt, "yes", "y", false, "bypass prompt")
	}

	d.Scan.Flags().Int64Var(
		&d.count, "count", 0, "key count for scan (0 is unlimited)")

	d.IOBench.Flags().BoolVar(
		&d.allLevels, "all-levels", false, "if set, benchmark all levels (default is only L5/L6)")
	d.IOBench.Flags().IntVar(
		&d.ioCount, "io-count", 10000, "number of IOs (per IO size) to benchmark")
	d.IOBench.Flags().IntVar(
		&d.ioParallelism, "io-parallelism", 16, "number of goroutines issuing IO")
	d.IOBench.Flags().StringVar(
		&d.ioSizes, "io-sizes-kb", "4,16,64,128,256,512,1024", "comma separated list of IO sizes in KB")

	return d
}

func (d *dbT) loadOptions(dir string) error {
	ls, err := d.opts.FS.List(dir)
	if err != nil || len(ls) == 0 {
		// NB: We don't return the error here as we prefer to return the error from
		// pebble.Open. Another way to put this is that a non-existent directory is
		// not a failure in loading the options.
		return nil
	}

	hooks := &pebble.ParseHooks{
		NewComparer: func(name string) (*pebble.Comparer, error) {
			if c := d.comparers[name]; c != nil {
				return c, nil
			}
			return nil, errors.Errorf("unknown comparer %q", errors.Safe(name))
		},
		NewMerger: func(name string) (*pebble.Merger, error) {
			if m := d.mergers[name]; m != nil {
				return m, nil
			}
			return nil, errors.Errorf("unknown merger %q", errors.Safe(name))
		},
		SkipUnknown: func(name, value string) bool {
			return true
		},
	}

	// TODO(peter): RocksDB sometimes leaves multiple OPTIONS files in
	// existence. We parse all of them as the comparer and merger shouldn't be
	// changing. We could parse only the first or the latest. Not clear if this
	// matters.
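	// Every OPTIONS file found in the directory is parsed into dbOpts below;
	// only the comparer and merger discovered there are copied back into
	// d.opts, since those are what the tool needs in order to interpret keys
	// and merge operands.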
	var dbOpts pebble.Options
	for _, filename := range ls {
		ft, _, ok := base.ParseFilename(d.opts.FS, filename)
		if !ok {
			continue
		}
		switch ft {
		case base.FileTypeOptions:
			err := func() error {
				f, err := d.opts.FS.Open(d.opts.FS.PathJoin(dir, filename))
				if err != nil {
					return err
				}
				defer f.Close()

				data, err := io.ReadAll(f)
				if err != nil {
					return err
				}

				if err := dbOpts.Parse(string(data), hooks); err != nil {
					return err
				}
				return nil
			}()
			if err != nil {
				return err
			}
		}
	}

	if dbOpts.Comparer != nil {
		d.opts.Comparer = dbOpts.Comparer
	}
	if dbOpts.Merger != nil {
		d.opts.Merger = dbOpts.Merger
	}
	return nil
}

type openOption interface {
	apply(opts *pebble.Options)
}

func (d *dbT) openDB(dir string, openOptions ...openOption) (*pebble.DB, error) {
	db, err := d.openDBInternal(dir, openOptions...)
	if err != nil {
		if d.openErrEnhancer != nil {
			err = d.openErrEnhancer(err)
		}
		return nil, err
	}
	return db, nil
}

func (d *dbT) openDBInternal(dir string, openOptions ...openOption) (*pebble.DB, error) {
	if err := d.loadOptions(dir); err != nil {
		return nil, errors.Wrap(err, "error loading options")
	}
	if d.comparerName != "" {
		d.opts.Comparer = d.comparers[d.comparerName]
		if d.opts.Comparer == nil {
			return nil, errors.Errorf("unknown comparer %q", errors.Safe(d.comparerName))
		}
	}
	if d.mergerName != "" {
		d.opts.Merger = d.mergers[d.mergerName]
		if d.opts.Merger == nil {
			return nil, errors.Errorf("unknown merger %q", errors.Safe(d.mergerName))
		}
	}
	opts := *d.opts
	for _, opt := range openOptions {
		opt.apply(&opts)
	}
	opts.Cache = pebble.NewCache(128 << 20 /* 128 MB */)
	defer opts.Cache.Unref()
	return pebble.Open(dir, &opts)
}

func (d *dbT) closeDB(stderr io.Writer, db *pebble.DB) {
	if err := db.Close(); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
	}
}

func (d *dbT) runCheck(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	db, err := d.openDB(args[0])
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)

	var stats pebble.CheckLevelsStats
	if err := db.CheckLevels(&stats); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
	}
	fmt.Fprintf(stdout, "checked %d %s and %d %s\n",
		stats.NumPoints, makePlural("point", stats.NumPoints), stats.NumTombstones, makePlural("tombstone", int64(stats.NumTombstones)))
}

func (d *dbT) runUpgrade(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	db, err := d.openDB(args[0], nonReadOnly{})
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)

	targetVersion := pebble.FormatNewest
	current := db.FormatMajorVersion()
	if current >= targetVersion {
		fmt.Fprintf(stdout, "DB is already at internal version %d.\n", current)
		return
	}
	fmt.Fprintf(stdout, "Upgrading DB from internal version %d to %d.\n", current, targetVersion)

	prompt := `WARNING!!!
This DB will not be usable with older versions of Pebble!

It is strongly recommended to back up the data before upgrading.
`

	if len(d.opts.BlockPropertyCollectors) == 0 {
		prompt += `
If this DB uses custom block property collectors, the upgrade should be invoked
through a custom binary that configures them. Otherwise, any new tables created
during upgrade will not have the relevant block properties.
`
	}
	if !d.promptForConfirmation(prompt, cmd.InOrStdin(), stdout, stderr) {
		return
	}
	if err := db.RatchetFormatMajorVersion(targetVersion); err != nil {
		fmt.Fprintf(stderr, "error: %s\n", err)
	}
	fmt.Fprintf(stdout, "Upgrade complete.\n")
}

func (d *dbT) runCheckpoint(cmd *cobra.Command, args []string) {
	stderr := cmd.ErrOrStderr()
	db, err := d.openDB(args[0], nonReadOnly{})
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)
	destDir := args[1]

	if err := db.Checkpoint(destDir); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
	}
}

func (d *dbT) runGet(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	db, err := d.openDB(args[0])
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)
	var k key
	if err := k.Set(args[1]); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}

	val, closer, err := db.Get(k)
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer func() {
		if closer != nil {
			closer.Close()
		}
	}()
	if val != nil {
		fmt.Fprintf(stdout, "%s\n", d.fmtValue.fn(k, val))
	}
}

func (d *dbT) runLSM(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	db, err := d.openDB(args[0])
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)

	fmt.Fprintf(stdout, "%s", db.Metrics())
}

func (d *dbT) runScan(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	db, err := d.openDB(args[0])
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)

	// Update the internal formatter if this comparer has one specified.
	if d.opts.Comparer != nil {
		d.fmtKey.setForComparer(d.opts.Comparer.Name, d.comparers)
		d.fmtValue.setForComparer(d.opts.Comparer.Name, d.comparers)
	}

	start := timeNow()
	fmtKeys := d.fmtKey.spec != "null"
	fmtValues := d.fmtValue.spec != "null"
	var count int64

	iter, _ := db.NewIter(&pebble.IterOptions{
		UpperBound: d.end,
	})
	for valid := iter.SeekGE(d.start); valid; valid = iter.Next() {
		if fmtKeys || fmtValues {
			needDelimiter := false
			if fmtKeys {
				fmt.Fprintf(stdout, "%s", d.fmtKey.fn(iter.Key()))
				needDelimiter = true
			}
			if fmtValues {
				if needDelimiter {
					stdout.Write([]byte{' '})
				}
				fmt.Fprintf(stdout, "%s", d.fmtValue.fn(iter.Key(), iter.Value()))
			}
			stdout.Write([]byte{'\n'})
		}

		count++
		if d.count > 0 && count >= d.count {
			break
		}
	}

	if err := iter.Close(); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
	}

	elapsed := timeNow().Sub(start)

	fmt.Fprintf(stdout, "scanned %d %s in %0.1fs\n",
		count, makePlural("record", count), elapsed.Seconds())
}

func (d *dbT) runSpace(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	db, err := d.openDB(args[0])
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)

	bytes, err := db.EstimateDiskUsage(d.start, d.end)
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	fmt.Fprintf(stdout, "%d\n", bytes)
}

func (d *dbT) runProperties(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
	dirname := args[0]
	err := func() error {
		desc, err := pebble.Peek(dirname, d.opts.FS)
		if err != nil {
			return err
		} else if !desc.Exists {
			return oserror.ErrNotExist
		}
		manifestFilename := d.opts.FS.PathBase(desc.ManifestFilename)

		// Replay the manifest to get the current version.
		f, err := d.opts.FS.Open(desc.ManifestFilename)
		if err != nil {
			return errors.Wrapf(err, "pebble: could not open MANIFEST file %q", manifestFilename)
		}
		defer f.Close()

		cmp := base.DefaultComparer
		var bve manifest.BulkVersionEdit
		bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
		rr := record.NewReader(f, 0 /* logNum */)
		for {
			r, err := rr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				return errors.Wrapf(err, "pebble: reading manifest %q", manifestFilename)
			}
			var ve manifest.VersionEdit
			err = ve.Decode(r)
			if err != nil {
				return err
			}
			if err := bve.Accumulate(&ve); err != nil {
				return err
			}
			if ve.ComparerName != "" {
				cmp = d.comparers[ve.ComparerName]
				d.fmtKey.setForComparer(ve.ComparerName, d.comparers)
				d.fmtValue.setForComparer(ve.ComparerName, d.comparers)
			}
		}
		v, err := bve.Apply(
			nil /* version */, cmp.Compare, d.fmtKey.fn, d.opts.FlushSplitBytes,
			d.opts.Experimental.ReadCompactionRate, nil, /* zombies */
			manifest.AllowSplitUserKeys,
		)
		if err != nil {
			return err
		}

		objProvider, err := objstorageprovider.Open(objstorageprovider.DefaultSettings(d.opts.FS, dirname))
		if err != nil {
			return err
		}
		defer objProvider.Close()

		// Load and aggregate sstable properties.
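		// Each level's physical sstables are summed into a single props value
		// (virtual sstables are skipped below), followed by a TOTAL column,
		// and the result is rendered as a tab-aligned table.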
		tw := tabwriter.NewWriter(stdout, 2, 1, 4, ' ', 0)
		var total props
		var all []props
		for _, l := range v.Levels {
			iter := l.Iter()
			var level props
			for t := iter.First(); t != nil; t = iter.Next() {
				if t.Virtual {
					// TODO(bananabrick): Handle virtual sstables here. We don't
					// really have any stats or properties at this point. Maybe
					// we could approximate some of these properties for virtual
					// sstables by first grabbing properties for the backing
					// physical sstable, and then extrapolating.
					continue
				}
				err := d.addProps(objProvider, t.PhysicalMeta(), &level)
				if err != nil {
					return err
				}
			}
			all = append(all, level)
			total.update(level)
		}
		all = append(all, total)

		fmt.Fprintln(tw, "\tL0\tL1\tL2\tL3\tL4\tL5\tL6\tTOTAL")

		fmt.Fprintf(tw, "count\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			propArgs(all, func(p *props) interface{} { return p.Count })...)

		fmt.Fprintln(tw, "seq num\t\t\t\t\t\t\t\t")
		fmt.Fprintf(tw, " smallest\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			propArgs(all, func(p *props) interface{} { return p.SmallestSeqNum })...)
		fmt.Fprintf(tw, " largest\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			propArgs(all, func(p *props) interface{} { return p.LargestSeqNum })...)

		fmt.Fprintln(tw, "size\t\t\t\t\t\t\t\t")
		fmt.Fprintf(tw, " data\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.DataSize) })...)
		fmt.Fprintf(tw, " blocks\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			propArgs(all, func(p *props) interface{} { return p.NumDataBlocks })...)
		fmt.Fprintf(tw, " index\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.IndexSize) })...)
		fmt.Fprintf(tw, " blocks\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			propArgs(all, func(p *props) interface{} { return p.NumIndexBlocks })...)
		fmt.Fprintf(tw, " top-level\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.TopLevelIndexSize) })...)
		fmt.Fprintf(tw, " filter\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.FilterSize) })...)
		fmt.Fprintf(tw, " raw-key\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawKeySize) })...)
		fmt.Fprintf(tw, " raw-value\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawValueSize) })...)
		fmt.Fprintf(tw, " pinned-key\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.SnapshotPinnedKeySize) })...)
		fmt.Fprintf(tw, " pinned-value\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.SnapshotPinnedValueSize) })...)
		fmt.Fprintf(tw, " point-del-key-size\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawPointTombstoneKeySize) })...)
		fmt.Fprintf(tw, " point-del-value-size\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawPointTombstoneValueSize) })...)
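		// The "records" rows below break the entry counts down by kind; the
		// "set" row is derived as NumEntries - NumDeletions - NumMergeOperands,
		// since sets are not tracked separately in the table properties.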

		fmt.Fprintln(tw, "records\t\t\t\t\t\t\t\t")
		fmt.Fprintf(tw, " set\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} {
				return humanize.Count.Uint64(p.NumEntries - p.NumDeletions - p.NumMergeOperands)
			})...)
		fmt.Fprintf(tw, " delete\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumDeletions - p.NumRangeDeletions) })...)
		fmt.Fprintf(tw, " delete-sized\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumSizedDeletions) })...)
		fmt.Fprintf(tw, " range-delete\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeDeletions) })...)
		fmt.Fprintf(tw, " range-key-sets\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeKeySets) })...)
		fmt.Fprintf(tw, " range-key-unsets\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeKeyUnSets) })...)
		fmt.Fprintf(tw, " range-key-deletes\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeKeyDeletes) })...)
		fmt.Fprintf(tw, " merge\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumMergeOperands) })...)
		fmt.Fprintf(tw, " pinned\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
			propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.SnapshotPinnedKeys) })...)

		if err := tw.Flush(); err != nil {
			return err
		}
		return nil
	}()
	if err != nil {
		fmt.Fprintln(stderr, err)
	}
}

func (d *dbT) runSet(cmd *cobra.Command, args []string) {
	stderr := cmd.ErrOrStderr()
	db, err := d.openDB(args[0], nonReadOnly{})
	if err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	defer d.closeDB(stderr, db)
	var k, v key
	if err := k.Set(args[1]); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}
	if err := v.Set(args[2]); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		return
	}

	if err := db.Set(k, v, nil); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
	}
}

func (d *dbT) promptForConfirmation(prompt string, stdin io.Reader, stdout, stderr io.Writer) bool {
	if d.bypassPrompt {
		return true
	}
	if _, err := fmt.Fprintf(stdout, "%s\n", prompt); err != nil {
		fmt.Fprintf(stderr, "Error: %v\n", err)
		return false
	}
	reader := bufio.NewReader(stdin)
	for {
		if _, err := fmt.Fprintf(stdout, "Continue? [Y/N] "); err != nil {
			fmt.Fprintf(stderr, "Error: %v\n", err)
			return false
		}
		answer, err := reader.ReadString('\n')
		if err != nil {
			fmt.Fprintf(stderr, "Error: %v\n", err)
			return false
		}
		answer = strings.ToLower(strings.TrimSpace(answer))
		if answer == "y" || answer == "yes" {
			return true
		}

		if answer == "n" || answer == "no" {
			_, _ = fmt.Fprintf(stderr, "Aborting\n")
			return false
		}
	}
}

type nonReadOnly struct{}

func (n nonReadOnly) apply(opts *pebble.Options) {
	opts.ReadOnly = false
	// Increase the L0 compaction threshold to reduce the likelihood of an
	// unintended compaction changing test output.
	opts.L0CompactionThreshold = 10
}

func propArgs(props []props, getProp func(*props) interface{}) []interface{} {
	args := make([]interface{}, 0, len(props))
	for _, p := range props {
		args = append(args, getProp(&p))
	}
	return args
}

type props struct {
	Count                      uint64
	SmallestSeqNum             uint64
	LargestSeqNum              uint64
	DataSize                   uint64
	FilterSize                 uint64
	IndexSize                  uint64
	NumDataBlocks              uint64
	NumIndexBlocks             uint64
	NumDeletions               uint64
	NumSizedDeletions          uint64
	NumEntries                 uint64
	NumMergeOperands           uint64
	NumRangeDeletions          uint64
	NumRangeKeySets            uint64
	NumRangeKeyUnSets          uint64
	NumRangeKeyDeletes         uint64
	RawKeySize                 uint64
	RawPointTombstoneKeySize   uint64
	RawPointTombstoneValueSize uint64
	RawValueSize               uint64
	SnapshotPinnedKeys         uint64
	SnapshotPinnedKeySize      uint64
	SnapshotPinnedValueSize    uint64
	TopLevelIndexSize          uint64
}

func (p *props) update(o props) {
	p.Count += o.Count
	if o.SmallestSeqNum != 0 && (o.SmallestSeqNum < p.SmallestSeqNum || p.SmallestSeqNum == 0) {
		p.SmallestSeqNum = o.SmallestSeqNum
	}
	if o.LargestSeqNum > p.LargestSeqNum {
		p.LargestSeqNum = o.LargestSeqNum
	}
	p.DataSize += o.DataSize
	p.FilterSize += o.FilterSize
	p.IndexSize += o.IndexSize
	p.NumDataBlocks += o.NumDataBlocks
	p.NumIndexBlocks += o.NumIndexBlocks
	p.NumDeletions += o.NumDeletions
	p.NumSizedDeletions += o.NumSizedDeletions
	p.NumEntries += o.NumEntries
	p.NumMergeOperands += o.NumMergeOperands
	p.NumRangeDeletions += o.NumRangeDeletions
	p.NumRangeKeySets += o.NumRangeKeySets
	p.NumRangeKeyUnSets += o.NumRangeKeyUnSets
	p.NumRangeKeyDeletes += o.NumRangeKeyDeletes
	p.RawKeySize += o.RawKeySize
	p.RawPointTombstoneKeySize += o.RawPointTombstoneKeySize
	p.RawPointTombstoneValueSize += o.RawPointTombstoneValueSize
	p.RawValueSize += o.RawValueSize
	p.SnapshotPinnedKeySize += o.SnapshotPinnedKeySize
	p.SnapshotPinnedValueSize += o.SnapshotPinnedValueSize
	p.SnapshotPinnedKeys += o.SnapshotPinnedKeys
	p.TopLevelIndexSize += o.TopLevelIndexSize
}

func (d *dbT) addProps(
	objProvider objstorage.Provider, m manifest.PhysicalFileMeta, p *props,
) error {
	ctx := context.Background()
	f, err := objProvider.OpenForReading(ctx, base.FileTypeTable, m.FileBacking.DiskFileNum, objstorage.OpenOptions{})
	if err != nil {
		return err
	}
	r, err := sstable.NewReader(f, sstable.ReaderOptions{}, d.mergers, d.comparers)
	if err != nil {
		_ = f.Close()
		return err
	}
	p.update(props{
		Count:                      1,
		SmallestSeqNum:             m.SmallestSeqNum,
		LargestSeqNum:              m.LargestSeqNum,
		DataSize:                   r.Properties.DataSize,
		FilterSize:                 r.Properties.FilterSize,
		IndexSize:                  r.Properties.IndexSize,
		NumDataBlocks:              r.Properties.NumDataBlocks,
		NumIndexBlocks:             1 + r.Properties.IndexPartitions,
		NumDeletions:               r.Properties.NumDeletions,
		NumSizedDeletions:          r.Properties.NumSizedDeletions,
		NumEntries:                 r.Properties.NumEntries,
		NumMergeOperands:           r.Properties.NumMergeOperands,
		NumRangeDeletions:          r.Properties.NumRangeDeletions,
		NumRangeKeySets:            r.Properties.NumRangeKeySets,
		NumRangeKeyUnSets:          r.Properties.NumRangeKeyUnsets,
		NumRangeKeyDeletes:         r.Properties.NumRangeKeyDels,
		RawKeySize:                 r.Properties.RawKeySize,
		RawPointTombstoneKeySize:   r.Properties.RawPointTombstoneKeySize,
		RawPointTombstoneValueSize: r.Properties.RawPointTombstoneValueSize,
		RawValueSize:               r.Properties.RawValueSize,
		SnapshotPinnedKeySize:      r.Properties.SnapshotPinnedKeySize,
		SnapshotPinnedValueSize:    r.Properties.SnapshotPinnedValueSize,
		SnapshotPinnedKeys:         r.Properties.SnapshotPinnedKeys,
		TopLevelIndexSize:          r.Properties.TopLevelIndexSize,
	})
	return r.Close()
}

func makePlural(singular string, count int64) string {
	if count > 1 {
		return fmt.Sprintf("%ss", singular)
	}
	return singular
}
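// Example wiring (an illustrative sketch, not part of this file): the db
// commands constructed by newDB are exposed to users of this package through
// the exported New constructor in tool.go, which returns a T whose Commands
// slice can be mounted on a caller's root cobra command. The binary name and
// option choices below are hypothetical.
//
//	func main() {
//		root := &cobra.Command{Use: "pebble-tool"}
//		// tool.New accepts options (e.g. tool.Comparers, tool.Mergers) for
//		// registering the custom comparers and mergers that a DB's OPTIONS
//		// file may reference.
//		t := tool.New()
//		root.AddCommand(t.Commands...)
//		if err := root.Execute(); err != nil {
//			os.Exit(1)
//		}
//	}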