// Copyright 2013 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"fmt"
	"math"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/petermattis/pebble/internal/base"
	"github.com/petermattis/pebble/internal/datadriven"
	"github.com/petermattis/pebble/internal/manifest"
	"github.com/petermattis/pebble/sstable"
	"github.com/petermattis/pebble/vfs"
)

// TestPickCompaction exercises compactionPicker.pickAuto against hand-built
// LSM versions. Each case constructs a version (per-level file metadata), a
// picker state (score/level/baseLevel), and checks which files the picker
// selects. The expected string has the form
// "<startLevel files> <outputLevel files> <grandparent files>", so cases with
// empty output/grandparent sets carry significant trailing spaces.
func TestPickCompaction(t *testing.T) {
	// fileNums renders a slice of file metadata as a sorted, comma-separated
	// list of file numbers, e.g. "310,320,330".
	fileNums := func(f []fileMetadata) string {
		ss := make([]string, 0, len(f))
		for _, meta := range f {
			ss = append(ss, strconv.Itoa(int(meta.FileNum)))
		}
		sort.Strings(ss)
		return strings.Join(ss, ",")
	}

	opts := (*Options)(nil).EnsureDefaults()
	testCases := []struct {
		desc    string
		version version
		picker  compactionPicker
		want    string
	}{
		{
			// A zero-valued picker (score 0) selects nothing.
			desc: "no compaction",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("j.SET.102"),
						},
					},
				},
			},
			want: "",
		},

		{
			desc: "1 L0 file",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("j.SET.102"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     0,
				baseLevel: 1,
			},
			want: "100  ",
		},

		{
			// Non-overlapping L0 files: only the first is picked.
			desc: "2 L0 files (0 overlaps)",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("j.SET.102"),
						},
						{
							FileNum:  110,
							Size:     1,
							Smallest: base.ParseInternalKey("k.SET.111"),
							Largest:  base.ParseInternalKey("l.SET.112"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     0,
				baseLevel: 1,
			},
			want: "100  ",
		},

		{
			// Overlapping internal-key ranges pull both L0 files in.
			desc: "2 L0 files, with ikey overlap",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("p.SET.102"),
						},
						{
							FileNum:  110,
							Size:     1,
							Smallest: base.ParseInternalKey("j.SET.111"),
							Largest:  base.ParseInternalKey("q.SET.112"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     0,
				baseLevel: 1,
			},
			want: "100,110  ",
		},

		{
			// Overlap on the user key alone (different seqnums) also pulls
			// both files in.
			desc: "2 L0 files, with ukey overlap",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("i.SET.102"),
						},
						{
							FileNum:  110,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.111"),
							Largest:  base.ParseInternalKey("i.SET.112"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     0,
				baseLevel: 1,
			},
			want: "100,110  ",
		},

		{
			// L1 files don't overlap the L0 file, so none are selected as
			// output-level inputs.
			desc: "1 L0 file, 2 L1 files (0 overlaps)",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("i.SET.102"),
						},
					},
					1: []fileMetadata{
						{
							FileNum:  200,
							Size:     1,
							Smallest: base.ParseInternalKey("a.SET.201"),
							Largest:  base.ParseInternalKey("b.SET.202"),
						},
						{
							FileNum:  210,
							Size:     1,
							Smallest: base.ParseInternalKey("y.SET.211"),
							Largest:  base.ParseInternalKey("z.SET.212"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     0,
				baseLevel: 1,
			},
			want: "100  ",
		},

		{
			// Overlapping L1 file 210 joins the compaction, and the three L2
			// files overlapping the combined range become grandparents.
			desc: "1 L0 file, 2 L1 files (1 overlap), 4 L2 files (3 overlaps)",
			version: version{
				Files: [numLevels][]fileMetadata{
					0: []fileMetadata{
						{
							FileNum:  100,
							Size:     1,
							Smallest: base.ParseInternalKey("i.SET.101"),
							Largest:  base.ParseInternalKey("t.SET.102"),
						},
					},
					1: []fileMetadata{
						{
							FileNum:  200,
							Size:     1,
							Smallest: base.ParseInternalKey("a.SET.201"),
							Largest:  base.ParseInternalKey("e.SET.202"),
						},
						{
							FileNum:  210,
							Size:     1,
							Smallest: base.ParseInternalKey("f.SET.211"),
							Largest:  base.ParseInternalKey("j.SET.212"),
						},
					},
					2: []fileMetadata{
						{
							FileNum:  300,
							Size:     1,
							Smallest: base.ParseInternalKey("a.SET.301"),
							Largest:  base.ParseInternalKey("b.SET.302"),
						},
						{
							FileNum:  310,
							Size:     1,
							Smallest: base.ParseInternalKey("c.SET.311"),
							Largest:  base.ParseInternalKey("g.SET.312"),
						},
						{
							FileNum:  320,
							Size:     1,
							Smallest: base.ParseInternalKey("h.SET.321"),
							Largest:  base.ParseInternalKey("m.SET.322"),
						},
						{
							FileNum:  330,
							Size:     1,
							Smallest: base.ParseInternalKey("n.SET.331"),
							Largest:  base.ParseInternalKey("z.SET.332"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     0,
				baseLevel: 1,
			},
			want: "100 210 310,320,330",
		},

		{
			// Picking file 200 can grow to include 210 and 220 without
			// widening the overlap with L2 beyond file 300.
			desc: "4 L1 files, 2 L2 files, can grow",
			version: version{
				Files: [numLevels][]fileMetadata{
					1: []fileMetadata{
						{
							FileNum:  200,
							Size:     1,
							Smallest: base.ParseInternalKey("i1.SET.201"),
							Largest:  base.ParseInternalKey("i2.SET.202"),
						},
						{
							FileNum:  210,
							Size:     1,
							Smallest: base.ParseInternalKey("j1.SET.211"),
							Largest:  base.ParseInternalKey("j2.SET.212"),
						},
						{
							FileNum:  220,
							Size:     1,
							Smallest: base.ParseInternalKey("k1.SET.221"),
							Largest:  base.ParseInternalKey("k2.SET.222"),
						},
						{
							FileNum:  230,
							Size:     1,
							Smallest: base.ParseInternalKey("l1.SET.231"),
							Largest:  base.ParseInternalKey("l2.SET.232"),
						},
					},
					2: []fileMetadata{
						{
							FileNum:  300,
							Size:     1,
							Smallest: base.ParseInternalKey("a0.SET.301"),
							Largest:  base.ParseInternalKey("l0.SET.302"),
						},
						{
							FileNum:  310,
							Size:     1,
							Smallest: base.ParseInternalKey("l2.SET.311"),
							Largest:  base.ParseInternalKey("z2.SET.312"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     1,
				baseLevel: 1,
			},
			want: "200,210,220 300 ",
		},

		{
			// Growing past file 200 would widen the L2 overlap (file 310
			// starts at j2), so the compaction cannot grow.
			desc: "4 L1 files, 2 L2 files, can't grow (range)",
			version: version{
				Files: [numLevels][]fileMetadata{
					1: []fileMetadata{
						{
							FileNum:  200,
							Size:     1,
							Smallest: base.ParseInternalKey("i1.SET.201"),
							Largest:  base.ParseInternalKey("i2.SET.202"),
						},
						{
							FileNum:  210,
							Size:     1,
							Smallest: base.ParseInternalKey("j1.SET.211"),
							Largest:  base.ParseInternalKey("j2.SET.212"),
						},
						{
							FileNum:  220,
							Size:     1,
							Smallest: base.ParseInternalKey("k1.SET.221"),
							Largest:  base.ParseInternalKey("k2.SET.222"),
						},
						{
							FileNum:  230,
							Size:     1,
							Smallest: base.ParseInternalKey("l1.SET.231"),
							Largest:  base.ParseInternalKey("l2.SET.232"),
						},
					},
					2: []fileMetadata{
						{
							FileNum:  300,
							Size:     1,
							Smallest: base.ParseInternalKey("a0.SET.301"),
							Largest:  base.ParseInternalKey("j0.SET.302"),
						},
						{
							FileNum:  310,
							Size:     1,
							Smallest: base.ParseInternalKey("j2.SET.311"),
							Largest:  base.ParseInternalKey("z2.SET.312"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     1,
				baseLevel: 1,
			},
			want: "200 300 ",
		},

		{
			// File sizes just under expandedCompactionByteSizeLimit prevent
			// the compaction from growing on size grounds.
			desc: "4 L1 files, 2 L2 files, can't grow (size)",
			version: version{
				Files: [numLevels][]fileMetadata{
					1: []fileMetadata{
						{
							FileNum:  200,
							Size:     expandedCompactionByteSizeLimit(opts, 1) - 1,
							Smallest: base.ParseInternalKey("i1.SET.201"),
							Largest:  base.ParseInternalKey("i2.SET.202"),
						},
						{
							FileNum:  210,
							Size:     expandedCompactionByteSizeLimit(opts, 1) - 1,
							Smallest: base.ParseInternalKey("j1.SET.211"),
							Largest:  base.ParseInternalKey("j2.SET.212"),
						},
						{
							FileNum:  220,
							Size:     expandedCompactionByteSizeLimit(opts, 1) - 1,
							Smallest: base.ParseInternalKey("k1.SET.221"),
							Largest:  base.ParseInternalKey("k2.SET.222"),
						},
						{
							FileNum:  230,
							Size:     expandedCompactionByteSizeLimit(opts, 1) - 1,
							Smallest: base.ParseInternalKey("l1.SET.231"),
							Largest:  base.ParseInternalKey("l2.SET.232"),
						},
					},
					2: []fileMetadata{
						{
							FileNum:  300,
							Size:     expandedCompactionByteSizeLimit(opts, 2) - 1,
							Smallest: base.ParseInternalKey("a0.SET.301"),
							Largest:  base.ParseInternalKey("l0.SET.302"),
						},
						{
							FileNum:  310,
							Size:     expandedCompactionByteSizeLimit(opts, 2) - 1,
							Smallest: base.ParseInternalKey("l2.SET.311"),
							Largest:  base.ParseInternalKey("z2.SET.312"),
						},
					},
				},
			},
			picker: compactionPicker{
				score:     99,
				level:     1,
				baseLevel: 1,
			},
			want: "200 300 ",
		},
	}

	for _, tc := range testCases {
		vs := &versionSet{
			opts:    opts,
			cmp:     DefaultComparer.Compare,
			cmpName: DefaultComparer.Name,
		}
		vs.versions.Init(nil)
		vs.append(&tc.version)
		vs.picker = &tc.picker
		vs.picker.vers = &tc.version

		// Render the picked compaction as "<inputs0> <inputs1> <grandparents>";
		// a nil compaction renders as "".
		c, got := vs.picker.pickAuto(opts, new(uint64)), ""
		if c != nil {
			got0 := fileNums(c.inputs[0])
			got1 := fileNums(c.inputs[1])
			got2 := fileNums(c.grandparents)
			got = got0 + " " + got1 + " " + got2
		}
		if got != tc.want {
			t.Fatalf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want)
		}
	}
}

// TestElideTombstone checks compaction.elideTombstone: a tombstone for a user
// key may be elided only if no level below the compaction's output level
// could contain that key.
func TestElideTombstone(t *testing.T) {
	testCases := []struct {
		desc    string
		level   int
		version version
		// wants maps a user key to whether its tombstone may be elided.
		wants map[string]bool
	}{
		{
			desc:    "empty",
			level:   1,
			version: version{},
			wants: map[string]bool{
				"x": true,
			},
		},
		{
			desc:  "non-empty",
			level: 1,
			version: version{
				Files: [numLevels][]fileMetadata{
					1: []fileMetadata{
						{
							Smallest: base.ParseInternalKey("c.SET.801"),
							Largest:  base.ParseInternalKey("g.SET.800"),
						},
						{
							Smallest: base.ParseInternalKey("x.SET.701"),
							Largest:  base.ParseInternalKey("y.SET.700"),
						},
					},
					2: []fileMetadata{
						{
							Smallest: base.ParseInternalKey("d.SET.601"),
							Largest:  base.ParseInternalKey("h.SET.600"),
						},
						{
							Smallest: base.ParseInternalKey("r.SET.501"),
							Largest:  base.ParseInternalKey("t.SET.500"),
						},
					},
					3: []fileMetadata{
						{
							Smallest: base.ParseInternalKey("f.SET.401"),
							Largest:  base.ParseInternalKey("g.SET.400"),
						},
						{
							Smallest: base.ParseInternalKey("w.SET.301"),
							Largest:  base.ParseInternalKey("x.SET.300"),
						},
					},
					4: []fileMetadata{
						{
							Smallest: base.ParseInternalKey("f.SET.201"),
							Largest:  base.ParseInternalKey("m.SET.200"),
						},
						{
							Smallest: base.ParseInternalKey("t.SET.101"),
							Largest:  base.ParseInternalKey("t.SET.100"),
						},
					},
				},
			},
			wants: map[string]bool{
				"b": true,
				"c": true,
				"d": true,
				"e": true,
				"f": false,
				"g": false,
				"h": false,
				"l": false,
				"m": false,
				"n": true,
				"q": true,
				"r": true,
				"s": true,
				"t": false,
				"u": true,
				"v": true,
				"w": false,
				"x": false,
				"y": true,
				"z": true,
			},
		},
		{
			// Adjacent files sharing boundary user keys must still be treated
			// as covering those keys.
			desc:  "repeated ukey",
			level: 1,
			version: version{
				Files: [numLevels][]fileMetadata{
					6: []fileMetadata{
						{
							Smallest: base.ParseInternalKey("i.SET.401"),
							Largest:  base.ParseInternalKey("i.SET.400"),
						},
						{
							Smallest: base.ParseInternalKey("i.SET.301"),
							Largest:  base.ParseInternalKey("k.SET.300"),
						},
						{
							Smallest: base.ParseInternalKey("k.SET.201"),
							Largest:  base.ParseInternalKey("m.SET.200"),
						},
						{
							Smallest: base.ParseInternalKey("m.SET.101"),
							Largest:  base.ParseInternalKey("m.SET.100"),
						},
					},
				},
			},
			wants: map[string]bool{
				"h": true,
				"i": false,
				"j": false,
				"k": false,
				"l": false,
				"m": false,
				"n": true,
			},
		},
	}

	for _, tc := range testCases {
		c := compaction{
			cmp:         DefaultComparer.Compare,
			version:     &tc.version,
			startLevel:  tc.level,
			outputLevel: tc.level + 1,
		}
		for ukey, want := range tc.wants {
			if got := c.elideTombstone([]byte(ukey)); got != want {
				t.Errorf("%s: ukey=%q: got %v, want %v", tc.desc, ukey, got, want)
			}
		}
	}
}

// TestCompaction drives an end-to-end flush/compaction cycle through a real
// DB on an in-memory filesystem, asserting after each write exactly which
// keys live in the memtable and which in on-disk tables.
func TestCompaction(t *testing.T) {
	const memTableSize = 10000
	// Tuned so that 2 values can reside in the memtable before a flush, but a
	// 3rd value will cause a flush. Needs to account for the max skiplist node
	// size.
	const valueSize = 3500

	mem := vfs.NewMem()
	d, err := Open("", &Options{
		FS:           mem,
		MemTableSize: memTableSize,
	})
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	// Install a limiter that never blocks so we can assert on how compaction
	// consulted it.
	mockLimiter := mockCountLimiter{burst: int(math.MaxInt32)}
	d.compactionLimiter = &mockLimiter

	// get1 concatenates every user key visible through iter, e.g. "Aa".
	get1 := func(iter internalIterator) (ret string) {
		b := &bytes.Buffer{}
		for key, _ := iter.First(); key != nil; key, _ = iter.Next() {
			b.Write(key.UserKey)
		}
		if err := iter.Close(); err != nil {
			t.Fatalf("iterator Close: %v", err)
		}
		return b.String()
	}
	// getAll snapshots the DB state: keys in the mutable memtable, and the
	// keys of each sstable ("<keys>." per table, sorted) across all levels.
	getAll := func() (gotMem, gotDisk string, err error) {
		d.mu.Lock()
		defer d.mu.Unlock()

		if d.mu.mem.mutable != nil {
			gotMem = get1(d.mu.mem.mutable.newIter(nil))
		}
		ss := []string(nil)
		v := d.mu.versions.currentVersion()
		for _, files := range v.Files {
			for _, meta := range files {
				f, err := mem.Open(base.MakeFilename("", fileTypeTable, meta.FileNum))
				if err != nil {
					return "", "", fmt.Errorf("Open: %v", err)
				}
				defer f.Close()
				r, err := sstable.NewReader(f, 0, meta.FileNum, nil)
				if err != nil {
					return "", "", fmt.Errorf("NewReader: %v", err)
				}
				defer r.Close()
				ss = append(ss, get1(r.NewIter(nil /* lower */, nil /* upper */))+".")
			}
		}
		sort.Strings(ss)
		return gotMem, strings.Join(ss, ""), nil
	}

	value := bytes.Repeat([]byte("x"), valueSize)
	// key is "+k" for Set(k) or "-k" for Delete(k); wantMem/wantDisk are the
	// expected getAll results after the operation settles.
	testCases := []struct {
		key, wantMem, wantDisk string
	}{
		{"+A", "A", ""},
		{"+a", "Aa", ""},
		{"+B", "B", "Aa."},
		{"+b", "Bb", "Aa."},
		// The next level-0 table overwrites the B key.
		{"+C", "C", "Aa.Bb."},
		{"+B", "BC", "Aa.Bb."},
		// The next level-0 table deletes the a key.
		{"+D", "D", "Aa.BC.Bb."},
		{"-a", "Da", "Aa.BC.Bb."},
		{"+d", "Dad", "Aa.BC.Bb."},
		// The next addition creates the fourth level-0 table, and l0CompactionTrigger == 4,
		// so this triggers a non-trivial compaction into one level-1 table. Note that the
		// keys in this one larger table are interleaved from the four smaller ones.
		{"+E", "E", "ABCDbd."},
		{"+e", "Ee", "ABCDbd."},
		{"+F", "F", "ABCDbd.Ee."},
	}
	for _, tc := range testCases {
		if key := tc.key[1:]; tc.key[0] == '+' {
			if err := d.Set([]byte(key), value, nil); err != nil {
				t.Errorf("%q: Set: %v", key, err)
				break
			}
		} else {
			if err := d.Delete([]byte(key), nil); err != nil {
				t.Errorf("%q: Delete: %v", key, err)
				break
			}
		}

		// try backs off to allow any writes to the memfs to complete.
		err := try(100*time.Microsecond, 20*time.Second, func() error {
			gotMem, gotDisk, err := getAll()
			if err != nil {
				return err
			}
			if testing.Verbose() {
				fmt.Printf("mem=%s (%s) disk=%s (%s)\n", gotMem, tc.wantMem, gotDisk, tc.wantDisk)
			}

			if gotMem != tc.wantMem {
				return fmt.Errorf("mem: got %q, want %q", gotMem, tc.wantMem)
			}
			if gotDisk != tc.wantDisk {
				return fmt.Errorf("ldb: got %q, want %q", gotDisk, tc.wantDisk)
			}
			return nil
		})
		if err != nil {
			t.Errorf("%q: %v", tc.key, err)
		}
	}

	if err := d.Close(); err != nil {
		t.Fatalf("db Close: %v", err)
	}

	// The compactions above must have consulted the limiter's Allow path and
	// never needed to Wait (burst is effectively unlimited).
	if !(mockLimiter.allowCount > 0) {
		t.Errorf("limiter allow: got %d, want >%d", mockLimiter.allowCount, 0)
	}
	if mockLimiter.waitCount != 0 {
		t.Errorf("limiter wait: got %d, want %d", mockLimiter.waitCount, 0)
	}
}

// TestManualCompaction is a datadriven test over
// testdata/manual_compaction. Supported commands: "batch" (apply a batch),
// "define" (rebuild the DB from a spec), "iter" (run seek/next/prev ops
// against a new iterator), and "compact" (run a manual compaction), each
// returning the resulting LSM or iterator output as a string.
func TestManualCompaction(t *testing.T) {
	mem := vfs.NewMem()
	err := mem.MkdirAll("ext", 0755)
	if err != nil {
		t.Fatal(err)
	}

	d, err := Open("", &Options{
		FS: mem,
	})
	if err != nil {
		t.Fatal(err)
	}

	datadriven.RunTest(t, "testdata/manual_compaction", func(td *datadriven.TestData) string {
		switch td.Cmd {
		case "batch":
			b := d.NewIndexedBatch()
			if err := runBatchDefineCmd(td, b); err != nil {
				return err.Error()
			}
			// NOTE(review): the Commit error is discarded — consider checking
			// it so a failed commit doesn't surface as a confusing later diff.
			b.Commit(nil)
			return ""

		case "define":
			var err error
			if d, err = runDBDefineCmd(td, nil /* options */); err != nil {
				return err.Error()
			}

			d.mu.Lock()
			s := d.mu.versions.currentVersion().String()
			d.mu.Unlock()
			return s

		case "iter":
			iter := d.NewIter(nil)
			defer iter.Close()
			var b bytes.Buffer
			for _, line := range strings.Split(td.Input, "\n") {
				parts := strings.Fields(line)
				if len(parts) == 0 {
					continue
				}
				switch parts[0] {
				case "seek-ge":
					if len(parts) != 2 {
						// NOTE(review): Sprintf with no args — fmt.Sprint (or a
						// plain string literal) would avoid a go vet warning.
						return fmt.Sprintf("seek-ge <key>\n")
					}
					iter.SeekGE([]byte(strings.TrimSpace(parts[1])))
				case "seek-lt":
					if len(parts) != 2 {
						return fmt.Sprintf("seek-lt <key>\n")
					}
					iter.SeekLT([]byte(strings.TrimSpace(parts[1])))
				case "next":
					iter.Next()
				case "prev":
					iter.Prev()
				default:
					return fmt.Sprintf("unknown op: %s", parts[0])
				}
				// After each op emit "key:value", an error line, or "." when
				// the iterator is exhausted.
				if iter.Valid() {
					fmt.Fprintf(&b, "%s:%s\n", iter.Key(), iter.Value())
				} else if err := iter.Error(); err != nil {
					fmt.Fprintf(&b, "err=%v\n", err)
				} else {
					fmt.Fprintf(&b, ".\n")
				}
			}
			return b.String()

		case "compact":
			if err := runCompactCommand(td, d); err != nil {
				return err.Error()
			}

			d.mu.Lock()
			s := d.mu.versions.currentVersion().String()
			d.mu.Unlock()
			return s

		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
}

// TestCompactionShouldStopBefore is a datadriven test of
// compaction.shouldStopBefore: given grandparent files ("define") and a
// max-overlap-bytes budget, "compact" walks a sequence of keys and reports
// the key ranges at which output would be split.
func TestCompactionShouldStopBefore(t *testing.T) {
	cmp := DefaultComparer.Compare
	var grandparents []fileMetadata

	// parseMeta parses "smallest-largest" into file metadata with user-key
	// bounds only.
	parseMeta := func(s string) fileMetadata {
		parts := strings.Split(s, "-")
		if len(parts) != 2 {
			t.Fatalf("malformed table spec: %s", s)
		}
		return fileMetadata{
			Smallest: InternalKey{UserKey: []byte(parts[0])},
			Largest:  InternalKey{UserKey: []byte(parts[1])},
		}
	}

	datadriven.RunTest(t, "testdata/compaction_should_stop_before",
		func(d *datadriven.TestData) string {
			switch d.Cmd {
			case "define":
				grandparents = nil
				if len(d.Input) == 0 {
					return ""
				}
				// Each input line is "<smallest>-<largest> <size>".
				for _, data := range strings.Split(d.Input, "\n") {
					parts := strings.Fields(data)
					if len(parts) != 2 {
						return fmt.Sprintf("malformed test:\n%s", d.Input)
					}

					meta := parseMeta(parts[0])
					var err error
					meta.Size, err = strconv.ParseUint(parts[1], 10, 64)
					if err != nil {
						return err.Error()
					}
					grandparents = append(grandparents, meta)
				}
				manifest.SortBySmallest(grandparents, cmp)
				return ""

			case "compact":
				c := &compaction{
					cmp:          cmp,
					grandparents: grandparents,
				}
				if len(d.CmdArgs) != 1 {
					return fmt.Sprintf("%s expects 1 argument", d.Cmd)
				}
				if len(d.CmdArgs[0].Vals) != 1 {
					return fmt.Sprintf("%s expects 1 value", d.CmdArgs[0].Key)
				}
				var err error
				c.maxOverlapBytes, err = strconv.ParseUint(d.CmdArgs[0].Vals[0], 10, 64)
				if err != nil {
					return err.Error()
				}

				// Emit "smallest-largest" for each run of keys between stop
				// points, plus the final run.
				var buf bytes.Buffer
				var smallest, largest string
				for i, key := range strings.Fields(d.Input) {
					if i == 0 {
						smallest = key
					}
					if c.shouldStopBefore(base.MakeInternalKey([]byte(key), 0, 0)) {
						fmt.Fprintf(&buf, "%s-%s\n", smallest, largest)
						smallest = key
					}
					largest = key
				}
				fmt.Fprintf(&buf, "%s-%s\n", smallest, largest)
				return buf.String()

			default:
				return fmt.Sprintf("unknown command: %s", d.Cmd)
			}
		})
}

// TestCompactionOutputLevel is a datadriven test of newCompaction's
// output-level and max-output-file-size computation for given start and base
// levels. A recover wrapper turns expected panics (invalid level combos) into
// test output.
func TestCompactionOutputLevel(t *testing.T) {
	opts := (*Options)(nil).EnsureDefaults()
	version := &version{}

	datadriven.RunTest(t, "testdata/compaction_output_level",
		func(d *datadriven.TestData) (res string) {
			defer func() {
				if r := recover(); r != nil {
					res = fmt.Sprintln(r)
				}
			}()

			switch d.Cmd {
			case "compact":
				var start, base int
				d.ScanArgs(t, "start", &start)
				d.ScanArgs(t, "base", &base)
				c := newCompaction(opts, version, start, base, new(uint64))
				return fmt.Sprintf("output=%d\nmax-output-file-size=%d\n",
					c.outputLevel, c.maxOutputFileSize)

			default:
				return fmt.Sprintf("unknown command: %s", d.Cmd)
			}
		})
}

// TestCompactionExpandInputs is a datadriven test of compaction.expandInputs:
// starting from a single file ("expand-inputs <index>") in a defined level,
// it reports the full set of files that must compact together because of
// overlapping/adjacent user keys.
func TestCompactionExpandInputs(t *testing.T) {
	cmp := DefaultComparer.Compare
	var files []fileMetadata

	// parseMeta parses "smallest-largest" where each bound is a full internal
	// key spec like "a.SET.1".
	parseMeta := func(s string) fileMetadata {
		parts := strings.Split(s, "-")
		if len(parts) != 2 {
			t.Fatalf("malformed table spec: %s", s)
		}
		return fileMetadata{
			Smallest: base.ParseInternalKey(parts[0]),
			Largest:  base.ParseInternalKey(parts[1]),
		}
	}

	datadriven.RunTest(t, "testdata/compaction_expand_inputs",
		func(d *datadriven.TestData) string {
			switch d.Cmd {
			case "define":
				files = nil
				if len(d.Input) == 0 {
					return ""
				}
				for _, data := range strings.Split(d.Input, "\n") {
					meta := parseMeta(data)
					// File numbers are just the definition order.
					meta.FileNum = uint64(len(files))
					files = append(files, meta)
				}
				manifest.SortBySmallest(files, cmp)
				return ""

			case "expand-inputs":
				c := &compaction{
					cmp:        cmp,
					version:    &version{},
					startLevel: 1,
				}
				c.version.Files[c.startLevel] = files
				if len(d.CmdArgs) != 1 {
					return fmt.Sprintf("%s expects 1 argument", d.Cmd)
				}
				index, err := strconv.ParseInt(d.CmdArgs[0].String(), 10, 64)
				if err != nil {
					return err.Error()
				}

				inputs := c.expandInputs(files[index : index+1])

				var buf bytes.Buffer
				for i := range inputs {
					f := &inputs[i]
					fmt.Fprintf(&buf, "%d: %s-%s\n", f.FileNum, f.Smallest, f.Largest)
				}
				return buf.String()

			default:
				return fmt.Sprintf("unknown command: %s", d.Cmd)
			}
		})
}

// TestCompactionAtomicUnitBounds is a datadriven test of
// compaction.atomicUnitBounds: for a given input file index it reports the
// user-key bounds of the atomic compaction unit containing that file.
func TestCompactionAtomicUnitBounds(t *testing.T) {
	cmp := DefaultComparer.Compare
	var files []fileMetadata

	// parseMeta parses "smallest-largest" internal-key bounds (see
	// base.ParseInternalKey for the "<ukey>.<kind>.<seq>" format).
	parseMeta := func(s string) fileMetadata {
		parts := strings.Split(s, "-")
		if len(parts) != 2 {
			t.Fatalf("malformed table spec: %s", s)
		}
		return fileMetadata{
			Smallest: base.ParseInternalKey(parts[0]),
			Largest:  base.ParseInternalKey(parts[1]),
		}
	}

	datadriven.RunTest(t, "testdata/compaction_atomic_unit_bounds",
		func(d *datadriven.TestData) string {
			switch d.Cmd {
			case "define":
				files = nil
				if len(d.Input) == 0 {
					return ""
				}
				for _, data := range strings.Split(d.Input, "\n") {
					meta := parseMeta(data)
					meta.FileNum = uint64(len(files))
					files = append(files, meta)
				}
				manifest.SortBySmallest(files, cmp)
				return ""

			case "atomic-unit-bounds":
				c := &compaction{
					cmp: cmp,
				}
				c.inputs[0] = files
				if len(d.CmdArgs) != 1 {
					return fmt.Sprintf("%s expects 1 argument", d.Cmd)
				}
				index, err := strconv.ParseInt(d.CmdArgs[0].String(), 10, 64)
				if err != nil {
					return err.Error()
				}

				lower, upper := c.atomicUnitBounds(&files[index])
				return fmt.Sprintf("%s-%s\n", lower, upper)

			default:
				return fmt.Sprintf("unknown command: %s", d.Cmd)
			}
		})
}

// TestCompactionAllowZeroSeqNum is a datadriven test of
// compaction.allowZeroSeqNum: given a defined DB and per-line sets of input
// tables spelled as "L<level>:<smallest>-<largest>", it reports whether the
// compaction may rewrite sequence numbers to zero.
func TestCompactionAllowZeroSeqNum(t *testing.T) {
	var d *DB

	// Matches "L<level>:<smallest>-<largest>" table specs.
	metaRE := regexp.MustCompile(`^L([0-9]+):([^-]+)-(.+)$`)
	parseMeta := func(s string) (level int, meta fileMetadata) {
		match := metaRE.FindStringSubmatch(s)
		if match == nil {
			t.Fatalf("malformed table spec: %s", s)
		}
		level, err := strconv.Atoi(match[1])
		if err != nil {
			t.Fatalf("malformed table spec: %s: %s", s, err)
		}
		meta = fileMetadata{
			Smallest: InternalKey{UserKey: []byte(match[2])},
			Largest:  InternalKey{UserKey: []byte(match[3])},
		}
		return level, meta
	}

	datadriven.RunTest(t, "testdata/compaction_allow_zero_seqnum",
		func(td *datadriven.TestData) string {
			switch td.Cmd {
			case "define":
				var err error
				if d, err = runDBDefineCmd(td, nil /* options */); err != nil {
					return err.Error()
				}

				d.mu.Lock()
				s := d.mu.versions.currentVersion().String()
				d.mu.Unlock()
				return s

			case "allow-zero-seqnum":
				d.mu.Lock()
				c := &compaction{
					cmp:     d.cmp,
					version: d.mu.versions.currentVersion(),
				}
				d.mu.Unlock()

				var buf bytes.Buffer
				for _, line := range strings.Split(td.Input, "\n") {
					parts := strings.Fields(line)
					if len(parts) == 0 {
						continue
					}
					// Reset per line: the first spec's level becomes the
					// start level; specs at startLevel+1 go to inputs[1].
					c.inputs[0] = nil
					c.inputs[1] = nil
					c.startLevel = -1

					for _, p := range parts {
						level, meta := parseMeta(p)
						i := 0
						switch {
						case c.startLevel == -1:
							c.startLevel = level
						case c.startLevel+1 == level:
							i = 1
						case c.startLevel != level:
							return fmt.Sprintf("invalid level %d: expected %d or %d",
								level, c.startLevel, c.startLevel+1)
						}
						c.inputs[i] = append(c.inputs[i], meta)
					}
					c.outputLevel = c.startLevel + 1
					fmt.Fprintf(&buf, "%t\n", c.allowZeroSeqNum(nil))
				}
				return buf.String()

			default:
				return fmt.Sprintf("unknown command: %s", td.Cmd)
			}
		})
}