github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/batch_test.go (about) 1 // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use 2 // of this source code is governed by a BSD-style license that can be found in 3 // the LICENSE file. 4 5 package pebble 6 7 import ( 8 "bytes" 9 "context" 10 "encoding/binary" 11 "encoding/hex" 12 "fmt" 13 "io" 14 "math" 15 "math/rand" 16 "strconv" 17 "strings" 18 "sync" 19 "testing" 20 "time" 21 "unicode" 22 23 "github.com/cockroachdb/datadriven" 24 "github.com/cockroachdb/errors" 25 "github.com/cockroachdb/pebble/internal/base" 26 "github.com/cockroachdb/pebble/internal/batchskl" 27 "github.com/cockroachdb/pebble/internal/itertest" 28 "github.com/cockroachdb/pebble/internal/keyspan" 29 "github.com/cockroachdb/pebble/internal/testkeys" 30 "github.com/cockroachdb/pebble/vfs" 31 "github.com/stretchr/testify/require" 32 ) 33 34 func TestBatch(t *testing.T) { 35 testBatch(t, 0) 36 testBatch(t, batchInitialSize) 37 } 38 39 func testBatch(t *testing.T, size int) { 40 type testCase struct { 41 kind InternalKeyKind 42 key, value string 43 valueInt uint32 44 } 45 46 verifyTestCases := func(b *Batch, testCases []testCase, indexedPointKindsOnly bool) { 47 r := b.Reader() 48 49 for _, tc := range testCases { 50 if indexedPointKindsOnly && (tc.kind == InternalKeyKindLogData || tc.kind == InternalKeyKindIngestSST || 51 tc.kind == InternalKeyKindRangeDelete) { 52 continue 53 } 54 kind, k, v, ok, err := r.Next() 55 if !ok { 56 if err != nil { 57 t.Fatal(err) 58 } 59 t.Fatalf("next returned !ok: test case = %v", tc) 60 } 61 key, value := string(k), string(v) 62 if kind != tc.kind || key != tc.key || value != tc.value { 63 t.Errorf("got (%d, %q, %q), want (%d, %q, %q)", 64 kind, key, value, tc.kind, tc.key, tc.value) 65 } 66 } 67 if len(r) != 0 { 68 t.Errorf("reader was not exhausted: remaining bytes = %q", r) 69 } 70 } 71 72 encodeFileNum := func(n base.FileNum) string { 73 return string(binary.AppendUvarint(nil, 
uint64(n))) 74 } 75 decodeFileNum := func(d []byte) base.FileNum { 76 val, n := binary.Uvarint(d) 77 if n <= 0 { 78 t.Fatalf("invalid filenum encoding") 79 } 80 return base.FileNum(val) 81 } 82 83 // RangeKeySet and RangeKeyUnset are untested here because they don't expose 84 // deferred variants. This is a consequence of these keys' more complex 85 // value encodings. 86 testCases := []testCase{ 87 {InternalKeyKindIngestSST, encodeFileNum(1), "", 0}, 88 {InternalKeyKindSet, "roses", "red", 0}, 89 {InternalKeyKindSet, "violets", "blue", 0}, 90 {InternalKeyKindDelete, "roses", "", 0}, 91 {InternalKeyKindSingleDelete, "roses", "", 0}, 92 {InternalKeyKindSet, "", "", 0}, 93 {InternalKeyKindSet, "", "non-empty", 0}, 94 {InternalKeyKindDelete, "", "", 0}, 95 {InternalKeyKindSingleDelete, "", "", 0}, 96 {InternalKeyKindSet, "grass", "green", 0}, 97 {InternalKeyKindSet, "grass", "greener", 0}, 98 {InternalKeyKindSet, "eleventy", strings.Repeat("!!11!", 100), 0}, 99 {InternalKeyKindDelete, "nosuchkey", "", 0}, 100 {InternalKeyKindDeleteSized, "eleventy", string(binary.AppendUvarint([]byte(nil), 508)), 500}, 101 {InternalKeyKindSingleDelete, "nosuchkey", "", 0}, 102 {InternalKeyKindSet, "binarydata", "\x00", 0}, 103 {InternalKeyKindSet, "binarydata", "\xff", 0}, 104 {InternalKeyKindMerge, "merge", "mergedata", 0}, 105 {InternalKeyKindMerge, "merge", "", 0}, 106 {InternalKeyKindMerge, "", "", 0}, 107 {InternalKeyKindRangeDelete, "a", "b", 0}, 108 {InternalKeyKindRangeDelete, "", "", 0}, 109 {InternalKeyKindLogData, "logdata", "", 0}, 110 {InternalKeyKindLogData, "", "", 0}, 111 {InternalKeyKindRangeKeyDelete, "grass", "green", 0}, 112 {InternalKeyKindRangeKeyDelete, "", "", 0}, 113 {InternalKeyKindDeleteSized, "nosuchkey", string(binary.AppendUvarint([]byte(nil), 11)), 2}, 114 } 115 b := newBatchWithSize(nil, size) 116 for _, tc := range testCases { 117 switch tc.kind { 118 case InternalKeyKindSet: 119 _ = b.Set([]byte(tc.key), []byte(tc.value), nil) 120 case 
InternalKeyKindMerge: 121 _ = b.Merge([]byte(tc.key), []byte(tc.value), nil) 122 case InternalKeyKindDelete: 123 _ = b.Delete([]byte(tc.key), nil) 124 case InternalKeyKindDeleteSized: 125 _ = b.DeleteSized([]byte(tc.key), tc.valueInt, nil) 126 case InternalKeyKindSingleDelete: 127 _ = b.SingleDelete([]byte(tc.key), nil) 128 case InternalKeyKindRangeDelete: 129 _ = b.DeleteRange([]byte(tc.key), []byte(tc.value), nil) 130 case InternalKeyKindLogData: 131 _ = b.LogData([]byte(tc.key), nil) 132 case InternalKeyKindRangeKeyDelete: 133 _ = b.RangeKeyDelete([]byte(tc.key), []byte(tc.value), nil) 134 case InternalKeyKindIngestSST: 135 b.ingestSST(decodeFileNum([]byte(tc.key))) 136 } 137 } 138 verifyTestCases(b, testCases, false /* indexedKindsOnly */) 139 140 b.Reset() 141 // Run the same operations, this time using the Deferred variants of each 142 // operation (eg. SetDeferred). 143 for _, tc := range testCases { 144 key := []byte(tc.key) 145 value := []byte(tc.value) 146 switch tc.kind { 147 case InternalKeyKindSet: 148 d := b.SetDeferred(len(key), len(value)) 149 copy(d.Key, key) 150 copy(d.Value, value) 151 d.Finish() 152 case InternalKeyKindMerge: 153 d := b.MergeDeferred(len(key), len(value)) 154 copy(d.Key, key) 155 copy(d.Value, value) 156 d.Finish() 157 case InternalKeyKindDelete: 158 d := b.DeleteDeferred(len(key)) 159 copy(d.Key, key) 160 copy(d.Value, value) 161 d.Finish() 162 case InternalKeyKindDeleteSized: 163 d := b.DeleteSizedDeferred(len(tc.key), tc.valueInt) 164 copy(d.Key, key) 165 d.Finish() 166 case InternalKeyKindSingleDelete: 167 d := b.SingleDeleteDeferred(len(key)) 168 copy(d.Key, key) 169 copy(d.Value, value) 170 d.Finish() 171 case InternalKeyKindRangeDelete: 172 d := b.DeleteRangeDeferred(len(key), len(value)) 173 copy(d.Key, key) 174 copy(d.Value, value) 175 d.Finish() 176 case InternalKeyKindLogData: 177 _ = b.LogData([]byte(tc.key), nil) 178 case InternalKeyKindIngestSST: 179 b.ingestSST(decodeFileNum([]byte(tc.key))) 180 case 
InternalKeyKindRangeKeyDelete: 181 d := b.RangeKeyDeleteDeferred(len(key), len(value)) 182 copy(d.Key, key) 183 copy(d.Value, value) 184 d.Finish() 185 } 186 } 187 verifyTestCases(b, testCases, false /* indexedKindsOnly */) 188 189 b.Reset() 190 // Run the same operations, this time using AddInternalKey instead of the 191 // Kind-specific methods. 192 for _, tc := range testCases { 193 if tc.kind == InternalKeyKindLogData || tc.kind == InternalKeyKindIngestSST || 194 tc.kind == InternalKeyKindRangeDelete { 195 continue 196 } 197 key := []byte(tc.key) 198 value := []byte(tc.value) 199 b.AddInternalKey(&InternalKey{UserKey: key, Trailer: base.MakeTrailer(0, tc.kind)}, value, nil) 200 } 201 verifyTestCases(b, testCases, true /* indexedKindsOnly */) 202 } 203 204 func TestBatchPreAlloc(t *testing.T) { 205 var cases = []struct { 206 size int 207 exp int 208 }{ 209 {0, batchInitialSize}, 210 {batchInitialSize, batchInitialSize}, 211 {2 * batchInitialSize, 2 * batchInitialSize}, 212 } 213 for _, c := range cases { 214 b := newBatchWithSize(nil, c.size) 215 b.Set([]byte{0x1}, []byte{0x2}, nil) 216 if cap(b.data) != c.exp { 217 t.Errorf("Unexpected memory space, required: %d, got: %d", c.exp, cap(b.data)) 218 } 219 } 220 } 221 222 func TestBatchIngestSST(t *testing.T) { 223 // Verify that Batch.IngestSST has the correct batch count and memtable 224 // size. 
225 var b Batch 226 b.ingestSST(1) 227 require.Equal(t, int(b.Count()), 1) 228 b.ingestSST(2) 229 require.Equal(t, int(b.Count()), 2) 230 require.Equal(t, int(b.memTableSize), 0) 231 require.Equal(t, b.ingestedSSTBatch, true) 232 } 233 234 func TestBatchLen(t *testing.T) { 235 var b Batch 236 237 requireLenAndReprEq := func(size int) { 238 require.Equal(t, size, b.Len()) 239 require.Equal(t, size, len(b.Repr())) 240 } 241 242 requireLenAndReprEq(batchHeaderLen) 243 244 key := "test-key" 245 value := "test-value" 246 247 err := b.Set([]byte(key), []byte(value), nil) 248 require.NoError(t, err) 249 250 requireLenAndReprEq(33) 251 252 err = b.Delete([]byte(key), nil) 253 require.NoError(t, err) 254 255 requireLenAndReprEq(43) 256 } 257 258 func TestBatchEmpty(t *testing.T) { 259 testBatchEmpty(t, 0) 260 testBatchEmpty(t, batchInitialSize) 261 } 262 263 func testBatchEmpty(t *testing.T, size int) { 264 b := newBatchWithSize(nil, size) 265 require.True(t, b.Empty()) 266 267 ops := []func(*Batch) error{ 268 func(b *Batch) error { return b.Set(nil, nil, nil) }, 269 func(b *Batch) error { return b.Merge(nil, nil, nil) }, 270 func(b *Batch) error { return b.Delete(nil, nil) }, 271 func(b *Batch) error { return b.DeleteRange(nil, nil, nil) }, 272 func(b *Batch) error { return b.LogData(nil, nil) }, 273 func(b *Batch) error { return b.RangeKeySet(nil, nil, nil, nil, nil) }, 274 func(b *Batch) error { return b.RangeKeyUnset(nil, nil, nil, nil) }, 275 func(b *Batch) error { return b.RangeKeyDelete(nil, nil, nil) }, 276 } 277 278 for _, op := range ops { 279 require.NoError(t, op(b)) 280 require.False(t, b.Empty()) 281 b.Reset() 282 require.True(t, b.Empty()) 283 // Reset may choose to reuse b.data, so clear it to the zero value in 284 // order to test the lazy initialization of b.data. 
285 b = newBatchWithSize(nil, size) 286 } 287 288 _ = b.Reader() 289 require.True(t, b.Empty()) 290 b.Reset() 291 require.True(t, b.Empty()) 292 b = newBatchWithSize(nil, size) 293 294 require.Equal(t, uint64(0), b.SeqNum()) 295 require.True(t, b.Empty()) 296 b.Reset() 297 require.True(t, b.Empty()) 298 b = &Batch{} 299 300 d, err := Open("", &Options{ 301 FS: vfs.NewMem(), 302 }) 303 require.NoError(t, err) 304 defer d.Close() 305 ib := newIndexedBatch(d, DefaultComparer) 306 iter, _ := ib.NewIter(nil) 307 require.False(t, iter.First()) 308 iter2, err := iter.Clone(CloneOptions{}) 309 require.NoError(t, err) 310 require.NoError(t, iter.Close()) 311 _, err = iter.Clone(CloneOptions{}) 312 require.True(t, err != nil) 313 require.False(t, iter2.First()) 314 require.NoError(t, iter2.Close()) 315 iter3, err := ib.NewBatchOnlyIter(context.Background(), nil) 316 require.NoError(t, err) 317 require.False(t, iter3.First()) 318 _, err = iter3.Clone(CloneOptions{}) 319 require.Error(t, err) 320 require.NoError(t, iter3.Close()) 321 } 322 323 func TestBatchApplyNoSyncWait(t *testing.T) { 324 db, err := Open("", &Options{ 325 FS: vfs.NewMem(), 326 }) 327 require.NoError(t, err) 328 defer db.Close() 329 var batches []*Batch 330 options := &WriteOptions{Sync: true} 331 for i := 0; i < 10000; i++ { 332 b := db.NewBatch() 333 str := fmt.Sprintf("a%d", i) 334 require.NoError(t, b.Set([]byte(str), []byte(str), nil)) 335 require.NoError(t, db.ApplyNoSyncWait(b, options)) 336 // k-v pair is visible even if not yet synced. 
337 val, closer, err := db.Get([]byte(str)) 338 require.NoError(t, err) 339 require.Equal(t, str, string(val)) 340 closer.Close() 341 batches = append(batches, b) 342 } 343 for _, b := range batches { 344 require.NoError(t, b.SyncWait()) 345 b.Close() 346 } 347 } 348 349 func TestBatchReset(t *testing.T) { 350 db, err := Open("", &Options{ 351 FS: vfs.NewMem(), 352 }) 353 require.NoError(t, err) 354 defer db.Close() 355 key := "test-key" 356 value := "test-value" 357 b := db.NewBatch() 358 require.NoError(t, b.Set([]byte(key), []byte(value), nil)) 359 dd := b.DeleteRangeDeferred(len(key), len(value)) 360 copy(dd.Key, key) 361 copy(dd.Value, value) 362 dd.Finish() 363 364 require.NoError(t, b.RangeKeySet([]byte(key), []byte(value), []byte(value), []byte(value), nil)) 365 366 b.setSeqNum(100) 367 b.applied.Store(true) 368 b.commitErr = errors.New("test-error") 369 b.commit.Add(1) 370 b.fsyncWait.Add(1) 371 require.Equal(t, uint32(3), b.Count()) 372 require.Equal(t, uint64(1), b.countRangeDels) 373 require.Equal(t, uint64(1), b.countRangeKeys) 374 require.True(t, len(b.data) > 0) 375 require.True(t, b.SeqNum() > 0) 376 require.True(t, b.memTableSize > 0) 377 require.NotEqual(t, b.deferredOp, DeferredBatchOp{}) 378 // At this point b.data has not been modified since the db.NewBatch() and is 379 // either nil or contains a byte slice of length batchHeaderLen, with a 0 380 // seqnum encoded in data[0:8] and an arbitrary count encoded in data[8:12]. 381 // The following commented code will often fail. 382 // count := binary.LittleEndian.Uint32(b.countData()) 383 // if count != 0 && count != 3 { 384 // t.Fatalf("count: %d", count) 385 // } 386 // If we simply called b.Reset now and later used b.data to initialize 387 // expected, the count in expected will also be arbitrary. So we fix the 388 // count in b.data now by calling b.Repr(). This call isn't essential, since 389 // we will call b.Repr() again, and just shows that it fixes the count in 390 // b.data. 
391 _ = b.Repr() 392 require.Equal(t, uint32(3), binary.LittleEndian.Uint32(b.countData())) 393 394 b.Reset() 395 require.Equal(t, db, b.db) 396 require.Equal(t, false, b.applied.Load()) 397 require.Nil(t, b.commitErr) 398 require.Equal(t, uint32(0), b.Count()) 399 require.Equal(t, uint64(0), b.countRangeDels) 400 require.Equal(t, uint64(0), b.countRangeKeys) 401 require.Equal(t, batchHeaderLen, len(b.data)) 402 require.Equal(t, uint64(0), b.SeqNum()) 403 require.Equal(t, uint64(0), b.memTableSize) 404 require.Equal(t, FormatMajorVersion(0x00), b.minimumFormatMajorVersion) 405 require.Equal(t, b.deferredOp, DeferredBatchOp{}) 406 _ = b.Repr() 407 408 var expected Batch 409 require.NoError(t, expected.SetRepr(b.data)) 410 expected.db = db 411 require.Equal(t, &expected, b) 412 413 // Reset batch can be used to write and commit a new record. 414 b.Set([]byte(key), []byte(value), nil) 415 require.NoError(t, db.Apply(b, nil)) 416 v, closer, err := db.Get([]byte(key)) 417 require.NoError(t, err) 418 defer closer.Close() 419 require.Equal(t, v, []byte(value)) 420 } 421 422 func TestIndexedBatchReset(t *testing.T) { 423 indexCount := func(sl *batchskl.Skiplist) int { 424 count := 0 425 iter := sl.NewIter(nil, nil) 426 defer iter.Close() 427 for iter.First(); iter.Valid(); iter.Next() { 428 count++ 429 } 430 return count 431 } 432 db, err := Open("", &Options{ 433 FS: vfs.NewMem(), 434 }) 435 require.NoError(t, err) 436 defer db.Close() 437 b := newIndexedBatch(db, DefaultComparer) 438 start := "start-key" 439 end := "end-key" 440 key := "test-key" 441 value := "test-value" 442 b.DeleteRange([]byte(start), []byte(end), nil) 443 b.Set([]byte(key), []byte(value), nil) 444 require.NoError(t, b. 
445 RangeKeySet([]byte(start), []byte(end), []byte("suffix"), []byte(value), nil)) 446 require.NotNil(t, b.rangeKeyIndex) 447 require.NotNil(t, b.rangeDelIndex) 448 require.NotNil(t, b.index) 449 require.Equal(t, 1, indexCount(b.index)) 450 451 b.Reset() 452 require.NotNil(t, b.cmp) 453 require.NotNil(t, b.formatKey) 454 require.NotNil(t, b.abbreviatedKey) 455 require.NotNil(t, b.index) 456 require.Nil(t, b.rangeDelIndex) 457 require.Nil(t, b.rangeKeyIndex) 458 459 count := func(ib *Batch) int { 460 iter, _ := ib.NewIter(nil) 461 defer iter.Close() 462 iter2, err := iter.Clone(CloneOptions{}) 463 require.NoError(t, err) 464 defer iter2.Close() 465 iter3, err := ib.NewBatchOnlyIter(context.Background(), nil) 466 require.NoError(t, err) 467 defer iter3.Close() 468 var count [3]int 469 for i, it := range []*Iterator{iter, iter2, iter3} { 470 for it.First(); it.Valid(); it.Next() { 471 count[i]++ 472 } 473 } 474 require.Equal(t, count[0], count[1]) 475 require.Equal(t, count[0], count[2]) 476 return count[0] 477 } 478 contains := func(ib *Batch, key, value string) bool { 479 iter, _ := ib.NewIter(nil) 480 defer iter.Close() 481 iter2, err := iter.Clone(CloneOptions{}) 482 require.NoError(t, err) 483 defer iter2.Close() 484 iter3, err := ib.NewBatchOnlyIter(context.Background(), nil) 485 require.NoError(t, err) 486 defer iter3.Close() 487 var found [3]bool 488 for i, it := range []*Iterator{iter, iter2, iter3} { 489 for it.First(); it.Valid(); it.Next() { 490 if string(it.Key()) == key && 491 string(it.Value()) == value { 492 found[i] = true 493 } 494 } 495 } 496 require.Equal(t, found[0], found[1]) 497 require.Equal(t, found[0], found[2]) 498 return found[0] 499 } 500 // Set a key and check whether the key-value pair is visible. 501 b.Set([]byte(key), []byte(value), nil) 502 require.Equal(t, 1, indexCount(b.index)) 503 require.Equal(t, 1, count(b)) 504 require.True(t, contains(b, key, value)) 505 506 // Use range delete to delete the above inserted key-value pair. 
507 b.DeleteRange([]byte(key), []byte(value), nil) 508 require.NotNil(t, b.rangeDelIndex) 509 require.Equal(t, 1, indexCount(b.rangeDelIndex)) 510 require.Equal(t, 0, count(b)) 511 require.False(t, contains(b, key, value)) 512 } 513 514 // TestIndexedBatchMutation tests mutating an indexed batch with an open 515 // iterator. 516 func TestIndexedBatchMutation(t *testing.T) { 517 opts := &Options{ 518 Comparer: testkeys.Comparer, 519 FS: vfs.NewMem(), 520 FormatMajorVersion: internalFormatNewest, 521 } 522 d, err := Open("", opts) 523 require.NoError(t, err) 524 defer func() { d.Close() }() 525 526 b := newIndexedBatch(d, DefaultComparer) 527 iters := map[string]*Iterator{} 528 defer func() { 529 for _, iter := range iters { 530 require.NoError(t, iter.Close()) 531 } 532 }() 533 534 datadriven.RunTest(t, "testdata/indexed_batch_mutation", func(t *testing.T, td *datadriven.TestData) string { 535 switch td.Cmd { 536 case "batch": 537 writeBatch := newBatch(d) 538 if err := runBatchDefineCmd(td, writeBatch); err != nil { 539 return err.Error() 540 } 541 if err := writeBatch.Commit(nil); err != nil { 542 return err.Error() 543 } 544 return "" 545 case "new-batch-iter": 546 name := td.CmdArgs[0].String() 547 iters[name], _ = b.NewIter(&IterOptions{ 548 KeyTypes: IterKeyTypePointsAndRanges, 549 }) 550 return "" 551 case "new-batch-only-iter": 552 name := td.CmdArgs[0].String() 553 iters[name], _ = b.NewBatchOnlyIter(context.Background(), &IterOptions{ 554 KeyTypes: IterKeyTypePointsAndRanges, 555 }) 556 return "" 557 case "new-db-iter": 558 name := td.CmdArgs[0].String() 559 iters[name], _ = d.NewIter(&IterOptions{ 560 KeyTypes: IterKeyTypePointsAndRanges, 561 }) 562 return "" 563 case "new-batch": 564 if b != nil { 565 require.NoError(t, b.Close()) 566 } 567 b = newIndexedBatch(d, opts.Comparer) 568 if err := runBatchDefineCmd(td, b); err != nil { 569 return err.Error() 570 } 571 return "" 572 case "flush": 573 require.NoError(t, d.Flush()) 574 return "" 575 case "iter": 
576 var iter string 577 td.ScanArgs(t, "iter", &iter) 578 return runIterCmd(td, iters[iter], false /* closeIter */) 579 case "mutate": 580 mut := newBatch(d) 581 if err := runBatchDefineCmd(td, mut); err != nil { 582 return err.Error() 583 } 584 if err := b.Apply(mut, nil); err != nil { 585 return err.Error() 586 } 587 return "" 588 case "clone": 589 var from, to string 590 var refreshBatchView bool 591 td.ScanArgs(t, "from", &from) 592 td.ScanArgs(t, "to", &to) 593 td.ScanArgs(t, "refresh-batch", &refreshBatchView) 594 var err error 595 iters[to], err = iters[from].Clone(CloneOptions{RefreshBatchView: refreshBatchView}) 596 if err != nil { 597 return err.Error() 598 } 599 return "" 600 case "reset": 601 for key, iter := range iters { 602 if err := iter.Close(); err != nil { 603 return err.Error() 604 } 605 delete(iters, key) 606 } 607 if d != nil { 608 if err := d.Close(); err != nil { 609 return err.Error() 610 } 611 } 612 opts.FS = vfs.NewMem() 613 d, err = Open("", opts) 614 require.NoError(t, err) 615 return "" 616 default: 617 return fmt.Sprintf("unrecognized command %q", td.Cmd) 618 } 619 }) 620 } 621 622 func TestIndexedBatch_GlobalVisibility(t *testing.T) { 623 opts := &Options{ 624 FS: vfs.NewMem(), 625 FormatMajorVersion: internalFormatNewest, 626 Comparer: testkeys.Comparer, 627 } 628 d, err := Open("", opts) 629 require.NoError(t, err) 630 defer d.Close() 631 632 require.NoError(t, d.Set([]byte("foo"), []byte("foo"), nil)) 633 634 // Create an iterator over an empty indexed batch. 635 b := newIndexedBatch(d, DefaultComparer) 636 iterOpts := IterOptions{KeyTypes: IterKeyTypePointsAndRanges} 637 iter, _ := b.NewIter(&iterOpts) 638 defer iter.Close() 639 640 // Mutate the database's committed state. 
641 mut := newBatch(d) 642 require.NoError(t, mut.Set([]byte("bar"), []byte("bar"), nil)) 643 require.NoError(t, mut.DeleteRange([]byte("e"), []byte("g"), nil)) 644 require.NoError(t, mut.RangeKeySet([]byte("a"), []byte("c"), []byte("@1"), []byte("v"), nil)) 645 require.NoError(t, mut.Commit(nil)) 646 647 scanIter := func() string { 648 var buf bytes.Buffer 649 for valid := iter.First(); valid; valid = iter.Next() { 650 fmt.Fprintf(&buf, "%s: (", iter.Key()) 651 hasPoint, hasRange := iter.HasPointAndRange() 652 if hasPoint { 653 fmt.Fprintf(&buf, "%s,", iter.Value()) 654 } else { 655 fmt.Fprintf(&buf, ".,") 656 } 657 if hasRange { 658 start, end := iter.RangeBounds() 659 fmt.Fprintf(&buf, "[%s-%s)", start, end) 660 writeRangeKeys(&buf, iter) 661 } else { 662 fmt.Fprintf(&buf, ".") 663 } 664 fmt.Fprintln(&buf, ")") 665 } 666 return strings.TrimSpace(buf.String()) 667 } 668 // Scanning the iterator should only see the point key written before the 669 // iterator was constructed. 670 require.Equal(t, `foo: (foo,.)`, scanIter()) 671 672 // After calling SetOptions, the iterator should still only see the point 673 // key written before the iterator was constructed. SetOptions refreshes the 674 // iterator's view of its own indexed batch, but not committed state. 
675 iter.SetOptions(&iterOpts) 676 require.Equal(t, `foo: (foo,.)`, scanIter()) 677 } 678 679 func TestFlushableBatchReset(t *testing.T) { 680 var b Batch 681 var err error 682 b.flushable, err = newFlushableBatch(&b, DefaultComparer) 683 require.NoError(t, err) 684 685 b.Reset() 686 require.Nil(t, b.flushable) 687 } 688 689 func TestBatchIncrement(t *testing.T) { 690 testCases := []uint32{ 691 0x00000000, 692 0x00000001, 693 0x00000002, 694 0x0000007f, 695 0x00000080, 696 0x000000fe, 697 0x000000ff, 698 0x00000100, 699 0x00000101, 700 0x000001ff, 701 0x00000200, 702 0x00000fff, 703 0x00001234, 704 0x0000fffe, 705 0x0000ffff, 706 0x00010000, 707 0x00010001, 708 0x000100fe, 709 0x000100ff, 710 0x00020100, 711 0x03fffffe, 712 0x03ffffff, 713 0x04000000, 714 0x04000001, 715 0x7fffffff, 716 0xfffffffe, 717 } 718 for _, tc := range testCases { 719 var buf [batchHeaderLen]byte 720 binary.LittleEndian.PutUint32(buf[8:12], tc) 721 var b Batch 722 b.SetRepr(buf[:]) 723 b.count++ 724 got := binary.LittleEndian.Uint32(b.Repr()[8:12]) 725 want := tc + 1 726 if got != want { 727 t.Errorf("input=%d: got %d, want %d", tc, got, want) 728 } 729 _, count := ReadBatch(b.Repr()) 730 if got != want { 731 t.Errorf("input=%d: got %d, want %d", tc, count, want) 732 } 733 } 734 735 err := func() (err error) { 736 defer func() { 737 if v := recover(); v != nil { 738 if verr, ok := v.(error); ok { 739 err = verr 740 } 741 } 742 }() 743 var buf [batchHeaderLen]byte 744 binary.LittleEndian.PutUint32(buf[8:12], 0xffffffff) 745 var b Batch 746 b.SetRepr(buf[:]) 747 b.count++ 748 b.Repr() 749 return nil 750 }() 751 if err != ErrInvalidBatch { 752 t.Fatalf("expected %v, but found %v", ErrInvalidBatch, err) 753 } 754 } 755 756 func TestBatchOpDoesIncrement(t *testing.T) { 757 var b Batch 758 key := []byte("foo") 759 value := []byte("bar") 760 761 if b.Count() != 0 { 762 t.Fatalf("new batch has a nonzero count: %d", b.Count()) 763 } 764 765 // Should increment count by 1 766 _ = b.Set(key, value, 
nil) 767 if b.Count() != 1 { 768 t.Fatalf("expected count: %d, got %d", 1, b.Count()) 769 } 770 771 var b2 Batch 772 // Should increment count by 1 each 773 _ = b2.Set(key, value, nil) 774 _ = b2.Delete(key, nil) 775 if b2.Count() != 2 { 776 t.Fatalf("expected count: %d, got %d", 2, b2.Count()) 777 } 778 779 // Should increment count by b2.count() 780 _ = b.Apply(&b2, nil) 781 if b.Count() != 3 { 782 t.Fatalf("expected count: %d, got %d", 3, b.Count()) 783 } 784 785 // Should increment count by 1 786 _ = b.Merge(key, value, nil) 787 if b.Count() != 4 { 788 t.Fatalf("expected count: %d, got %d", 4, b.Count()) 789 } 790 791 // Should NOT increment count. 792 _ = b.LogData([]byte("foobarbaz"), nil) 793 if b.Count() != 4 { 794 t.Fatalf("expected count: %d, got %d", 4, b.Count()) 795 } 796 } 797 798 func TestBatchGet(t *testing.T) { 799 testCases := []struct { 800 method string 801 memTableSize uint64 802 }{ 803 {"build", 64 << 20}, 804 {"build", 2 << 10}, 805 {"apply", 64 << 20}, 806 } 807 808 for _, c := range testCases { 809 t.Run(fmt.Sprintf("%s,mem=%d", c.method, c.memTableSize), func(t *testing.T) { 810 d, err := Open("", &Options{ 811 FS: vfs.NewMem(), 812 MemTableSize: c.memTableSize, 813 }) 814 if err != nil { 815 t.Fatalf("Open: %v", err) 816 } 817 defer d.Close() 818 var b *Batch 819 820 datadriven.RunTest(t, "testdata/batch_get", func(t *testing.T, td *datadriven.TestData) string { 821 switch td.Cmd { 822 case "define": 823 switch c.method { 824 case "build": 825 b = d.NewIndexedBatch() 826 case "apply": 827 b = d.NewBatch() 828 } 829 830 if err := runBatchDefineCmd(td, b); err != nil { 831 return err.Error() 832 } 833 834 switch c.method { 835 case "apply": 836 tmp := d.NewIndexedBatch() 837 tmp.Apply(b, nil) 838 b = tmp 839 } 840 return "" 841 842 case "commit": 843 if err := b.Commit(nil); err != nil { 844 return err.Error() 845 } 846 b = nil 847 return "" 848 849 case "get": 850 if len(td.CmdArgs) != 1 { 851 return fmt.Sprintf("%s expects 1 argument", 
td.Cmd) 852 } 853 v, closer, err := b.Get([]byte(td.CmdArgs[0].String())) 854 if err != nil { 855 return err.Error() 856 } 857 defer closer.Close() 858 return string(v) 859 860 default: 861 return fmt.Sprintf("unknown command: %s", td.Cmd) 862 } 863 }) 864 }) 865 } 866 } 867 868 func TestBatchIter(t *testing.T) { 869 var b *Batch 870 871 for _, method := range []string{"build", "apply"} { 872 for _, testdata := range []string{ 873 "testdata/internal_iter_next", "testdata/internal_iter_bounds"} { 874 t.Run(method, func(t *testing.T) { 875 datadriven.RunTest(t, testdata, func(t *testing.T, d *datadriven.TestData) string { 876 switch d.Cmd { 877 case "define": 878 switch method { 879 case "build": 880 b = newIndexedBatch(nil, DefaultComparer) 881 case "apply": 882 b = newBatch(nil) 883 } 884 885 for _, key := range strings.Split(d.Input, "\n") { 886 j := strings.Index(key, ":") 887 ikey := base.ParseInternalKey(key[:j]) 888 value := []byte(key[j+1:]) 889 b.Set(ikey.UserKey, value, nil) 890 } 891 892 switch method { 893 case "apply": 894 tmp := newIndexedBatch(nil, DefaultComparer) 895 tmp.Apply(b, nil) 896 b = tmp 897 } 898 return "" 899 900 case "iter": 901 var options IterOptions 902 for _, arg := range d.CmdArgs { 903 switch arg.Key { 904 case "lower": 905 if len(arg.Vals) != 1 { 906 return fmt.Sprintf( 907 "%s expects at most 1 value for lower", d.Cmd) 908 } 909 options.LowerBound = []byte(arg.Vals[0]) 910 case "upper": 911 if len(arg.Vals) != 1 { 912 return fmt.Sprintf( 913 "%s expects at most 1 value for upper", d.Cmd) 914 } 915 options.UpperBound = []byte(arg.Vals[0]) 916 default: 917 return fmt.Sprintf("unknown arg: %s", arg.Key) 918 } 919 } 920 iter := b.newInternalIter(&options) 921 defer iter.Close() 922 return itertest.RunInternalIterCmd(t, d, iter) 923 924 default: 925 return fmt.Sprintf("unknown command: %s", d.Cmd) 926 } 927 }) 928 }) 929 } 930 } 931 } 932 933 func TestBatchRangeOps(t *testing.T) { 934 var b *Batch 935 936 datadriven.RunTest(t, 
"testdata/batch_range_ops", func(t *testing.T, td *datadriven.TestData) string { 937 switch td.Cmd { 938 case "clear": 939 b = nil 940 return "" 941 942 case "apply": 943 if b == nil { 944 b = newIndexedBatch(nil, DefaultComparer) 945 } 946 t := newBatch(nil) 947 if err := runBatchDefineCmd(td, t); err != nil { 948 return err.Error() 949 } 950 if err := b.Apply(t, nil); err != nil { 951 return err.Error() 952 } 953 return "" 954 955 case "define": 956 if b == nil { 957 b = newIndexedBatch(nil, DefaultComparer) 958 } 959 if err := runBatchDefineCmd(td, b); err != nil { 960 return err.Error() 961 } 962 return "" 963 964 case "scan": 965 if len(td.CmdArgs) > 1 { 966 return fmt.Sprintf("%s expects at most 1 argument", td.Cmd) 967 } 968 var fragmentIter keyspan.FragmentIterator 969 var internalIter base.InternalIterator 970 switch { 971 case td.HasArg("range-del"): 972 fragmentIter = b.newRangeDelIter(nil, math.MaxUint64) 973 defer fragmentIter.Close() 974 case td.HasArg("range-key"): 975 fragmentIter = b.newRangeKeyIter(nil, math.MaxUint64) 976 defer fragmentIter.Close() 977 default: 978 internalIter = b.newInternalIter(nil) 979 defer internalIter.Close() 980 } 981 982 var buf bytes.Buffer 983 if fragmentIter != nil { 984 for s := fragmentIter.First(); s != nil; s = fragmentIter.Next() { 985 for i := range s.Keys { 986 s.Keys[i].Trailer = base.MakeTrailer( 987 s.Keys[i].SeqNum()&^base.InternalKeySeqNumBatch, 988 s.Keys[i].Kind(), 989 ) 990 } 991 fmt.Fprintln(&buf, s) 992 } 993 } else { 994 for k, v := internalIter.First(); k != nil; k, v = internalIter.Next() { 995 k.SetSeqNum(k.SeqNum() &^ InternalKeySeqNumBatch) 996 fmt.Fprintf(&buf, "%s:%s\n", k, v.InPlaceValue()) 997 } 998 } 999 return buf.String() 1000 1001 default: 1002 return fmt.Sprintf("unknown command: %s", td.Cmd) 1003 } 1004 }) 1005 } 1006 1007 func TestBatchTooLarge(t *testing.T) { 1008 var b Batch 1009 var result interface{} 1010 func() { 1011 defer func() { 1012 if r := recover(); r != nil { 1013 result 
= r
			}
		}()
		b.grow(maxBatchSize)
	}()
	require.EqualValues(t, ErrBatchTooLarge, result)
}

// TestFlushableBatchIter is a datadriven test that builds a flushable batch
// of Set keys from "define" input and runs internal iterator commands
// against it via itertest.
func TestFlushableBatchIter(t *testing.T) {
	var b *flushableBatch
	datadriven.RunTest(t, "testdata/internal_iter_next", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			batch := newBatch(nil)
			for _, key := range strings.Split(d.Input, "\n") {
				j := strings.Index(key, ":")
				ikey := base.ParseInternalKey(key[:j])
				// Use the key's sequence number as the value so dumps are
				// self-describing.
				value := []byte(fmt.Sprint(ikey.SeqNum()))
				batch.Set(ikey.UserKey, value, nil)
			}
			var err error
			b, err = newFlushableBatch(batch, DefaultComparer)
			require.NoError(t, err)
			return ""

		case "iter":
			iter := b.newIter(nil)
			defer iter.Close()
			return itertest.RunInternalIterCmd(t, d, iter)

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// TestFlushableBatch is a datadriven test exercising a flushable batch built
// from point, range-deletion, and range-key operations. "iter" supports
// lower/upper bounds; "dump" prints point keys followed by range-del and
// range-key spans at a given sequence number.
func TestFlushableBatch(t *testing.T) {
	var b *flushableBatch
	datadriven.RunTest(t, "testdata/flushable_batch", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			batch := newBatch(nil)
			for _, key := range strings.Split(d.Input, "\n") {
				j := strings.Index(key, ":")
				ikey := base.ParseInternalKey(key[:j])
				value := []byte(fmt.Sprint(ikey.SeqNum()))
				// Dispatch on the parsed internal key kind; kinds not listed
				// here are silently dropped from the batch.
				switch ikey.Kind() {
				case InternalKeyKindDelete:
					require.NoError(t, batch.Delete(ikey.UserKey, nil))
				case InternalKeyKindSet:
					require.NoError(t, batch.Set(ikey.UserKey, value, nil))
				case InternalKeyKindMerge:
					require.NoError(t, batch.Merge(ikey.UserKey, value, nil))
				case InternalKeyKindRangeDelete:
					require.NoError(t, batch.DeleteRange(ikey.UserKey, value, nil))
				case InternalKeyKindRangeKeyDelete:
					require.NoError(t, batch.RangeKeyDelete(ikey.UserKey, value, nil))
				case InternalKeyKindRangeKeySet:
					require.NoError(t, batch.RangeKeySet(ikey.UserKey, value, value, value, nil))
				case InternalKeyKindRangeKeyUnset:
					require.NoError(t, batch.RangeKeyUnset(ikey.UserKey, value, value, nil))
				}
			}
			var err error
			b, err = newFlushableBatch(batch, DefaultComparer)
			require.NoError(t, err)
			return ""

		case "iter":
			var opts IterOptions
			for _, arg := range d.CmdArgs {
				if len(arg.Vals) != 1 {
					return fmt.Sprintf("%s: %s=<value>", d.Cmd, arg.Key)
				}
				switch arg.Key {
				case "lower":
					opts.LowerBound = []byte(arg.Vals[0])
				case "upper":
					opts.UpperBound = []byte(arg.Vals[0])
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}

			iter := b.newIter(&opts)
			defer iter.Close()
			return itertest.RunInternalIterCmd(t, d, iter)

		case "dump":
			if len(d.CmdArgs) != 1 || len(d.CmdArgs[0].Vals) != 1 || d.CmdArgs[0].Key != "seq" {
				return "dump seq=<value>\n"
			}
			seqNum, err := strconv.Atoi(d.CmdArgs[0].Vals[0])
			if err != nil {
				return err.Error()
			}
			b.setSeqNum(uint64(seqNum))

			var buf bytes.Buffer

			// Point keys first, then range-del spans, then range-key spans.
			iter := newInternalIterAdapter(b.newIter(nil))
			for valid := iter.First(); valid; valid = iter.Next() {
				fmt.Fprintf(&buf, "%s:%s\n", iter.Key(), iter.Value())
			}
			iter.Close()

			if rangeDelIter := b.newRangeDelIter(nil); rangeDelIter != nil {
				scanKeyspanIterator(&buf, rangeDelIter)
				rangeDelIter.Close()
			}
			if rangeKeyIter := b.newRangeKeyIter(nil); rangeKeyIter != nil {
				scanKeyspanIterator(&buf, rangeKeyIter)
				rangeKeyIter.Close()
			}
			return buf.String()

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// TestFlushableBatchDeleteRange runs the shared "testdata/delete_range"
// datadriven test against a flushable batch, accumulating input across
// "define" commands (see the NB comment below).
func TestFlushableBatchDeleteRange(t *testing.T) {
	var fb *flushableBatch
	var input string

	datadriven.RunTest(t, "testdata/delete_range", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "clear":
			input = ""
			return ""

		case "define":
			b := newBatch(nil)
			// NB: We can't actually add to the flushable batch as we can to a
			// memtable (which shares the "testdata/delete_range" data), so we fake
			// it by concatenating the input and rebuilding the flushable batch from
			// scratch.
			input += "\n" + td.Input
			td.Input = input
			if err := runBatchDefineCmd(td, b); err != nil {
				return err.Error()
			}
			var err error
			fb, err = newFlushableBatch(b, DefaultComparer)
			require.NoError(t, err)
			return ""

		case "scan":
			var buf bytes.Buffer
			if td.HasArg("range-del") {
				fi := fb.newRangeDelIter(nil)
				defer fi.Close()
				scanKeyspanIterator(&buf, fi)
			} else {
				ii := fb.newIter(nil)
				defer ii.Close()
				scanInternalIter(&buf, ii)
			}
			return buf.String()

		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
}

// scanInternalIter writes every key:value pair of ii, in order from First, to w.
func scanInternalIter(w io.Writer, ii internalIterator) {
	for k, v := ii.First(); k != nil; k, v = ii.Next() {
		fmt.Fprintf(w, "%s:%s\n", k, v.InPlaceValue())
	}
}

// scanKeyspanIterator writes every span of ki, in order from First, to w.
func scanKeyspanIterator(w io.Writer, ki keyspan.FragmentIterator) {
	for s := ki.First(); s != nil; s = ki.Next() {
		fmt.Fprintln(w, s)
	}
}

// TestFlushableBatchBytesIterated verifies that the bytesIterated counter of
// a flush iterator increases monotonically and ends equal to the flushable
// batch's in-use bytes, across batches of growing size.
func TestFlushableBatchBytesIterated(t *testing.T) {
	batch := newBatch(nil)
	for j := 0; j < 1000; j++ {
		// Vary key/value lengths slightly so entries are not uniform.
		key := make([]byte, 8+j%3)
		value := make([]byte, 7+j%5)
		batch.Set(key, value, nil)

		fb, err := newFlushableBatch(batch, DefaultComparer)
		require.NoError(t, err)

		var bytesIterated uint64
		it := fb.newFlushIter(nil, &bytesIterated)

		var prevIterated uint64
		for key, _ := it.First(); key != nil; key, _ = it.Next() {
			if bytesIterated < prevIterated {
				t.Fatalf("bytesIterated moved backward: %d < %d", bytesIterated, prevIterated)
			}
			prevIterated = bytesIterated
		}

		expected := fb.inuseBytes()
		if bytesIterated != expected {
			t.Fatalf("bytesIterated: got %d, want %d", bytesIterated, expected)
		}
	}
}

func TestEmptyFlushableBatch(t *testing.T) {
	// Verify that we can create a flushable batch on an empty batch.
	fb, err := newFlushableBatch(newBatch(nil), DefaultComparer)
	require.NoError(t, err)
	it := newInternalIterAdapter(fb.newIter(nil))
	require.False(t, it.First())
}

// TestBatchCommitStats checks that BatchCommitStats reports stall durations
// (semaphore, memtable, L0 read-amp, commit-wait) of at least the induced
// stall length. Timing-sensitive, so the whole check is retried up to five
// times and passes if any attempt succeeds.
func TestBatchCommitStats(t *testing.T) {
	testFunc := func() error {
		db, err := Open("", &Options{
			FS: vfs.NewMem(),
		})
		require.NoError(t, err)
		defer db.Close()
		b := db.NewBatch()
		defer b.Close()
		stats := b.CommitStats()
		require.Equal(t, BatchCommitStats{}, stats)

		// The stall code peers into the internals, instead of adding general
		// purpose hooks, to avoid changing production code. We can revisit this
		// choice if it becomes hard to maintain.

		// Commit semaphore stall funcs.
		var unstallCommitSemaphore func()
		stallCommitSemaphore := func() {
			// Drain the semaphore by reserving every available slot; the
			// matching unstall releases exactly what was reserved.
			commitPipeline := db.commit
			commitSemaphoreReserved := 0
			done := false
			for !done {
				select {
				case commitPipeline.commitQueueSem <- struct{}{}:
					commitSemaphoreReserved++
				default:
					done = true
				}
				if done {
					break
				}
			}
			unstallCommitSemaphore = func() {
				for i := 0; i < commitSemaphoreReserved; i++ {
					<-commitPipeline.commitQueueSem
				}
			}
		}

		// Memstable stall funcs.
		var unstallMemtable func()
		stallMemtable := func() {
			db.mu.Lock()
			defer db.mu.Unlock()
			prev := db.opts.MemTableStopWritesThreshold
			db.opts.MemTableStopWritesThreshold = 0
			unstallMemtable = func() {
				db.mu.Lock()
				defer db.mu.Unlock()
				db.opts.MemTableStopWritesThreshold = prev
				db.mu.compact.cond.Broadcast()
			}
		}

		// L0 read-amp stall funcs.
		var unstallL0ReadAmp func()
		stallL0ReadAmp := func() {
			db.mu.Lock()
			defer db.mu.Unlock()
			prev := db.opts.L0StopWritesThreshold
			db.opts.L0StopWritesThreshold = 0
			unstallL0ReadAmp = func() {
				db.mu.Lock()
				defer db.mu.Unlock()
				db.opts.L0StopWritesThreshold = prev
				db.mu.compact.cond.Broadcast()
			}
		}

		// Commit wait stall funcs.
		var unstallCommitWait func()
		stallCommitWait := func() {
			b.commit.Add(1)
			unstallCommitWait = func() {
				b.commit.Done()
			}
		}

		// Stall everything.
		stallCommitSemaphore()
		stallMemtable()
		stallL0ReadAmp()
		stallCommitWait()

		// Exceed initialMemTableSize -- this is needed to make stallMemtable work.
		require.NoError(t, b.Set(make([]byte, initialMemTableSize), nil, nil))

		var commitWG sync.WaitGroup
		commitWG.Add(1)
		go func() {
			require.NoError(t, db.Apply(b, &WriteOptions{Sync: true}))
			commitWG.Done()
		}()
		// Unstall things in the order that the stalls will happen.
		sleepDuration := 10 * time.Millisecond
		time.Sleep(sleepDuration)
		unstallCommitSemaphore()
		time.Sleep(sleepDuration)
		unstallMemtable()
		time.Sleep(sleepDuration)
		unstallL0ReadAmp()
		time.Sleep(sleepDuration)
		unstallCommitWait()

		// Wait for Apply to return.
		commitWG.Wait()
		stats = b.CommitStats()
		// Each stall lasted ~sleepDuration; accept anything above two thirds
		// of that to tolerate scheduling jitter.
		expectedDuration := (2 * sleepDuration) / 3
		if expectedDuration > stats.SemaphoreWaitDuration {
			return errors.Errorf("SemaphoreWaitDuration %s is too low",
				stats.SemaphoreWaitDuration.String())
		}
		if expectedDuration > stats.MemTableWriteStallDuration {
			return errors.Errorf("MemTableWriteStallDuration %s is too low",
				stats.MemTableWriteStallDuration.String())
		}
		if expectedDuration > stats.L0ReadAmpWriteStallDuration {
			return errors.Errorf("L0ReadAmpWriteStallDuration %s is too low",
				stats.L0ReadAmpWriteStallDuration)
		}
		if expectedDuration > stats.CommitWaitDuration {
			return errors.Errorf("CommitWaitDuration %s is too low",
				stats.CommitWaitDuration)
		}
		if 5*expectedDuration > stats.TotalDuration {
			return errors.Errorf("TotalDuration %s is too low",
				stats.TotalDuration)
		}
		return nil
	}
	// Try a few times, and succeed if one of them succeeds.
	var err error
	for i := 0; i < 5; i++ {
		err = testFunc()
		if err == nil {
			break
		}
	}
	require.NoError(t, err)
}

// TestBatchReader decodes hex-encoded batch representations from
// "testdata/batch_reader" and prints the count plus each decoded entry,
// exercising ReadBatch and BatchReader.Next error handling.
func TestBatchReader(t *testing.T) {
	datadriven.RunTest(t, "testdata/batch_reader", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "scan":
			var repr bytes.Buffer
			for i, l := range strings.Split(td.Input, "\n") {
				// Remove any trailing comments behind #.
				if i := strings.IndexRune(l, '#'); i >= 0 {
					l = l[:i]
				}
				// Strip all whitespace from the line.
				l = strings.Map(func(r rune) rune {
					if unicode.IsSpace(r) {
						return -1
					}
					return r
				}, l)
				b, err := hex.DecodeString(l)
				if err != nil {
					return fmt.Sprintf("failed to decode hex; line %d", i)
				}
				repr.Write(b)
			}
			r, count := ReadBatch(repr.Bytes())
			var out strings.Builder
			fmt.Fprintf(&out, "Count: %d\n", count)
			for {
				kind, ukey, value, ok, err := r.Next()
				if !ok {
					if err != nil {
						fmt.Fprintf(&out, "err: %s\n", err)
					} else {
						fmt.Fprint(&out, "eof")
					}
					break
				}
				fmt.Fprintf(&out, "%s: %q: %q\n", kind, ukey, value)
			}
			return out.String()

		default:
			return fmt.Sprintf("unrecognized command %q", td.Cmd)
		}
	})
}

// BenchmarkBatchSet measures Batch.Set throughput on an unindexed batch,
// resetting the batch every batchSize operations.
func BenchmarkBatchSet(b *testing.B) {
	value := make([]byte, 10)
	for i := range value {
		value[i] = byte(i)
	}
	key := make([]byte, 8)
	batch := newBatch(nil)

	b.ResetTimer()

	const batchSize = 1000
	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		for j := i; j < end; j++ {
			binary.BigEndian.PutUint64(key, uint64(j))
			batch.Set(key, value, nil)
		}
		batch.Reset()
	}

	b.StopTimer()
}

// BenchmarkIndexedBatchSet is BenchmarkBatchSet against an indexed batch.
func BenchmarkIndexedBatchSet(b *testing.B) {
	value := make([]byte, 10)
	for i := range value {
		value[i] = byte(i)
	}
	key := make([]byte, 8)
	batch := newIndexedBatch(nil, DefaultComparer)

	b.ResetTimer()

	const batchSize = 1000
	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		for j := i; j < end; j++ {
			binary.BigEndian.PutUint64(key, uint64(j))
			batch.Set(key, value, nil)
		}
		batch.Reset()
	}

	b.StopTimer()
}

// BenchmarkBatchSetDeferred measures the SetDeferred path (caller copies the
// key/value into preallocated batch space) on an unindexed batch.
func BenchmarkBatchSetDeferred(b *testing.B) {
	value := make([]byte, 10)
	for i := range value {
		value[i] = byte(i)
	}
	key := make([]byte, 8)
	batch := newBatch(nil)

	b.ResetTimer()

	const batchSize = 1000
	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		for j := i; j < end; j++ {
			binary.BigEndian.PutUint64(key, uint64(j))
			deferredOp := batch.SetDeferred(len(key), len(value))

			copy(deferredOp.Key, key)
			copy(deferredOp.Value, value)

			deferredOp.Finish()
		}
		batch.Reset()
	}

	b.StopTimer()
}

// BenchmarkIndexedBatchSetDeferred is BenchmarkBatchSetDeferred against an
// indexed batch.
func BenchmarkIndexedBatchSetDeferred(b *testing.B) {
	value := make([]byte, 10)
	for i := range value {
		value[i] = byte(i)
	}
	key := make([]byte, 8)
	batch := newIndexedBatch(nil, DefaultComparer)

	b.ResetTimer()

	const batchSize = 1000
	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		for j := i; j < end; j++ {
			binary.BigEndian.PutUint64(key, uint64(j))
			deferredOp := batch.SetDeferred(len(key), len(value))

			copy(deferredOp.Key, key)
			copy(deferredOp.Value, value)

			deferredOp.Finish()
		}
		batch.Reset()
	}

	b.StopTimer()
}

// TestBatchMemTableSizeOverflow verifies that a batch's memTableSize
// accounting does not wrap around when it exceeds math.MaxUint32.
func TestBatchMemTableSizeOverflow(t *testing.T) {
	opts := &Options{
		FS: vfs.NewMem(),
	}
	opts.EnsureDefaults()
	d, err := Open("", opts)
	require.NoError(t, err)

	bigValue := make([]byte, 1000)
	b := d.NewBatch()

	// memTableSize can overflow as a uint32.
	b.memTableSize = math.MaxUint32 - 50
	for i := 0; i < 10; i++ {
		k := fmt.Sprintf("key-%05d", i)
		require.NoError(t, b.Set([]byte(k), bigValue, nil))
	}
	require.Greater(t, b.memTableSize, uint64(math.MaxUint32))
	require.NoError(t, b.Close())
	require.NoError(t, d.Close())
}

// TestBatchSpanCaching stress tests the caching of keyspan.Spans for range
// tombstones and range keys.
func TestBatchSpanCaching(t *testing.T) {
	opts := &Options{
		Comparer:           testkeys.Comparer,
		FS:                 vfs.NewMem(),
		FormatMajorVersion: internalFormatNewest,
	}
	d, err := Open("", opts)
	require.NoError(t, err)
	defer d.Close()

	ks := testkeys.Alpha(1)
	b := d.NewIndexedBatch()
	for i := int64(0); i < ks.Count(); i++ {
		k := testkeys.Key(ks, i)
		require.NoError(t, b.Set(k, k, nil))
	}

	// Log the seed so a failing interleaving can be reproduced.
	seed := int64(time.Now().UnixNano())
	t.Logf("seed = %d", seed)
	rng := rand.New(rand.NewSource(seed))
	iters := make([][]*Iterator, ks.Count())
	defer func() {
		for _, keyIters := range iters {
			for _, iter := range keyIters {
				_ = iter.Close()
			}
		}
	}()

	// This test begins with one point key for every letter of the alphabet.
	// Over the course of the test, point keys are 'replaced' with range keys
	// with narrow bounds from left to right. Iterators are created at random,
	// sometimes from the batch and sometimes by cloning existing iterators.

	checkIter := func(iter *Iterator, nextKey int64) {
		var i int64
		for valid := iter.First(); valid; valid = iter.Next() {
			hasPoint, hasRange := iter.HasPointAndRange()
			require.Equal(t, testkeys.Key(ks, i), iter.Key())
			if i < nextKey {
				// This key should not exist as a point key, just a range key.
				require.False(t, hasPoint)
				require.True(t, hasRange)
			} else {
				require.True(t, hasPoint)
				require.False(t, hasRange)
			}
			i++
		}
		require.Equal(t, ks.Count(), i)
	}

	// Each iteration of the below loop either reads or writes.
	//
	// A write iteration writes a new RANGEDEL and RANGEKEYSET into the batch,
	// covering a single point key seeded above. Writing these two span keys
	// together 'replaces' the point key with a range key. Each write iteration
	// ratchets nextWriteKey so the next write iteration will write the next
	// key.
	//
	// A read iteration creates a new iterator and ensures its state is
	// expected: some prefix of only point keys, followed by a suffix of only
	// range keys. Iterators created through Clone should observe the point keys
	// that existed when the cloned iterator was created.
	for nextWriteKey := int64(0); nextWriteKey < ks.Count(); {
		p := rng.Float64()
		switch {
		case p < .10: /* 10 % */
			// Write a new range deletion and range key.
			start := testkeys.Key(ks, nextWriteKey)
			end := append(start, 0x00)
			require.NoError(t, b.DeleteRange(start, end, nil))
			require.NoError(t, b.RangeKeySet(start, end, nil, []byte("foo"), nil))
			nextWriteKey++
		case p < .55: /* 45 % */
			// Create a new iterator directly from the batch and check that it
			// observes the correct state.
			iter, _ := b.NewIter(&IterOptions{KeyTypes: IterKeyTypePointsAndRanges})
			checkIter(iter, nextWriteKey)
			iters[nextWriteKey] = append(iters[nextWriteKey], iter)
		default: /* 45 % */
			// Create a new iterator through cloning a random existing iterator
			// and check that it observes the right state.
			readKey := rng.Int63n(nextWriteKey + 1)
			itersForReadKey := iters[readKey]
			if len(itersForReadKey) == 0 {
				continue
			}
			iter, err := itersForReadKey[rng.Intn(len(itersForReadKey))].Clone(CloneOptions{})
			require.NoError(t, err)
			checkIter(iter, readKey)
			iters[readKey] = append(iters[readKey], iter)
		}
	}
}