// Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"math"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"
	"unicode"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/batchskl"
	"github.com/cockroachdb/pebble/internal/keyspan"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

// TestBatch runs the batch round-trip test both with a zero initial size and
// with the default initial size.
func TestBatch(t *testing.T) {
	testBatch(t, 0)
	testBatch(t, batchInitialSize)
}

// testBatch encodes a fixed set of operations into a Batch three different
// ways — via the kind-specific methods (Set, Delete, ...), via their
// Deferred variants, and via AddInternalKey — and after each pass decodes
// the batch with Batch.Reader and verifies the recorded (kind, key, value)
// triples match the inputs.
func testBatch(t *testing.T, size int) {
	// testCase describes one record to add to the batch. valueInt is only
	// used by DeleteSized (the advertised size of the deleted value).
	type testCase struct {
		kind       InternalKeyKind
		key, value string
		valueInt   uint32
	}

	// verifyTestCases decodes b and checks each record against testCases in
	// order. When indexedPointKindsOnly is true, kinds that AddInternalKey
	// cannot encode (log data, ingested SSTs, range keys, range dels) are
	// skipped.
	verifyTestCases := func(b *Batch, testCases []testCase, indexedPointKindsOnly bool) {
		r := b.Reader()

		for _, tc := range testCases {
			if indexedPointKindsOnly && (tc.kind == InternalKeyKindLogData || tc.kind == InternalKeyKindIngestSST ||
				tc.kind == InternalKeyKindRangeKeyUnset || tc.kind == InternalKeyKindRangeKeySet ||
				tc.kind == InternalKeyKindRangeKeyDelete || tc.kind == InternalKeyKindRangeDelete) {
				continue
			}
			kind, k, v, ok, err := r.Next()
			if !ok {
				if err != nil {
					t.Fatal(err)
				}
				t.Fatalf("next returned !ok: test case = %v", tc)
			}
			key, value := string(k), string(v)
			if kind != tc.kind || key != tc.key || value != tc.value {
				t.Errorf("got (%d, %q, %q), want (%d, %q, %q)",
					kind, key, value, tc.kind, tc.key, tc.value)
			}
		}
		if len(r) != 0 {
			t.Errorf("reader was not exhausted: remaining bytes = %q", r)
		}
	}

	// File numbers for ingested SSTs are stored uvarint-encoded in the key.
	encodeFileNum := func(n base.FileNum) string {
		return string(binary.AppendUvarint(nil, uint64(n)))
	}
	decodeFileNum := func(d []byte) base.FileNum {
		val, n := binary.Uvarint(d)
		if n <= 0 {
			t.Fatalf("invalid filenum encoding")
		}
		return base.FileNum(val)
	}

	// RangeKeySet and RangeKeyUnset are untested here because they don't expose
	// deferred variants. This is a consequence of these keys' more complex
	// value encodings.
	testCases := []testCase{
		{InternalKeyKindIngestSST, encodeFileNum(1), "", 0},
		{InternalKeyKindSet, "roses", "red", 0},
		{InternalKeyKindSet, "violets", "blue", 0},
		{InternalKeyKindDelete, "roses", "", 0},
		{InternalKeyKindSingleDelete, "roses", "", 0},
		{InternalKeyKindSet, "", "", 0},
		{InternalKeyKindSet, "", "non-empty", 0},
		{InternalKeyKindDelete, "", "", 0},
		{InternalKeyKindSingleDelete, "", "", 0},
		{InternalKeyKindSet, "grass", "green", 0},
		{InternalKeyKindSet, "grass", "greener", 0},
		{InternalKeyKindSet, "eleventy", strings.Repeat("!!11!", 100), 0},
		{InternalKeyKindDelete, "nosuchkey", "", 0},
		{InternalKeyKindDeleteSized, "eleventy", string(binary.AppendUvarint([]byte(nil), 508)), 500},
		{InternalKeyKindSingleDelete, "nosuchkey", "", 0},
		{InternalKeyKindSet, "binarydata", "\x00", 0},
		{InternalKeyKindSet, "binarydata", "\xff", 0},
		{InternalKeyKindMerge, "merge", "mergedata", 0},
		{InternalKeyKindMerge, "merge", "", 0},
		{InternalKeyKindMerge, "", "", 0},
		{InternalKeyKindRangeDelete, "a", "b", 0},
		{InternalKeyKindRangeDelete, "", "", 0},
		{InternalKeyKindLogData, "logdata", "", 0},
		{InternalKeyKindLogData, "", "", 0},
		{InternalKeyKindRangeKeyDelete, "grass", "green", 0},
		{InternalKeyKindRangeKeyDelete, "", "", 0},
		{InternalKeyKindDeleteSized, "nosuchkey", string(binary.AppendUvarint([]byte(nil), 11)), 2},
	}
	// Pass 1: kind-specific mutation methods.
	b := newBatchWithSize(nil, size)
	for _, tc := range testCases {
		switch tc.kind {
		case InternalKeyKindSet:
			_ = b.Set([]byte(tc.key), []byte(tc.value), nil)
		case InternalKeyKindMerge:
			_ = b.Merge([]byte(tc.key), []byte(tc.value), nil)
		case InternalKeyKindDelete:
			_ = b.Delete([]byte(tc.key), nil)
		case InternalKeyKindDeleteSized:
			_ = b.DeleteSized([]byte(tc.key), tc.valueInt, nil)
		case InternalKeyKindSingleDelete:
			_ = b.SingleDelete([]byte(tc.key), nil)
		case InternalKeyKindRangeDelete:
			_ = b.DeleteRange([]byte(tc.key), []byte(tc.value), nil)
		case InternalKeyKindLogData:
			_ = b.LogData([]byte(tc.key), nil)
		case InternalKeyKindRangeKeyDelete:
			_ = b.RangeKeyDelete([]byte(tc.key), []byte(tc.value), nil)
		case InternalKeyKindIngestSST:
			b.ingestSST(decodeFileNum([]byte(tc.key)))
		}
	}
	verifyTestCases(b, testCases, false /* indexedPointKindsOnly */)

	b.Reset()
	// Run the same operations, this time using the Deferred variants of each
	// operation (eg. SetDeferred).
	for _, tc := range testCases {
		key := []byte(tc.key)
		value := []byte(tc.value)
		switch tc.kind {
		case InternalKeyKindSet:
			d := b.SetDeferred(len(key), len(value))
			copy(d.Key, key)
			copy(d.Value, value)
			d.Finish()
		case InternalKeyKindMerge:
			d := b.MergeDeferred(len(key), len(value))
			copy(d.Key, key)
			copy(d.Value, value)
			d.Finish()
		case InternalKeyKindDelete:
			d := b.DeleteDeferred(len(key))
			copy(d.Key, key)
			copy(d.Value, value)
			d.Finish()
		case InternalKeyKindDeleteSized:
			d := b.DeleteSizedDeferred(len(tc.key), tc.valueInt)
			copy(d.Key, key)
			d.Finish()
		case InternalKeyKindSingleDelete:
			d := b.SingleDeleteDeferred(len(key))
			copy(d.Key, key)
			copy(d.Value, value)
			d.Finish()
		case InternalKeyKindRangeDelete:
			d := b.DeleteRangeDeferred(len(key), len(value))
			copy(d.Key, key)
			copy(d.Value, value)
			d.Finish()
		case InternalKeyKindLogData:
			// LogData has no deferred variant.
			_ = b.LogData([]byte(tc.key), nil)
		case InternalKeyKindIngestSST:
			b.ingestSST(decodeFileNum([]byte(tc.key)))
		case InternalKeyKindRangeKeyDelete:
			d := b.RangeKeyDeleteDeferred(len(key), len(value))
			copy(d.Key, key)
			copy(d.Value, value)
			d.Finish()
		}
	}
	verifyTestCases(b, testCases, false /* indexedPointKindsOnly */)

	b.Reset()
	// Run the same operations, this time using AddInternalKey instead of the
	// Kind-specific methods.
	for _, tc := range testCases {
		if tc.kind == InternalKeyKindLogData || tc.kind == InternalKeyKindIngestSST ||
			tc.kind == InternalKeyKindRangeKeyUnset || tc.kind == InternalKeyKindRangeKeySet ||
			tc.kind == InternalKeyKindRangeKeyDelete || tc.kind == InternalKeyKindRangeDelete {
			continue
		}
		key := []byte(tc.key)
		value := []byte(tc.value)
		b.AddInternalKey(&InternalKey{UserKey: key, Trailer: base.MakeTrailer(0, tc.kind)}, value, nil)
	}
	verifyTestCases(b, testCases, true /* indexedPointKindsOnly */)
}

// TestBatchPreAlloc verifies that newBatchWithSize pre-allocates b.data with
// (at least) the requested capacity.
func TestBatchPreAlloc(t *testing.T) {
	var cases = []struct {
		size int
		exp  int
	}{
		{0, batchInitialSize},
		{batchInitialSize, batchInitialSize},
		{2 * batchInitialSize, 2 * batchInitialSize},
	}
	for _, c := range cases {
		b := newBatchWithSize(nil, c.size)
		b.Set([]byte{0x1}, []byte{0x2}, nil)
		if cap(b.data) != c.exp {
			t.Errorf("Unexpected memory space, required: %d, got: %d", c.exp, cap(b.data))
		}
	}
}

func TestBatchIngestSST(t *testing.T) {
	// Verify that Batch.IngestSST has the correct batch count and memtable
	// size.
	var b Batch
	b.ingestSST(1)
	require.Equal(t, int(b.Count()), 1)
	b.ingestSST(2)
	require.Equal(t, int(b.Count()), 2)
	// Ingested SSTs contribute nothing to the memtable size.
	require.Equal(t, int(b.memTableSize), 0)
	require.Equal(t, b.ingestedSSTBatch, true)
}

// TestBatchLen verifies that Batch.Len and len(Batch.Repr()) agree as
// records are added. The constants 33 and 43 are the expected encoded sizes
// after the Set and the subsequent Delete below.
func TestBatchLen(t *testing.T) {
	var b Batch

	requireLenAndReprEq := func(size int) {
		require.Equal(t, size, b.Len())
		require.Equal(t, size, len(b.Repr()))
	}

	requireLenAndReprEq(batchHeaderLen)

	key := "test-key"
	value := "test-value"

	err := b.Set([]byte(key), []byte(value), nil)
	require.NoError(t, err)

	requireLenAndReprEq(33)

	err = b.Delete([]byte(key), nil)
	require.NoError(t, err)

	requireLenAndReprEq(43)
}

// TestBatchEmpty runs the emptiness test both with a zero initial size and
// with the default initial size.
func TestBatchEmpty(t *testing.T) {
	testBatchEmpty(t, 0)
	testBatchEmpty(t, batchInitialSize)
}

// testBatchEmpty verifies Batch.Empty transitions: a fresh batch is empty,
// every mutation makes it non-empty, and Reset restores emptiness. It also
// checks that Reader, SeqNum, and iterator creation on an empty indexed
// batch behave sanely.
func testBatchEmpty(t *testing.T, size int) {
	b := newBatchWithSize(nil, size)
	require.True(t, b.Empty())

	ops := []func(*Batch) error{
		func(b *Batch) error { return b.Set(nil, nil, nil) },
		func(b *Batch) error { return b.Merge(nil, nil, nil) },
		func(b *Batch) error { return b.Delete(nil, nil) },
		func(b *Batch) error { return b.DeleteRange(nil, nil, nil) },
		func(b *Batch) error { return b.LogData(nil, nil) },
		func(b *Batch) error { return b.RangeKeySet(nil, nil, nil, nil, nil) },
		func(b *Batch) error { return b.RangeKeyUnset(nil, nil, nil, nil) },
		func(b *Batch) error { return b.RangeKeyDelete(nil, nil, nil) },
	}

	for _, op := range ops {
		require.NoError(t, op(b))
		require.False(t, b.Empty())
		b.Reset()
		require.True(t, b.Empty())
		// Reset may choose to reuse b.data, so clear it to the zero value in
		// order to test the lazy initialization of b.data.
		b = newBatchWithSize(nil, size)
	}

	_ = b.Reader()
	require.True(t, b.Empty())
	b.Reset()
	require.True(t, b.Empty())
	b = newBatchWithSize(nil, size)

	require.Equal(t, uint64(0), b.SeqNum())
	require.True(t, b.Empty())
	b.Reset()
	require.True(t, b.Empty())
	b = &Batch{}

	d, err := Open("", &Options{
		FS: vfs.NewMem(),
	})
	require.NoError(t, err)
	defer d.Close()
	ib := newIndexedBatch(d, DefaultComparer)
	iter, _ := ib.NewIter(nil)
	require.False(t, iter.First())
	iter2, err := iter.Clone(CloneOptions{})
	require.NoError(t, err)
	require.NoError(t, iter.Close())
	// Cloning a closed iterator must fail.
	_, err = iter.Clone(CloneOptions{})
	require.True(t, err != nil)
	require.False(t, iter2.First())
	require.NoError(t, iter2.Close())
}

// TestBatchApplyNoSyncWait applies batches with Sync set via ApplyNoSyncWait
// and checks that each write is immediately visible, only calling SyncWait
// on all the batches afterwards.
func TestBatchApplyNoSyncWait(t *testing.T) {
	db, err := Open("", &Options{
		FS: vfs.NewMem(),
	})
	require.NoError(t, err)
	defer db.Close()
	var batches []*Batch
	options := &WriteOptions{Sync: true}
	for i := 0; i < 10000; i++ {
		b := db.NewBatch()
		str := fmt.Sprintf("a%d", i)
		require.NoError(t, b.Set([]byte(str), []byte(str), nil))
		require.NoError(t, db.ApplyNoSyncWait(b, options))
		// k-v pair is visible even if not yet synced.
		val, closer, err := db.Get([]byte(str))
		require.NoError(t, err)
		require.Equal(t, str, string(val))
		closer.Close()
		batches = append(batches, b)
	}
	// Now wait for the syncs and release the batches.
	for _, b := range batches {
		require.NoError(t, b.SyncWait())
		b.Close()
	}
}

// TestBatchReset populates a batch (point set, range del, range key) and
// dirties its internal state, then verifies that Reset restores every field
// to its initial value and that the reset batch is reusable.
func TestBatchReset(t *testing.T) {
	db, err := Open("", &Options{
		FS: vfs.NewMem(),
	})
	require.NoError(t, err)
	defer db.Close()
	key := "test-key"
	value := "test-value"
	b := db.NewBatch()
	require.NoError(t, b.Set([]byte(key), []byte(value), nil))
	dd := b.DeleteRangeDeferred(len(key), len(value))
	copy(dd.Key, key)
	copy(dd.Value, value)
	dd.Finish()

	require.NoError(t, b.RangeKeySet([]byte(key), []byte(value), []byte(value), []byte(value), nil))

	// Dirty the rest of the batch state that Reset must clear.
	b.setSeqNum(100)
	b.applied.Store(true)
	b.commitErr = errors.New("test-error")
	b.commit.Add(1)
	b.fsyncWait.Add(1)
	require.Equal(t, uint32(3), b.Count())
	require.Equal(t, uint64(1), b.countRangeDels)
	require.Equal(t, uint64(1), b.countRangeKeys)
	require.True(t, len(b.data) > 0)
	require.True(t, b.SeqNum() > 0)
	require.True(t, b.memTableSize > 0)
	require.NotEqual(t, b.deferredOp, DeferredBatchOp{})
	// At this point b.data has not been modified since the db.NewBatch() and is
	// either nil or contains a byte slice of length batchHeaderLen, with a 0
	// seqnum encoded in data[0:8] and an arbitrary count encoded in data[8:12].
	// The following commented code will often fail.
	// count := binary.LittleEndian.Uint32(b.countData())
	// if count != 0 && count != 3 {
	// 	t.Fatalf("count: %d", count)
	// }
	// If we simply called b.Reset now and later used b.data to initialize
	// expected, the count in expected will also be arbitrary. So we fix the
	// count in b.data now by calling b.Repr(). This call isn't essential, since
	// we will call b.Repr() again, and just shows that it fixes the count in
	// b.data.
	_ = b.Repr()
	require.Equal(t, uint32(3), binary.LittleEndian.Uint32(b.countData()))

	b.Reset()
	require.Equal(t, db, b.db)
	require.Equal(t, false, b.applied.Load())
	require.Nil(t, b.commitErr)
	require.Equal(t, uint32(0), b.Count())
	require.Equal(t, uint64(0), b.countRangeDels)
	require.Equal(t, uint64(0), b.countRangeKeys)
	require.Equal(t, batchHeaderLen, len(b.data))
	require.Equal(t, uint64(0), b.SeqNum())
	require.Equal(t, uint64(0), b.memTableSize)
	require.Equal(t, FormatMajorVersion(0x00), b.minimumFormatMajorVersion)
	require.Equal(t, b.deferredOp, DeferredBatchOp{})
	_ = b.Repr()

	// A freshly-reset batch should be indistinguishable from a new one built
	// from the same repr.
	var expected Batch
	require.NoError(t, expected.SetRepr(b.data))
	expected.db = db
	require.Equal(t, &expected, b)

	// Reset batch can be used to write and commit a new record.
	b.Set([]byte(key), []byte(value), nil)
	require.NoError(t, db.Apply(b, nil))
	v, closer, err := db.Get([]byte(key))
	require.NoError(t, err)
	defer closer.Close()
	require.Equal(t, v, []byte(value))
}

// TestIndexedBatchReset verifies that resetting an indexed batch clears the
// point, range-del, and range-key skiplist indexes while keeping the batch
// usable for subsequent indexed reads and writes.
func TestIndexedBatchReset(t *testing.T) {
	// indexCount counts the entries in a batch skiplist index.
	indexCount := func(sl *batchskl.Skiplist) int {
		count := 0
		iter := sl.NewIter(nil, nil)
		defer iter.Close()
		for iter.First(); iter.Valid(); iter.Next() {
			count++
		}
		return count
	}
	db, err := Open("", &Options{
		FS: vfs.NewMem(),
	})
	require.NoError(t, err)
	defer db.Close()
	b := newIndexedBatch(db, DefaultComparer)
	start := "start-key"
	end := "end-key"
	key := "test-key"
	value := "test-value"
	b.DeleteRange([]byte(start), []byte(end), nil)
	b.Set([]byte(key), []byte(value), nil)
	require.NoError(t, b.
		RangeKeySet([]byte(start), []byte(end), []byte("suffix"), []byte(value), nil))
	require.NotNil(t, b.rangeKeyIndex)
	require.NotNil(t, b.rangeDelIndex)
	require.NotNil(t, b.index)
	require.Equal(t, 1, indexCount(b.index))

	b.Reset()
	// The comparer-derived fields and the point index survive Reset; the
	// range indexes are lazily rebuilt and so are nil.
	require.NotNil(t, b.cmp)
	require.NotNil(t, b.formatKey)
	require.NotNil(t, b.abbreviatedKey)
	require.NotNil(t, b.index)
	require.Nil(t, b.rangeDelIndex)
	require.Nil(t, b.rangeKeyIndex)

	// count returns the number of point keys visible through an iterator on
	// ib (checked against a clone of the iterator).
	count := func(ib *Batch) int {
		iter, _ := ib.NewIter(nil)
		defer iter.Close()
		iter2, err := iter.Clone(CloneOptions{})
		require.NoError(t, err)
		defer iter2.Close()
		var count [2]int
		for i, it := range []*Iterator{iter, iter2} {
			for it.First(); it.Valid(); it.Next() {
				count[i]++
			}
		}
		require.Equal(t, count[0], count[1])
		return count[0]
	}
	// contains reports whether the key/value pair is visible through an
	// iterator on ib (checked against a clone of the iterator).
	contains := func(ib *Batch, key, value string) bool {
		iter, _ := ib.NewIter(nil)
		defer iter.Close()
		iter2, err := iter.Clone(CloneOptions{})
		require.NoError(t, err)
		defer iter2.Close()
		var found [2]bool
		for i, it := range []*Iterator{iter, iter2} {
			for it.First(); it.Valid(); it.Next() {
				if string(it.Key()) == key &&
					string(it.Value()) == value {
					found[i] = true
				}
			}
		}
		require.Equal(t, found[0], found[1])
		return found[0]
	}
	// Set a key and check whether the key-value pair is visible.
	b.Set([]byte(key), []byte(value), nil)
	require.Equal(t, 1, indexCount(b.index))
	require.Equal(t, 1, count(b))
	require.True(t, contains(b, key, value))

	// Use range delete to delete the above inserted key-value pair.
	b.DeleteRange([]byte(key), []byte(value), nil)
	require.NotNil(t, b.rangeDelIndex)
	require.Equal(t, 1, indexCount(b.rangeDelIndex))
	require.Equal(t, 0, count(b))
	require.False(t, contains(b, key, value))
}

// TestIndexedBatchMutation tests mutating an indexed batch with an open
// iterator.
func TestIndexedBatchMutation(t *testing.T) {
	opts := &Options{
		Comparer:           testkeys.Comparer,
		FS:                 vfs.NewMem(),
		FormatMajorVersion: internalFormatNewest,
	}
	d, err := Open("", opts)
	require.NoError(t, err)
	defer func() { d.Close() }()

	b := newIndexedBatch(d, DefaultComparer)
	// iters tracks all named iterators opened by the datadriven commands so
	// they can be closed at the end of the test.
	iters := map[string]*Iterator{}
	defer func() {
		for _, iter := range iters {
			require.NoError(t, iter.Close())
		}
	}()

	datadriven.RunTest(t, "testdata/indexed_batch_mutation", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "batch":
			// Commit a write batch directly to the database.
			writeBatch := newBatch(d)
			if err := runBatchDefineCmd(td, writeBatch); err != nil {
				return err.Error()
			}
			if err := writeBatch.Commit(nil); err != nil {
				return err.Error()
			}
			return ""
		case "new-batch-iter":
			name := td.CmdArgs[0].String()
			iters[name], _ = b.NewIter(&IterOptions{
				KeyTypes: IterKeyTypePointsAndRanges,
			})
			return ""
		case "new-db-iter":
			name := td.CmdArgs[0].String()
			iters[name], _ = d.NewIter(&IterOptions{
				KeyTypes: IterKeyTypePointsAndRanges,
			})
			return ""
		case "new-batch":
			if b != nil {
				require.NoError(t, b.Close())
			}
			b = newIndexedBatch(d, opts.Comparer)
			if err := runBatchDefineCmd(td, b); err != nil {
				return err.Error()
			}
			return ""
		case "flush":
			require.NoError(t, d.Flush())
			return ""
		case "iter":
			var iter string
			td.ScanArgs(t, "iter", &iter)
			return runIterCmd(td, iters[iter], false /* closeIter */)
		case "mutate":
			// Apply a batch of mutations to the open indexed batch.
			mut := newBatch(d)
			if err := runBatchDefineCmd(td, mut); err != nil {
				return err.Error()
			}
			if err := b.Apply(mut, nil); err != nil {
				return err.Error()
			}
			return ""
		case "clone":
			var from, to string
			var refreshBatchView bool
			td.ScanArgs(t, "from", &from)
			td.ScanArgs(t, "to", &to)
			td.ScanArgs(t, "refresh-batch", &refreshBatchView)
			var err error
			iters[to], err = iters[from].Clone(CloneOptions{RefreshBatchView: refreshBatchView})
			if err != nil {
				return err.Error()
			}
			return ""
		case "reset":
			// Tear down all iterators and the database, then reopen fresh.
			for key, iter := range iters {
				if err := iter.Close(); err != nil {
					return err.Error()
				}
				delete(iters, key)
			}
			if d != nil {
				if err := d.Close(); err != nil {
					return err.Error()
				}
			}
			opts.FS = vfs.NewMem()
			d, err = Open("", opts)
			require.NoError(t, err)
			return ""
		default:
			return fmt.Sprintf("unrecognized command %q", td.Cmd)
		}
	})
}

// TestIndexedBatch_GlobalVisibility verifies that an iterator over an
// indexed batch does not observe state committed to the database after the
// iterator was constructed, even across SetOptions.
func TestIndexedBatch_GlobalVisibility(t *testing.T) {
	opts := &Options{
		FS:                 vfs.NewMem(),
		FormatMajorVersion: internalFormatNewest,
		Comparer:           testkeys.Comparer,
	}
	d, err := Open("", opts)
	require.NoError(t, err)
	defer d.Close()

	require.NoError(t, d.Set([]byte("foo"), []byte("foo"), nil))

	// Create an iterator over an empty indexed batch.
	b := newIndexedBatch(d, DefaultComparer)
	iterOpts := IterOptions{KeyTypes: IterKeyTypePointsAndRanges}
	iter, _ := b.NewIter(&iterOpts)
	defer iter.Close()

	// Mutate the database's committed state.
	mut := newBatch(d)
	require.NoError(t, mut.Set([]byte("bar"), []byte("bar"), nil))
	require.NoError(t, mut.DeleteRange([]byte("e"), []byte("g"), nil))
	require.NoError(t, mut.RangeKeySet([]byte("a"), []byte("c"), []byte("@1"), []byte("v"), nil))
	require.NoError(t, mut.Commit(nil))

	// scanIter formats a full forward scan of iter, showing for each position
	// the point value (or '.') and the range-key bounds (or '.').
	scanIter := func() string {
		var buf bytes.Buffer
		for valid := iter.First(); valid; valid = iter.Next() {
			fmt.Fprintf(&buf, "%s: (", iter.Key())
			hasPoint, hasRange := iter.HasPointAndRange()
			if hasPoint {
				fmt.Fprintf(&buf, "%s,", iter.Value())
			} else {
				fmt.Fprintf(&buf, ".,")
			}
			if hasRange {
				start, end := iter.RangeBounds()
				fmt.Fprintf(&buf, "[%s-%s)", start, end)
				writeRangeKeys(&buf, iter)
			} else {
				fmt.Fprintf(&buf, ".")
			}
			fmt.Fprintln(&buf, ")")
		}
		return strings.TrimSpace(buf.String())
	}
	// Scanning the iterator should only see the point key written before the
	// iterator was constructed.
	require.Equal(t, `foo: (foo,.)`, scanIter())

	// After calling SetOptions, the iterator should still only see the point
	// key written before the iterator was constructed. SetOptions refreshes the
	// iterator's view of its own indexed batch, but not committed state.
655 iter.SetOptions(&iterOpts) 656 require.Equal(t, `foo: (foo,.)`, scanIter()) 657 } 658 659 func TestFlushableBatchReset(t *testing.T) { 660 var b Batch 661 var err error 662 b.flushable, err = newFlushableBatch(&b, DefaultComparer) 663 require.NoError(t, err) 664 665 b.Reset() 666 require.Nil(t, b.flushable) 667 } 668 669 func TestBatchIncrement(t *testing.T) { 670 testCases := []uint32{ 671 0x00000000, 672 0x00000001, 673 0x00000002, 674 0x0000007f, 675 0x00000080, 676 0x000000fe, 677 0x000000ff, 678 0x00000100, 679 0x00000101, 680 0x000001ff, 681 0x00000200, 682 0x00000fff, 683 0x00001234, 684 0x0000fffe, 685 0x0000ffff, 686 0x00010000, 687 0x00010001, 688 0x000100fe, 689 0x000100ff, 690 0x00020100, 691 0x03fffffe, 692 0x03ffffff, 693 0x04000000, 694 0x04000001, 695 0x7fffffff, 696 0xfffffffe, 697 } 698 for _, tc := range testCases { 699 var buf [batchHeaderLen]byte 700 binary.LittleEndian.PutUint32(buf[8:12], tc) 701 var b Batch 702 b.SetRepr(buf[:]) 703 b.count++ 704 got := binary.LittleEndian.Uint32(b.Repr()[8:12]) 705 want := tc + 1 706 if got != want { 707 t.Errorf("input=%d: got %d, want %d", tc, got, want) 708 } 709 _, count := ReadBatch(b.Repr()) 710 if got != want { 711 t.Errorf("input=%d: got %d, want %d", tc, count, want) 712 } 713 } 714 715 err := func() (err error) { 716 defer func() { 717 if v := recover(); v != nil { 718 if verr, ok := v.(error); ok { 719 err = verr 720 } 721 } 722 }() 723 var buf [batchHeaderLen]byte 724 binary.LittleEndian.PutUint32(buf[8:12], 0xffffffff) 725 var b Batch 726 b.SetRepr(buf[:]) 727 b.count++ 728 b.Repr() 729 return nil 730 }() 731 if err != ErrInvalidBatch { 732 t.Fatalf("expected %v, but found %v", ErrInvalidBatch, err) 733 } 734 } 735 736 func TestBatchOpDoesIncrement(t *testing.T) { 737 var b Batch 738 key := []byte("foo") 739 value := []byte("bar") 740 741 if b.Count() != 0 { 742 t.Fatalf("new batch has a nonzero count: %d", b.Count()) 743 } 744 745 // Should increment count by 1 746 _ = b.Set(key, value, 
nil) 747 if b.Count() != 1 { 748 t.Fatalf("expected count: %d, got %d", 1, b.Count()) 749 } 750 751 var b2 Batch 752 // Should increment count by 1 each 753 _ = b2.Set(key, value, nil) 754 _ = b2.Delete(key, nil) 755 if b2.Count() != 2 { 756 t.Fatalf("expected count: %d, got %d", 2, b2.Count()) 757 } 758 759 // Should increment count by b2.count() 760 _ = b.Apply(&b2, nil) 761 if b.Count() != 3 { 762 t.Fatalf("expected count: %d, got %d", 3, b.Count()) 763 } 764 765 // Should increment count by 1 766 _ = b.Merge(key, value, nil) 767 if b.Count() != 4 { 768 t.Fatalf("expected count: %d, got %d", 4, b.Count()) 769 } 770 771 // Should NOT increment count. 772 _ = b.LogData([]byte("foobarbaz"), nil) 773 if b.Count() != 4 { 774 t.Fatalf("expected count: %d, got %d", 4, b.Count()) 775 } 776 } 777 778 func TestBatchGet(t *testing.T) { 779 testCases := []struct { 780 method string 781 memTableSize uint64 782 }{ 783 {"build", 64 << 20}, 784 {"build", 2 << 10}, 785 {"apply", 64 << 20}, 786 } 787 788 for _, c := range testCases { 789 t.Run(fmt.Sprintf("%s,mem=%d", c.method, c.memTableSize), func(t *testing.T) { 790 d, err := Open("", &Options{ 791 FS: vfs.NewMem(), 792 MemTableSize: c.memTableSize, 793 }) 794 if err != nil { 795 t.Fatalf("Open: %v", err) 796 } 797 defer d.Close() 798 var b *Batch 799 800 datadriven.RunTest(t, "testdata/batch_get", func(t *testing.T, td *datadriven.TestData) string { 801 switch td.Cmd { 802 case "define": 803 switch c.method { 804 case "build": 805 b = d.NewIndexedBatch() 806 case "apply": 807 b = d.NewBatch() 808 } 809 810 if err := runBatchDefineCmd(td, b); err != nil { 811 return err.Error() 812 } 813 814 switch c.method { 815 case "apply": 816 tmp := d.NewIndexedBatch() 817 tmp.Apply(b, nil) 818 b = tmp 819 } 820 return "" 821 822 case "commit": 823 if err := b.Commit(nil); err != nil { 824 return err.Error() 825 } 826 b = nil 827 return "" 828 829 case "get": 830 if len(td.CmdArgs) != 1 { 831 return fmt.Sprintf("%s expects 1 argument", 
td.Cmd) 832 } 833 v, closer, err := b.Get([]byte(td.CmdArgs[0].String())) 834 if err != nil { 835 return err.Error() 836 } 837 defer closer.Close() 838 return string(v) 839 840 default: 841 return fmt.Sprintf("unknown command: %s", td.Cmd) 842 } 843 }) 844 }) 845 } 846 } 847 848 func TestBatchIter(t *testing.T) { 849 var b *Batch 850 851 for _, method := range []string{"build", "apply"} { 852 for _, testdata := range []string{ 853 "testdata/internal_iter_next", "testdata/internal_iter_bounds"} { 854 t.Run(method, func(t *testing.T) { 855 datadriven.RunTest(t, testdata, func(t *testing.T, d *datadriven.TestData) string { 856 switch d.Cmd { 857 case "define": 858 switch method { 859 case "build": 860 b = newIndexedBatch(nil, DefaultComparer) 861 case "apply": 862 b = newBatch(nil) 863 } 864 865 for _, key := range strings.Split(d.Input, "\n") { 866 j := strings.Index(key, ":") 867 ikey := base.ParseInternalKey(key[:j]) 868 value := []byte(key[j+1:]) 869 b.Set(ikey.UserKey, value, nil) 870 } 871 872 switch method { 873 case "apply": 874 tmp := newIndexedBatch(nil, DefaultComparer) 875 tmp.Apply(b, nil) 876 b = tmp 877 } 878 return "" 879 880 case "iter": 881 var options IterOptions 882 for _, arg := range d.CmdArgs { 883 switch arg.Key { 884 case "lower": 885 if len(arg.Vals) != 1 { 886 return fmt.Sprintf( 887 "%s expects at most 1 value for lower", d.Cmd) 888 } 889 options.LowerBound = []byte(arg.Vals[0]) 890 case "upper": 891 if len(arg.Vals) != 1 { 892 return fmt.Sprintf( 893 "%s expects at most 1 value for upper", d.Cmd) 894 } 895 options.UpperBound = []byte(arg.Vals[0]) 896 default: 897 return fmt.Sprintf("unknown arg: %s", arg.Key) 898 } 899 } 900 iter := b.newInternalIter(&options) 901 defer iter.Close() 902 return runInternalIterCmd(t, d, iter) 903 904 default: 905 return fmt.Sprintf("unknown command: %s", d.Cmd) 906 } 907 }) 908 }) 909 } 910 } 911 } 912 913 func TestBatchRangeOps(t *testing.T) { 914 var b *Batch 915 916 datadriven.RunTest(t, 
		"testdata/batch_range_ops", func(t *testing.T, td *datadriven.TestData) string {
			switch td.Cmd {
			case "clear":
				b = nil
				return ""

			case "apply":
				if b == nil {
					b = newIndexedBatch(nil, DefaultComparer)
				}
				// NB: t here shadows the *testing.T parameter; failures in
				// this case surface as returned error strings instead.
				t := newBatch(nil)
				if err := runBatchDefineCmd(td, t); err != nil {
					return err.Error()
				}
				if err := b.Apply(t, nil); err != nil {
					return err.Error()
				}
				return ""

			case "define":
				if b == nil {
					b = newIndexedBatch(nil, DefaultComparer)
				}
				if err := runBatchDefineCmd(td, b); err != nil {
					return err.Error()
				}
				return ""

			case "scan":
				if len(td.CmdArgs) > 1 {
					return fmt.Sprintf("%s expects at most 1 argument", td.Cmd)
				}
				var fragmentIter keyspan.FragmentIterator
				var internalIter base.InternalIterator
				switch {
				case td.HasArg("range-del"):
					fragmentIter = b.newRangeDelIter(nil, math.MaxUint64)
					defer fragmentIter.Close()
				case td.HasArg("range-key"):
					fragmentIter = b.newRangeKeyIter(nil, math.MaxUint64)
					defer fragmentIter.Close()
				default:
					internalIter = b.newInternalIter(nil)
					defer internalIter.Close()
				}

				var buf bytes.Buffer
				if fragmentIter != nil {
					for s := fragmentIter.First(); s != nil; s = fragmentIter.Next() {
						// Strip the batch bit from the sequence numbers so the
						// output is stable.
						for i := range s.Keys {
							s.Keys[i].Trailer = base.MakeTrailer(
								s.Keys[i].SeqNum()&^base.InternalKeySeqNumBatch,
								s.Keys[i].Kind(),
							)
						}
						fmt.Fprintln(&buf, s)
					}
				} else {
					for k, v := internalIter.First(); k != nil; k, v = internalIter.Next() {
						k.SetSeqNum(k.SeqNum() &^ InternalKeySeqNumBatch)
						fmt.Fprintf(&buf, "%s:%s\n", k, v.InPlaceValue())
					}
				}
				return buf.String()

			default:
				return fmt.Sprintf("unknown command: %s", td.Cmd)
			}
		})
}

// TestBatchTooLarge verifies that growing a batch beyond maxBatchSize panics
// with ErrBatchTooLarge.
func TestBatchTooLarge(t *testing.T) {
	var b Batch
	var result interface{}
	func() {
		defer func() {
			if r := recover(); r != nil {
				result = r
			}
		}()
		b.grow(maxBatchSize)
	}()
	require.EqualValues(t, ErrBatchTooLarge, result)
}

// TestFlushableBatchIter exercises the flushable batch's internal iterator
// through the datadriven "internal_iter_next" test.
func TestFlushableBatchIter(t *testing.T) {
	var b *flushableBatch
	datadriven.RunTest(t, "testdata/internal_iter_next", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			batch := newBatch(nil)
			for _, key := range strings.Split(d.Input, "\n") {
				j := strings.Index(key, ":")
				ikey := base.ParseInternalKey(key[:j])
				value := []byte(fmt.Sprint(ikey.SeqNum()))
				batch.Set(ikey.UserKey, value, nil)
			}
			var err error
			b, err = newFlushableBatch(batch, DefaultComparer)
			require.NoError(t, err)
			return ""

		case "iter":
			iter := b.newIter(nil)
			defer iter.Close()
			return runInternalIterCmd(t, d, iter)

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// TestFlushableBatch exercises point, range-del, and range-key operations on
// a flushable batch, including re-sequencing via setSeqNum ("dump").
func TestFlushableBatch(t *testing.T) {
	var b *flushableBatch
	datadriven.RunTest(t, "testdata/flushable_batch", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			batch := newBatch(nil)
			for _, key := range strings.Split(d.Input, "\n") {
				j := strings.Index(key, ":")
				ikey := base.ParseInternalKey(key[:j])
				value := []byte(fmt.Sprint(ikey.SeqNum()))
				switch ikey.Kind() {
				case InternalKeyKindDelete:
					require.NoError(t, batch.Delete(ikey.UserKey, nil))
				case InternalKeyKindSet:
					require.NoError(t, batch.Set(ikey.UserKey, value, nil))
				case InternalKeyKindMerge:
					require.NoError(t, batch.Merge(ikey.UserKey, value, nil))
				case InternalKeyKindRangeDelete:
					require.NoError(t, batch.DeleteRange(ikey.UserKey, value, nil))
				case InternalKeyKindRangeKeyDelete:
					require.NoError(t, batch.RangeKeyDelete(ikey.UserKey, value, nil))
				case InternalKeyKindRangeKeySet:
					require.NoError(t, batch.RangeKeySet(ikey.UserKey, value, value, value, nil))
				case InternalKeyKindRangeKeyUnset:
					require.NoError(t, batch.RangeKeyUnset(ikey.UserKey, value, value, nil))
				}
			}
			var err error
			b, err = newFlushableBatch(batch, DefaultComparer)
			require.NoError(t, err)
			return ""

		case "iter":
			var opts IterOptions
			for _, arg := range d.CmdArgs {
				if len(arg.Vals) != 1 {
					return fmt.Sprintf("%s: %s=<value>", d.Cmd, arg.Key)
				}
				switch arg.Key {
				case "lower":
					opts.LowerBound = []byte(arg.Vals[0])
				case "upper":
					opts.UpperBound = []byte(arg.Vals[0])
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}

			iter := b.newIter(&opts)
			defer iter.Close()
			return runInternalIterCmd(t, d, iter)

		case "dump":
			if len(d.CmdArgs) != 1 || len(d.CmdArgs[0].Vals) != 1 || d.CmdArgs[0].Key != "seq" {
				return "dump seq=<value>\n"
			}
			seqNum, err := strconv.Atoi(d.CmdArgs[0].Vals[0])
			if err != nil {
				return err.Error()
			}
			b.setSeqNum(uint64(seqNum))

			var buf bytes.Buffer

			// Dump points first, then range dels, then range keys.
			iter := newInternalIterAdapter(b.newIter(nil))
			for valid := iter.First(); valid; valid = iter.Next() {
				fmt.Fprintf(&buf, "%s:%s\n", iter.Key(), iter.Value())
			}
			iter.Close()

			if rangeDelIter := b.newRangeDelIter(nil); rangeDelIter != nil {
				scanKeyspanIterator(&buf, rangeDelIter)
				rangeDelIter.Close()
			}
			if rangeKeyIter := b.newRangeKeyIter(nil); rangeKeyIter != nil {
				scanKeyspanIterator(&buf, rangeKeyIter)
				rangeKeyIter.Close()
			}
			return buf.String()

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// TestFlushableBatchDeleteRange exercises range deletions on a flushable
// batch through the shared datadriven "delete_range" test.
func TestFlushableBatchDeleteRange(t *testing.T) {
	var fb *flushableBatch
	var input string

	datadriven.RunTest(t, "testdata/delete_range", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "clear":
			input = ""
return "" 1124 1125 case "define": 1126 b := newBatch(nil) 1127 // NB: We can't actually add to the flushable batch as we can to a 1128 // memtable (which shares the "testdata/delete_range" data), so we fake 1129 // it by concatenating the input and rebuilding the flushable batch from 1130 // scratch. 1131 input += "\n" + td.Input 1132 td.Input = input 1133 if err := runBatchDefineCmd(td, b); err != nil { 1134 return err.Error() 1135 } 1136 var err error 1137 fb, err = newFlushableBatch(b, DefaultComparer) 1138 require.NoError(t, err) 1139 return "" 1140 1141 case "scan": 1142 var buf bytes.Buffer 1143 if td.HasArg("range-del") { 1144 fi := fb.newRangeDelIter(nil) 1145 defer fi.Close() 1146 scanKeyspanIterator(&buf, fi) 1147 } else { 1148 ii := fb.newIter(nil) 1149 defer ii.Close() 1150 scanInternalIter(&buf, ii) 1151 } 1152 return buf.String() 1153 1154 default: 1155 return fmt.Sprintf("unknown command: %s", td.Cmd) 1156 } 1157 }) 1158 } 1159 1160 func scanInternalIter(w io.Writer, ii internalIterator) { 1161 for k, v := ii.First(); k != nil; k, v = ii.Next() { 1162 fmt.Fprintf(w, "%s:%s\n", k, v.InPlaceValue()) 1163 } 1164 } 1165 1166 func scanKeyspanIterator(w io.Writer, ki keyspan.FragmentIterator) { 1167 for s := ki.First(); s != nil; s = ki.Next() { 1168 fmt.Fprintln(w, s) 1169 } 1170 } 1171 1172 func TestFlushableBatchBytesIterated(t *testing.T) { 1173 batch := newBatch(nil) 1174 for j := 0; j < 1000; j++ { 1175 key := make([]byte, 8+j%3) 1176 value := make([]byte, 7+j%5) 1177 batch.Set(key, value, nil) 1178 1179 fb, err := newFlushableBatch(batch, DefaultComparer) 1180 require.NoError(t, err) 1181 1182 var bytesIterated uint64 1183 it := fb.newFlushIter(nil, &bytesIterated) 1184 1185 var prevIterated uint64 1186 for key, _ := it.First(); key != nil; key, _ = it.Next() { 1187 if bytesIterated < prevIterated { 1188 t.Fatalf("bytesIterated moved backward: %d < %d", bytesIterated, prevIterated) 1189 } 1190 prevIterated = bytesIterated 1191 } 1192 1193 expected 
:= fb.inuseBytes() 1194 if bytesIterated != expected { 1195 t.Fatalf("bytesIterated: got %d, want %d", bytesIterated, expected) 1196 } 1197 } 1198 } 1199 1200 func TestEmptyFlushableBatch(t *testing.T) { 1201 // Verify that we can create a flushable batch on an empty batch. 1202 fb, err := newFlushableBatch(newBatch(nil), DefaultComparer) 1203 require.NoError(t, err) 1204 it := newInternalIterAdapter(fb.newIter(nil)) 1205 require.False(t, it.First()) 1206 } 1207 1208 func TestBatchCommitStats(t *testing.T) { 1209 testFunc := func() error { 1210 db, err := Open("", &Options{ 1211 FS: vfs.NewMem(), 1212 }) 1213 require.NoError(t, err) 1214 defer db.Close() 1215 b := db.NewBatch() 1216 defer b.Close() 1217 stats := b.CommitStats() 1218 require.Equal(t, BatchCommitStats{}, stats) 1219 1220 // The stall code peers into the internals, instead of adding general 1221 // purpose hooks, to avoid changing production code. We can revisit this 1222 // choice if it becomes hard to maintain. 1223 1224 // Commit semaphore stall funcs. 1225 var unstallCommitSemaphore func() 1226 stallCommitSemaphore := func() { 1227 commitPipeline := db.commit 1228 commitSemaphoreReserved := 0 1229 done := false 1230 for !done { 1231 select { 1232 case commitPipeline.commitQueueSem <- struct{}{}: 1233 commitSemaphoreReserved++ 1234 default: 1235 done = true 1236 } 1237 if done { 1238 break 1239 } 1240 } 1241 unstallCommitSemaphore = func() { 1242 for i := 0; i < commitSemaphoreReserved; i++ { 1243 <-commitPipeline.commitQueueSem 1244 } 1245 } 1246 } 1247 1248 // Memstable stall funcs. 1249 var unstallMemtable func() 1250 stallMemtable := func() { 1251 db.mu.Lock() 1252 defer db.mu.Unlock() 1253 prev := db.opts.MemTableStopWritesThreshold 1254 db.opts.MemTableStopWritesThreshold = 0 1255 unstallMemtable = func() { 1256 db.mu.Lock() 1257 defer db.mu.Unlock() 1258 db.opts.MemTableStopWritesThreshold = prev 1259 db.mu.compact.cond.Broadcast() 1260 } 1261 } 1262 1263 // L0 read-amp stall funcs. 
1264 var unstallL0ReadAmp func() 1265 stallL0ReadAmp := func() { 1266 db.mu.Lock() 1267 defer db.mu.Unlock() 1268 prev := db.opts.L0StopWritesThreshold 1269 db.opts.L0StopWritesThreshold = 0 1270 unstallL0ReadAmp = func() { 1271 db.mu.Lock() 1272 defer db.mu.Unlock() 1273 db.opts.L0StopWritesThreshold = prev 1274 db.mu.compact.cond.Broadcast() 1275 } 1276 } 1277 1278 // Commit wait stall funcs. 1279 var unstallCommitWait func() 1280 stallCommitWait := func() { 1281 b.commit.Add(1) 1282 unstallCommitWait = func() { 1283 b.commit.Done() 1284 } 1285 } 1286 1287 // Stall everything. 1288 stallCommitSemaphore() 1289 stallMemtable() 1290 stallL0ReadAmp() 1291 stallCommitWait() 1292 1293 // Exceed initialMemTableSize -- this is needed to make stallMemtable work. 1294 require.NoError(t, b.Set(make([]byte, initialMemTableSize), nil, nil)) 1295 1296 var commitWG sync.WaitGroup 1297 commitWG.Add(1) 1298 go func() { 1299 require.NoError(t, db.Apply(b, &WriteOptions{Sync: true})) 1300 commitWG.Done() 1301 }() 1302 // Unstall things in the order that the stalls will happen. 1303 sleepDuration := 10 * time.Millisecond 1304 time.Sleep(sleepDuration) 1305 unstallCommitSemaphore() 1306 time.Sleep(sleepDuration) 1307 unstallMemtable() 1308 time.Sleep(sleepDuration) 1309 unstallL0ReadAmp() 1310 time.Sleep(sleepDuration) 1311 unstallCommitWait() 1312 1313 // Wait for Apply to return. 
1314 commitWG.Wait() 1315 stats = b.CommitStats() 1316 expectedDuration := (2 * sleepDuration) / 3 1317 if expectedDuration > stats.SemaphoreWaitDuration { 1318 return errors.Errorf("SemaphoreWaitDuration %s is too low", 1319 stats.SemaphoreWaitDuration.String()) 1320 } 1321 if expectedDuration > stats.MemTableWriteStallDuration { 1322 return errors.Errorf("MemTableWriteStallDuration %s is too low", 1323 stats.MemTableWriteStallDuration.String()) 1324 } 1325 if expectedDuration > stats.L0ReadAmpWriteStallDuration { 1326 return errors.Errorf("L0ReadAmpWriteStallDuration %s is too low", 1327 stats.L0ReadAmpWriteStallDuration) 1328 } 1329 if expectedDuration > stats.CommitWaitDuration { 1330 return errors.Errorf("CommitWaitDuration %s is too low", 1331 stats.CommitWaitDuration) 1332 } 1333 if 5*expectedDuration > stats.TotalDuration { 1334 return errors.Errorf("TotalDuration %s is too low", 1335 stats.TotalDuration) 1336 } 1337 return nil 1338 } 1339 // Try a few times, and succeed if one of them succeeds. 1340 var err error 1341 for i := 0; i < 5; i++ { 1342 err = testFunc() 1343 if err == nil { 1344 break 1345 } 1346 } 1347 require.NoError(t, err) 1348 } 1349 1350 func TestBatchReader(t *testing.T) { 1351 datadriven.RunTest(t, "testdata/batch_reader", func(t *testing.T, td *datadriven.TestData) string { 1352 switch td.Cmd { 1353 case "scan": 1354 var repr bytes.Buffer 1355 for i, l := range strings.Split(td.Input, "\n") { 1356 // Remove any trailing comments behind #. 1357 if i := strings.IndexRune(l, '#'); i >= 0 { 1358 l = l[:i] 1359 } 1360 // Strip all whitespace from the line. 
1361 l = strings.Map(func(r rune) rune { 1362 if unicode.IsSpace(r) { 1363 return -1 1364 } 1365 return r 1366 }, l) 1367 b, err := hex.DecodeString(l) 1368 if err != nil { 1369 return fmt.Sprintf("failed to decode hex; line %d", i) 1370 } 1371 repr.Write(b) 1372 } 1373 r, count := ReadBatch(repr.Bytes()) 1374 var out strings.Builder 1375 fmt.Fprintf(&out, "Count: %d\n", count) 1376 for { 1377 kind, ukey, value, ok, err := r.Next() 1378 if !ok { 1379 if err != nil { 1380 fmt.Fprintf(&out, "err: %s\n", err) 1381 } else { 1382 fmt.Fprint(&out, "eof") 1383 } 1384 break 1385 } 1386 fmt.Fprintf(&out, "%s: %q: %q\n", kind, ukey, value) 1387 } 1388 return out.String() 1389 1390 default: 1391 return fmt.Sprintf("unrecognized command %q", td.Cmd) 1392 } 1393 }) 1394 } 1395 1396 func BenchmarkBatchSet(b *testing.B) { 1397 value := make([]byte, 10) 1398 for i := range value { 1399 value[i] = byte(i) 1400 } 1401 key := make([]byte, 8) 1402 batch := newBatch(nil) 1403 1404 b.ResetTimer() 1405 1406 const batchSize = 1000 1407 for i := 0; i < b.N; i += batchSize { 1408 end := i + batchSize 1409 if end > b.N { 1410 end = b.N 1411 } 1412 1413 for j := i; j < end; j++ { 1414 binary.BigEndian.PutUint64(key, uint64(j)) 1415 batch.Set(key, value, nil) 1416 } 1417 batch.Reset() 1418 } 1419 1420 b.StopTimer() 1421 } 1422 1423 func BenchmarkIndexedBatchSet(b *testing.B) { 1424 value := make([]byte, 10) 1425 for i := range value { 1426 value[i] = byte(i) 1427 } 1428 key := make([]byte, 8) 1429 batch := newIndexedBatch(nil, DefaultComparer) 1430 1431 b.ResetTimer() 1432 1433 const batchSize = 1000 1434 for i := 0; i < b.N; i += batchSize { 1435 end := i + batchSize 1436 if end > b.N { 1437 end = b.N 1438 } 1439 1440 for j := i; j < end; j++ { 1441 binary.BigEndian.PutUint64(key, uint64(j)) 1442 batch.Set(key, value, nil) 1443 } 1444 batch.Reset() 1445 } 1446 1447 b.StopTimer() 1448 } 1449 1450 func BenchmarkBatchSetDeferred(b *testing.B) { 1451 value := make([]byte, 10) 1452 for i := range 
value { 1453 value[i] = byte(i) 1454 } 1455 key := make([]byte, 8) 1456 batch := newBatch(nil) 1457 1458 b.ResetTimer() 1459 1460 const batchSize = 1000 1461 for i := 0; i < b.N; i += batchSize { 1462 end := i + batchSize 1463 if end > b.N { 1464 end = b.N 1465 } 1466 1467 for j := i; j < end; j++ { 1468 binary.BigEndian.PutUint64(key, uint64(j)) 1469 deferredOp := batch.SetDeferred(len(key), len(value)) 1470 1471 copy(deferredOp.Key, key) 1472 copy(deferredOp.Value, value) 1473 1474 deferredOp.Finish() 1475 } 1476 batch.Reset() 1477 } 1478 1479 b.StopTimer() 1480 } 1481 1482 func BenchmarkIndexedBatchSetDeferred(b *testing.B) { 1483 value := make([]byte, 10) 1484 for i := range value { 1485 value[i] = byte(i) 1486 } 1487 key := make([]byte, 8) 1488 batch := newIndexedBatch(nil, DefaultComparer) 1489 1490 b.ResetTimer() 1491 1492 const batchSize = 1000 1493 for i := 0; i < b.N; i += batchSize { 1494 end := i + batchSize 1495 if end > b.N { 1496 end = b.N 1497 } 1498 1499 for j := i; j < end; j++ { 1500 binary.BigEndian.PutUint64(key, uint64(j)) 1501 deferredOp := batch.SetDeferred(len(key), len(value)) 1502 1503 copy(deferredOp.Key, key) 1504 copy(deferredOp.Value, value) 1505 1506 deferredOp.Finish() 1507 } 1508 batch.Reset() 1509 } 1510 1511 b.StopTimer() 1512 } 1513 1514 func TestBatchMemTableSizeOverflow(t *testing.T) { 1515 opts := &Options{ 1516 FS: vfs.NewMem(), 1517 } 1518 opts.EnsureDefaults() 1519 d, err := Open("", opts) 1520 require.NoError(t, err) 1521 1522 bigValue := make([]byte, 1000) 1523 b := d.NewBatch() 1524 1525 // memTableSize can overflow as a uint32. 
1526 b.memTableSize = math.MaxUint32 - 50 1527 for i := 0; i < 10; i++ { 1528 k := fmt.Sprintf("key-%05d", i) 1529 require.NoError(t, b.Set([]byte(k), bigValue, nil)) 1530 } 1531 require.Greater(t, b.memTableSize, uint64(math.MaxUint32)) 1532 require.NoError(t, b.Close()) 1533 require.NoError(t, d.Close()) 1534 } 1535 1536 // TestBatchSpanCaching stress tests the caching of keyspan.Spans for range 1537 // tombstones and range keys. 1538 func TestBatchSpanCaching(t *testing.T) { 1539 opts := &Options{ 1540 Comparer: testkeys.Comparer, 1541 FS: vfs.NewMem(), 1542 FormatMajorVersion: internalFormatNewest, 1543 } 1544 d, err := Open("", opts) 1545 require.NoError(t, err) 1546 defer d.Close() 1547 1548 ks := testkeys.Alpha(1) 1549 b := d.NewIndexedBatch() 1550 for i := int64(0); i < ks.Count(); i++ { 1551 k := testkeys.Key(ks, i) 1552 require.NoError(t, b.Set(k, k, nil)) 1553 } 1554 1555 seed := int64(time.Now().UnixNano()) 1556 t.Logf("seed = %d", seed) 1557 rng := rand.New(rand.NewSource(seed)) 1558 iters := make([][]*Iterator, ks.Count()) 1559 defer func() { 1560 for _, keyIters := range iters { 1561 for _, iter := range keyIters { 1562 _ = iter.Close() 1563 } 1564 } 1565 }() 1566 1567 // This test begins with one point key for every letter of the alphabet. 1568 // Over the course of the test, point keys are 'replaced' with range keys 1569 // with narrow bounds from left to right. Iterators are created at random, 1570 // sometimes from the batch and sometimes by cloning existing iterators. 1571 1572 checkIter := func(iter *Iterator, nextKey int64) { 1573 var i int64 1574 for valid := iter.First(); valid; valid = iter.Next() { 1575 hasPoint, hasRange := iter.HasPointAndRange() 1576 require.Equal(t, testkeys.Key(ks, i), iter.Key()) 1577 if i < nextKey { 1578 // This key should not exist as a point key, just a range key. 
1579 require.False(t, hasPoint) 1580 require.True(t, hasRange) 1581 } else { 1582 require.True(t, hasPoint) 1583 require.False(t, hasRange) 1584 } 1585 i++ 1586 } 1587 require.Equal(t, ks.Count(), i) 1588 } 1589 1590 // Each iteration of the below loop either reads or writes. 1591 // 1592 // A write iteration writes a new RANGEDEL and RANGEKEYSET into the batch, 1593 // covering a single point key seeded above. Writing these two span keys 1594 // together 'replaces' the point key with a range key. Each write iteration 1595 // ratchets nextWriteKey so the next write iteration will write the next 1596 // key. 1597 // 1598 // A read iteration creates a new iterator and ensures its state is 1599 // expected: some prefix of only point keys, followed by a suffix of only 1600 // range keys. Iterators created through Clone should observe the point keys 1601 // that existed when the cloned iterator was created. 1602 for nextWriteKey := int64(0); nextWriteKey < ks.Count(); { 1603 p := rng.Float64() 1604 switch { 1605 case p < .10: /* 10 % */ 1606 // Write a new range deletion and range key. 1607 start := testkeys.Key(ks, nextWriteKey) 1608 end := append(start, 0x00) 1609 require.NoError(t, b.DeleteRange(start, end, nil)) 1610 require.NoError(t, b.RangeKeySet(start, end, nil, []byte("foo"), nil)) 1611 nextWriteKey++ 1612 case p < .55: /* 45 % */ 1613 // Create a new iterator directly from the batch and check that it 1614 // observes the correct state. 1615 iter, _ := b.NewIter(&IterOptions{KeyTypes: IterKeyTypePointsAndRanges}) 1616 checkIter(iter, nextWriteKey) 1617 iters[nextWriteKey] = append(iters[nextWriteKey], iter) 1618 default: /* 45 % */ 1619 // Create a new iterator through cloning a random existing iterator 1620 // and check that it observes the right state. 
1621 readKey := rng.Int63n(nextWriteKey + 1) 1622 itersForReadKey := iters[readKey] 1623 if len(itersForReadKey) == 0 { 1624 continue 1625 } 1626 iter, err := itersForReadKey[rng.Intn(len(itersForReadKey))].Clone(CloneOptions{}) 1627 require.NoError(t, err) 1628 checkIter(iter, readKey) 1629 iters[readKey] = append(iters[readKey], iter) 1630 } 1631 } 1632 }