github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/storage/block/block_test.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package block

import (
	"sync"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/x/context"
	"github.com/m3db/m3/src/x/ident"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func testDatabaseBlock(ctrl *gomock.Controller) *dbBlock {
	opts := NewOptions()
	b := NewDatabaseBlock(xtime.Now(), 0, ts.Segment{}, opts, namespace.Context{}).(*dbBlock)
	return b
}

func testDatabaseSeriesBlocks() *databaseSeriesBlocks {
	return NewDatabaseSeriesBlocks(0).(*databaseSeriesBlocks)
}

func testDatabaseSeriesBlocksWithTimes(times []xtime.UnixNano, sizes []time.Duration) *databaseSeriesBlocks {
	opts := NewOptions()
	blocks := testDatabaseSeriesBlocks()
	for i := range times {
		block := opts.DatabaseBlockPool().Get()
		block.Reset(times[i], sizes[i], ts.Segment{}, namespace.Context{})
		blocks.AddBlock(block)
	}
	return blocks
}

func validateBlocks(t *testing.T, blocks *databaseSeriesBlocks, minTime,
	maxTime xtime.UnixNano, expectedTimes []xtime.UnixNano, expectedSizes []time.Duration) {
	require.Equal(t, minTime, blocks.MinTime())
	require.Equal(t, maxTime, blocks.MaxTime())
	allBlocks := blocks.elems
	require.Equal(t, len(expectedTimes), len(allBlocks))
	for i, timestamp := range expectedTimes {
		block, exists := allBlocks[timestamp]
		require.True(t, exists)
		assert.Equal(t, expectedSizes[i], block.BlockSize())
	}
}

func TestDatabaseBlockReadFromClosedBlock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ctx := context.NewBackground()
	defer ctx.Close()

	block := testDatabaseBlock(ctrl)
	block.Close()
	_, err := block.Stream(ctx)
	require.Equal(t, errReadFromClosedBlock, err)
}

func TestDatabaseBlockChecksum(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	block := testDatabaseBlock(ctrl)
	block.checksum = uint32(10)

	checksum, err := block.Checksum()
	require.NoError(t, err)
	require.Equal(t, block.checksum, checksum)
}

type segmentReaderFinalizeCounter struct {
	xio.SegmentReader
	// Use a pointer so we can update it from the Finalize method
	// which must not be a pointer receiver (in order to satisfy
	// the interface)
	finalizeCount *int
}

func (r segmentReaderFinalizeCounter) Finalize() {
	*r.finalizeCount++
}

// TestDatabaseBlockMerge lazily merges two blocks and verifies that the correct
// data is returned, as well as that the underlying streams are not double-finalized
// (regression test).
func TestDatabaseBlockMerge(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Test data
	curr := xtime.Now()
	data := []ts.Datapoint{
		ts.Datapoint{
			TimestampNanos: curr,
			Value:          0,
		},
		ts.Datapoint{
			TimestampNanos: curr.Add(time.Second),
			Value:          1,
		},
	}
	durations := []time.Duration{
		time.Minute,
		time.Hour,
	}

	// Mock segment reader pool so we count the number of Finalize() calls
	segmentReaders := []segmentReaderFinalizeCounter{}
	mockSegmentReaderPool := xio.NewMockSegmentReaderPool(ctrl)
	getCall := mockSegmentReaderPool.EXPECT().Get()
	getCall.DoAndReturn(func() xio.SegmentReader {
		val := 0
		reader := segmentReaderFinalizeCounter{
			xio.NewSegmentReader(ts.Segment{}),
			&val,
		}
		segmentReaders = append(segmentReaders, reader)
		return reader
	}).AnyTimes()

	// Setup
	blockOpts := NewOptions().SetSegmentReaderPool(mockSegmentReaderPool)
	encodingOpts := encoding.NewOptions()

	// Create the two blocks we plan to merge
	encoder := m3tsz.NewEncoder(data[0].TimestampNanos, nil, true, encodingOpts)
	encoder.Encode(data[0], xtime.Second, nil)
	seg := encoder.Discard()
	block1 := NewDatabaseBlock(data[0].TimestampNanos, durations[0], seg,
		blockOpts, namespace.Context{}).(*dbBlock)

	encoder.Reset(data[1].TimestampNanos, 10, nil)
	encoder.Encode(data[1], xtime.Second, nil)
	seg = encoder.Discard()
	block2 := NewDatabaseBlock(data[1].TimestampNanos, durations[1], seg,
		blockOpts, namespace.Context{}).(*dbBlock)

	// Lazily merge the two blocks
	block1.Merge(block2)

	// BlockSize should not change
	require.Equal(t, durations[0], block1.BlockSize())

	// Try and read the data back and verify it looks good
	depCtx := block1.opts.ContextPool().Get()
	stream, err := block1.Stream(depCtx)
	require.NoError(t, err)
	seg, err = stream.Segment()
	require.NoError(t, err)
	reader := xio.NewSegmentReader(seg)
	iter := m3tsz.NewReaderIterator(reader, true, encodingOpts)

	i := 0
	for iter.Next() {
		dp, _, _ := iter.Current()
		require.True(t, data[i].Equal(dp))
		i++
	}
	require.NoError(t, iter.Err())

	// Make sure the checksum was updated
	mergedChecksum, err := block1.Checksum()
	require.NoError(t, err)
	require.Equal(t, seg.CalculateChecksum(), mergedChecksum)

	// Make sure each segment reader was only finalized once
	require.Equal(t, 3, len(segmentReaders))
	depCtx.BlockingClose()
	block1.Close()
	block2.Close()
	for _, segmentReader := range segmentReaders {
		require.Equal(t, 1, *segmentReader.finalizeCount)
	}
}

// TestDatabaseBlockMergeRace is similar to TestDatabaseBlockMerge, except it
// tries to stream the data in multiple goroutines to ensure the merging isn't
// racy; this is a regression test for a known issue.
func TestDatabaseBlockMergeRace(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	var (
		numRuns     = 1000
		numRoutines = 20
	)

	for i := 0; i < numRuns; i++ {
		func() {
			// Test data
			curr := xtime.Now()
			data := []ts.Datapoint{
				ts.Datapoint{
					TimestampNanos: curr,
					Value:          0,
				},
				ts.Datapoint{
					TimestampNanos: curr.Add(time.Second),
					Value:          1,
				},
			}
			durations := []time.Duration{
				time.Minute,
				time.Hour,
			}

			// Setup
			blockOpts := NewOptions()
			encodingOpts := encoding.NewOptions()

			// Create the two blocks we plan to merge
			encoder := m3tsz.NewEncoder(data[0].TimestampNanos, nil, true, encodingOpts)
			encoder.Encode(data[0], xtime.Second, nil)
			seg := encoder.Discard()
			block1 := NewDatabaseBlock(data[0].TimestampNanos, durations[0], seg, blockOpts, namespace.Context{}).(*dbBlock)

			encoder.Reset(data[1].TimestampNanos, 10, nil)
			encoder.Encode(data[1], xtime.Second, nil)
			seg = encoder.Discard()
			block2 := NewDatabaseBlock(data[1].TimestampNanos, durations[1], seg, blockOpts, namespace.Context{}).(*dbBlock)

			// Lazily merge the two blocks
			block1.Merge(block2)

			var wg sync.WaitGroup
			wg.Add(numRoutines)

			blockFn := func(block *dbBlock) {
				defer wg.Done()

				depCtx := block.opts.ContextPool().Get()
				var (
					// Make sure we shadow the top level variables
					// with the same name
					stream xio.BlockReader
					seg    ts.Segment
					err    error
				)
				stream, err = block.Stream(depCtx)
				block.Close()
				if err == errReadFromClosedBlock {
					return
				}
				require.NoError(t, err)

				seg, err = stream.Segment()
				require.NoError(t, err)
				reader := xio.NewSegmentReader(seg)
				iter := m3tsz.NewReaderIterator(reader, true, encodingOpts)

				i := 0
				for iter.Next() {
					dp, _, _ := iter.Current()
					require.True(t, data[i].Equal(dp))
					i++
				}
				require.NoError(t, iter.Err())
			}

			for i := 0; i < numRoutines; i++ {
				go blockFn(block1)
			}

			wg.Wait()
		}()
	}
}

// TestDatabaseBlockMergeChained is similar to TestDatabaseBlockMerge except
// we try chaining multiple merge calls onto the same block.
func TestDatabaseBlockMergeChained(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Test data
	curr := xtime.Now()
	data := []ts.Datapoint{
		ts.Datapoint{
			TimestampNanos: curr,
			Value:          0,
		},
		ts.Datapoint{
			TimestampNanos: curr.Add(time.Second),
			Value:          1,
		},
		ts.Datapoint{
			TimestampNanos: curr.Add(2 * time.Second),
			Value:          2,
		},
	}
	durations := []time.Duration{
		time.Second,
		time.Minute,
		time.Hour,
	}

	// Mock segment reader pool so we count the number of Finalize() calls
	segmentReaders := []segmentReaderFinalizeCounter{}
	mockSegmentReaderPool := xio.NewMockSegmentReaderPool(ctrl)
	getCall := mockSegmentReaderPool.EXPECT().Get()
	getCall.DoAndReturn(func() xio.SegmentReader {
		val := 0
		reader := segmentReaderFinalizeCounter{
			xio.NewSegmentReader(ts.Segment{}),
			&val,
		}
		segmentReaders = append(segmentReaders, reader)
		return reader
	}).AnyTimes()

	// Setup
	blockOpts := NewOptions().SetSegmentReaderPool(mockSegmentReaderPool)
	encodingOpts := encoding.NewOptions()

	// Create the three blocks we plan to merge
	encoder := m3tsz.NewEncoder(data[0].TimestampNanos, nil, true, encodingOpts)
	encoder.Encode(data[0], xtime.Second, nil)
	seg := encoder.Discard()
	block1 := NewDatabaseBlock(data[0].TimestampNanos, durations[0], seg, blockOpts, namespace.Context{}).(*dbBlock)

	encoder.Reset(data[1].TimestampNanos, 10, nil)
	encoder.Encode(data[1], xtime.Second, nil)
	seg = encoder.Discard()
	block2 := NewDatabaseBlock(data[1].TimestampNanos, durations[1], seg, blockOpts, namespace.Context{}).(*dbBlock)

	encoder.Reset(data[2].TimestampNanos, 10, nil)
	encoder.Encode(data[2], xtime.Second, nil)
	seg = encoder.Discard()
	block3 := NewDatabaseBlock(data[2].TimestampNanos, durations[2], seg, blockOpts, namespace.Context{}).(*dbBlock)

	// Lazily merge two blocks into block1
	block1.Merge(block2)
	block1.Merge(block3)

	// BlockSize should not change
	require.Equal(t, durations[0], block1.BlockSize())

	// Try and read the data back and verify it looks good
	depCtx := block1.opts.ContextPool().Get()
	stream, err := block1.Stream(depCtx)
	require.NoError(t, err)
	seg, err = stream.Segment()
	require.NoError(t, err)
	reader := xio.NewSegmentReader(seg)
	iter := m3tsz.NewReaderIterator(reader, true, encodingOpts)

	i := 0
	for iter.Next() {
		dp, _, _ := iter.Current()
		require.True(t, data[i].Equal(dp))
		i++
	}
	require.NoError(t, iter.Err())

	// Make sure the checksum was updated
	mergedChecksum, err := block1.Checksum()
	require.NoError(t, err)
	require.Equal(t, seg.CalculateChecksum(), mergedChecksum)

	// Make sure each segment reader was only finalized once
	require.Equal(t, 5, len(segmentReaders))
	depCtx.BlockingClose()
	block1.Close()
	block2.Close()
	for _, segmentReader := range segmentReaders {
		require.Equal(t, 1, *segmentReader.finalizeCount)
	}
}

func TestDatabaseBlockMergeErrorFromDisk(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Setup
	var (
		curr      = xtime.Now()
		blockOpts = NewOptions()
	)

	// Create the two blocks we plan to merge
	block1 := NewDatabaseBlock(curr, 0, ts.Segment{}, blockOpts, namespace.Context{}).(*dbBlock)
	block2 := NewDatabaseBlock(curr, 0, ts.Segment{}, blockOpts, namespace.Context{}).(*dbBlock)

	// Mark only block 2 as retrieved from disk so we can make sure it checks
	// the block that is being merged, as well as the one that is being merged
	// into.
	block2.wasRetrievedFromDisk = true

	require.False(t, block1.wasRetrievedFromDisk)
	require.Equal(t, errTriedToMergeBlockFromDisk, block1.Merge(block2))
	require.Equal(t, errTriedToMergeBlockFromDisk, block2.Merge(block1))
}

// TestDatabaseBlockChecksumMergesAndRecalculates makes sure that the Checksum method
// will check if a lazy-merge is pending, and if so perform it and recalculate the checksum.
func TestDatabaseBlockChecksumMergesAndRecalculates(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Test data
	curr := xtime.Now()
	data := []ts.Datapoint{
		ts.Datapoint{
			TimestampNanos: curr,
			Value:          0,
		},
		ts.Datapoint{
			TimestampNanos: curr.Add(time.Second),
			Value:          1,
		},
	}
	durations := []time.Duration{
		time.Minute,
		time.Hour,
	}

	// Setup
	blockOpts := NewOptions()
	encodingOpts := encoding.NewOptions()

	// Create the two blocks we plan to merge
	encoder := m3tsz.NewEncoder(data[0].TimestampNanos, nil, true, encodingOpts)
	encoder.Encode(data[0], xtime.Second, nil)
	seg := encoder.Discard()
	block1 := NewDatabaseBlock(data[0].TimestampNanos, durations[0], seg, blockOpts, namespace.Context{}).(*dbBlock)

	encoder.Reset(data[1].TimestampNanos, 10, nil)
	encoder.Encode(data[1], xtime.Second, nil)
	seg = encoder.Discard()
	block2 := NewDatabaseBlock(data[1].TimestampNanos, durations[1], seg, blockOpts, namespace.Context{}).(*dbBlock)

	// Keep track of the old checksum so we can make sure it changed
	oldChecksum, err := block1.Checksum()
	require.NoError(t, err)

	// Lazily merge the two blocks
	block1.Merge(block2)

	// BlockSize should not change
	require.Equal(t, durations[0], block1.BlockSize())

	// Make sure the checksum was updated
	newChecksum, err := block1.Checksum()
	require.NoError(t, err)
	require.NotEqual(t, oldChecksum, newChecksum)

	// Try and read the data back and verify it looks good
	depCtx := block1.opts.ContextPool().Get()
	stream, err := block1.Stream(depCtx)
	require.NoError(t, err)
	seg, err = stream.Segment()
	require.NoError(t, err)
	reader := xio.NewSegmentReader(seg)
	iter := m3tsz.NewReaderIterator(reader, true, encodingOpts)

	i := 0
	for iter.Next() {
		dp, _, _ := iter.Current()
		require.True(t, data[i].Equal(dp))
		i++
	}
	require.NoError(t, iter.Err())

	// Make sure the new checksum is correct
	mergedChecksum, err := block1.Checksum()
	require.NoError(t, err)
	require.Equal(t, seg.CalculateChecksum(), mergedChecksum)
}

// TestDatabaseBlockStreamMergePerformsCopy verifies that a stream obtained from a
// lazily merged block remains readable after the block itself is closed, i.e. the
// merged stream does not alias data owned by the closed block.
func TestDatabaseBlockStreamMergePerformsCopy(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Test data
	curr := xtime.Now()
	data := []ts.Datapoint{
		ts.Datapoint{
			TimestampNanos: curr,
			Value:          0,
		},
		ts.Datapoint{
			TimestampNanos: curr.Add(time.Second),
			Value:          1,
		},
	}
	durations := []time.Duration{
		time.Minute,
		time.Hour,
	}

	// Setup
	blockOpts := NewOptions()
	encodingOpts := encoding.NewOptions()

	// Create the two blocks we plan to merge
	encoder := m3tsz.NewEncoder(data[0].TimestampNanos, nil, true, encodingOpts)
	encoder.Encode(data[0], xtime.Second, nil)
	seg := encoder.Discard()
	block1 := NewDatabaseBlock(data[0].TimestampNanos, durations[0], seg, blockOpts, namespace.Context{}).(*dbBlock)

	encoder.Reset(data[1].TimestampNanos, 10, nil)
	encoder.Encode(data[1], xtime.Second, nil)
	seg = encoder.Discard()
	block2 := NewDatabaseBlock(data[1].TimestampNanos, durations[1], seg, blockOpts, namespace.Context{}).(*dbBlock)

	err := block1.Merge(block2)
	require.NoError(t, err)

	depCtx := block1.opts.ContextPool().Get()
	stream, err := block1.Stream(depCtx)
	require.NoError(t, err)
	block1.Close()

	seg, err = stream.Segment()
	require.NoError(t, err)
	reader := xio.NewSegmentReader(seg)
	iter := m3tsz.NewReaderIterator(reader, true, encodingOpts)

	i := 0
	for iter.Next() {
		dp, _, _ := iter.Current()
		require.True(t, data[i].Equal(dp))
		i++
	}
	require.NoError(t, iter.Err())
}

func TestDatabaseBlockCloseIfFromDisk(t *testing.T) {
	var (
		blockOpts        = NewOptions()
		blockNotFromDisk = NewDatabaseBlock(0, time.Hour, ts.Segment{}, blockOpts, namespace.Context{}).(*dbBlock)
		blockFromDisk    = NewDatabaseBlock(0, time.Hour, ts.Segment{}, blockOpts, namespace.Context{}).(*dbBlock)
	)
	blockFromDisk.wasRetrievedFromDisk = true

	require.False(t, blockNotFromDisk.CloseIfFromDisk())
	require.True(t, blockFromDisk.CloseIfFromDisk())
}

func TestDatabaseSeriesBlocksAddBlock(t *testing.T) {
	now := xtime.Now()
	blockTimes := []xtime.UnixNano{
		now, now.Add(time.Second),
		now.Add(time.Minute), now.Add(-time.Second), now.Add(-time.Hour),
	}
	blockSizes := []time.Duration{
		time.Minute, time.Hour, time.Second,
		time.Microsecond, time.Millisecond,
	}
	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes, blockSizes)
	validateBlocks(t, blocks, blockTimes[4], blockTimes[2], blockTimes, blockSizes)
}

func TestDatabaseSeriesBlocksAddSeries(t *testing.T) {
	now := xtime.Now()
	blockTimes := [][]xtime.UnixNano{
		{now, now.Add(time.Second), now.Add(time.Minute), now.Add(-time.Second), now.Add(-time.Hour)},
		{now.Add(-time.Minute), now.Add(time.Hour)},
	}
	blockSizes := [][]time.Duration{
		{time.Minute, time.Hour, time.Second, time.Microsecond, time.Millisecond},
		{time.Minute * 2, time.Hour * 21},
	}
	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes[0], blockSizes[0])
	other := testDatabaseSeriesBlocksWithTimes(blockTimes[1], blockSizes[1])
	blocks.AddSeries(other)
	var expectedTimes []xtime.UnixNano
	for _, bt := range blockTimes {
		expectedTimes = append(expectedTimes, bt...)
	}
	var expectedSizes []time.Duration
	for _, bt := range blockSizes {
		expectedSizes = append(expectedSizes, bt...)
	}

	validateBlocks(t, blocks, expectedTimes[4], expectedTimes[6], expectedTimes, expectedSizes)
}

func TestDatabaseSeriesBlocksGetBlockAt(t *testing.T) {
	now := xtime.Now()
	blockTimes := []xtime.UnixNano{now, now.Add(time.Second), now.Add(-time.Hour)}
	blockSizes := []time.Duration{time.Minute, time.Hour, time.Second}

	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes, blockSizes)
	for i, bt := range blockTimes {
		b, exists := blocks.BlockAt(bt)
		require.True(t, exists)
		require.Equal(t, blockSizes[i], b.BlockSize())
	}
	_, exists := blocks.BlockAt(now.Add(time.Minute))
	require.False(t, exists)
}

func TestDatabaseSeriesBlocksRemoveBlockAt(t *testing.T) {
	now := xtime.Now()
	blockTimes := []xtime.UnixNano{now, now.Add(-time.Second), now.Add(time.Hour)}
	blockSizes := []time.Duration{time.Minute, time.Hour, time.Second}

	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes, blockSizes)
	// Removing a block at a time that is not present should be a no-op.
	blocks.RemoveBlockAt(now.Add(-time.Hour))
	validateBlocks(t, blocks, blockTimes[1], blockTimes[2], blockTimes, blockSizes)

	expected := []struct {
		min      xtime.UnixNano
		max      xtime.UnixNano
		allTimes []xtime.UnixNano
	}{
		{blockTimes[1], blockTimes[2], blockTimes[1:]},
		{blockTimes[2], blockTimes[2], blockTimes[2:]},
		{timeZero, timeZero, []xtime.UnixNano{}},
	}
	for i, bt := range blockTimes {
		blocks.RemoveBlockAt(bt)
		blockSizes = blockSizes[1:]
		validateBlocks(t, blocks, expected[i].min, expected[i].max, expected[i].allTimes, blockSizes)
	}
}

func TestDatabaseSeriesBlocksRemoveAll(t *testing.T) {
	now := xtime.Now()
	blockTimes := []xtime.UnixNano{now, now.Add(-time.Second), now.Add(time.Hour)}
	blockSizes := []time.Duration{time.Minute, time.Hour, time.Second}

	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes, blockSizes)
	require.Equal(t, len(blockTimes), len(blocks.AllBlocks()))

	blocks.RemoveAll()
	require.Equal(t, 0, len(blocks.AllBlocks()))
}

func TestDatabaseSeriesBlocksClose(t *testing.T) {
	now := xtime.Now()
	blockTimes := []xtime.UnixNano{now, now.Add(-time.Second), now.Add(time.Hour)}
	blockSizes := []time.Duration{time.Minute, time.Hour, time.Second}

	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes, blockSizes)
	require.Equal(t, len(blockTimes), len(blocks.AllBlocks()))

	blocks.Close()
	require.Equal(t, 0, len(blocks.AllBlocks()))

	// Close should nil out the underlying map.
	var nilMap map[xtime.UnixNano]DatabaseBlock
	require.Equal(t, nilMap, blocks.elems)
}

func TestDatabaseSeriesBlocksReset(t *testing.T) {
	now := xtime.Now()
	blockTimes := []xtime.UnixNano{now, now.Add(-time.Second), now.Add(time.Hour)}
	blockSizes := []time.Duration{time.Minute, time.Hour, time.Second}

	blocks := testDatabaseSeriesBlocksWithTimes(blockTimes, blockSizes)
	require.Equal(t, len(blockTimes), len(blocks.AllBlocks()))

	blocks.Reset()

	require.Equal(t, 0, len(blocks.AllBlocks()))
	require.Equal(t, 0, len(blocks.elems))
	require.True(t, blocks.min.Equal(0))
	require.True(t, blocks.max.Equal(0))
}

func TestBlockResetFromDisk(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	bl := testDatabaseBlock(ctrl)
	now := xtime.Now()
	blockSize := 2 * time.Hour
	id := ident.StringID("testID")
	segment := ts.Segment{}
	bl.ResetFromDisk(now, blockSize, segment, id, namespace.Context{})

	assert.True(t, now.Equal(bl.StartTime()))
	assert.Equal(t, blockSize, bl.BlockSize())
	assert.Equal(t, segment, bl.segment)
	assert.Equal(t, id, bl.seriesID)
	assert.True(t, bl.WasRetrievedFromDisk())
}
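
// TestDatabaseBlockConstructorProperties is a small supplementary sanity check,
// added here as an illustrative sketch: a freshly constructed block should report
// the start time and block size it was given. It relies only on NewDatabaseBlock,
// StartTime, BlockSize and Close, all of which are exercised by the tests above;
// the test function itself is new and its name is illustrative.
func TestDatabaseBlockConstructorProperties(t *testing.T) {
	var (
		opts      = NewOptions()
		start     = xtime.Now()
		blockSize = 2 * time.Hour
	)
	// Construct a block with an empty segment, as the other tests do.
	block := NewDatabaseBlock(start, blockSize, ts.Segment{}, opts, namespace.Context{}).(*dbBlock)
	defer block.Close()

	assert.True(t, start.Equal(block.StartTime()))
	assert.Equal(t, blockSize, block.BlockSize())
}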