github.com/m3db/m3@v1.5.0/src/dbnode/storage/entry_blackbox_test.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package storage

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/uber-go/tally"

	"github.com/m3db/m3/src/dbnode/storage/series"
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/tallytest"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"
)

var (
	initTime      = time.Date(2018, time.May, 12, 15, 55, 0, 0, time.UTC)
	testBlockSize = 24 * time.Hour
)

func newTime(n int) xtime.UnixNano {
	t := initTime.Truncate(testBlockSize).Add(time.Duration(n) * testBlockSize)
	return xtime.ToUnixNano(t)
}

func newMockSeries(ctrl *gomock.Controller) series.DatabaseSeries {
	return newMockSeriesWithID(ctrl, "foo")
}

func newMockSeriesWithID(ctrl *gomock.Controller, id string) series.DatabaseSeries {
	series := series.NewMockDatabaseSeries(ctrl)
	series.EXPECT().ID().Return(ident.StringID(id)).AnyTimes()
	return series
}

func TestEntryReaderWriterCount(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	e := NewEntry(NewEntryOptions{Series: newMockSeries(ctrl)})
	require.Equal(t, int32(0), e.ReaderWriterCount())

	e.IncrementReaderWriterCount()
	require.Equal(t, int32(1), e.ReaderWriterCount())

	e.DecrementReaderWriterCount()
	require.Equal(t, int32(0), e.ReaderWriterCount())
}

func TestEntryIndexSuccessPath(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	e := NewEntry(NewEntryOptions{Series: newMockSeries(ctrl)})
	t0 := newTime(0)
	require.False(t, e.IndexedForBlockStart(t0))

	require.True(t, e.NeedsIndexUpdate(t0))
	e.OnIndexPrepare(t0)
	e.OnIndexSuccess(t0)
	e.OnIndexFinalize(t0)

	require.True(t, e.IndexedForBlockStart(t0))
	require.Equal(t, int32(0), e.ReaderWriterCount())
	require.False(t, e.NeedsIndexUpdate(t0))
}

func TestEntryIndexFailPath(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	e := NewEntry(NewEntryOptions{Series: newMockSeries(ctrl)})
	t0 := newTime(0)
	require.False(t, e.IndexedForBlockStart(t0))
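
	// Prepare and finalize without calling OnIndexSuccess: the block start must
	// remain unindexed and an index update must still be needed afterwards.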
	require.True(t, e.NeedsIndexUpdate(t0))
	e.OnIndexPrepare(t0)
	e.OnIndexFinalize(t0)

	require.False(t, e.IndexedForBlockStart(t0))
	require.Equal(t, int32(0), e.ReaderWriterCount())
	require.True(t, e.NeedsIndexUpdate(t0))
}

func TestEntryMultipleGoroutinesRaceIndexUpdate(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	defer leaktest.CheckTimeout(t, time.Second)()

	e := NewEntry(NewEntryOptions{Series: newMockSeries(ctrl)})
	t0 := newTime(0)
	require.False(t, e.IndexedForBlockStart(t0))

	var (
		r1, r2 bool
		wg     sync.WaitGroup
	)
	wg.Add(2)

	go func() {
		r1 = e.NeedsIndexUpdate(t0)
		wg.Done()
	}()

	go func() {
		r2 = e.NeedsIndexUpdate(t0)
		wg.Done()
	}()

	wg.Wait()

	require.False(t, r1 && r2)
	require.True(t, r1 || r2)
}

func TestEntryTryMarkIndexGarbageCollectedAfterSeriesClose(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	opts := DefaultTestOptions()
	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	shard := testDatabaseShard(t, opts)
	defer func() {
		require.NoError(t, shard.Close())
	}()

	id := ident.StringID("foo")

	series := series.NewMockDatabaseSeries(ctrl)
	series.EXPECT().ID().Return(id)

	entry := NewEntry(NewEntryOptions{
		Shard:        shard,
		Series:       series,
		EntryMetrics: NewEntryMetrics(tally.NewTestScope("test", nil)),
	})

	// Make sure ID returns nil to emulate the series being closed, with
	// TryMarkIndexGarbageCollected calling back into the shard with a nil ID.
	series.EXPECT().ID().Return(nil).AnyTimes()
	series.EXPECT().IsEmpty().Return(false).AnyTimes()
	require.NotPanics(t, func() {
		// Make sure this doesn't panic.
		require.False(t, entry.TryMarkIndexGarbageCollected())
	})
}

func TestEntryIndexedRange(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	opts := DefaultTestOptions()
	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	shard := testDatabaseShard(t, opts)
	defer func() {
		require.NoError(t, shard.Close())
	}()

	entry := NewEntry(NewEntryOptions{
		Shard:  shard,
		Series: newMockSeries(ctrl),
	})

	assertRange := func(expectedMin, expectedMax xtime.UnixNano) {
		min, max := entry.IndexedRange()
		assert.Equal(t, expectedMin, min)
		assert.Equal(t, expectedMax, max)
	}

	assertRange(0, 0)

	entry.OnIndexPrepare(2)
	assertRange(0, 0)

	entry.OnIndexSuccess(2)
	assertRange(2, 2)

	entry.OnIndexSuccess(5)
	assertRange(2, 5)

	entry.OnIndexSuccess(1)
	assertRange(1, 5)

	entry.OnIndexSuccess(3)
	assertRange(1, 5)
}

func TestReconciledOnIndexSeries(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	opts := DefaultTestOptions()
	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	shard := testDatabaseShard(t, opts)
	defer func() {
		require.NoError(t, shard.Close())
	}()

	// Create an entry with index 0 that is not inserted into the shard.
	series := newMockSeries(ctrl)
	entry := NewEntry(NewEntryOptions{
		Index:  0,
		Shard:  shard,
		Series: series,
	})

	// Create an entry with index 1 that gets inserted into the lookup map.
	_ = addMockSeries(ctrl, shard, series.ID(), ident.Tags{}, 1)

	// Validate we perform the reconciliation.
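	// The entry above was never inserted into the shard, so the lookup by ID
	// should resolve to the duplicate entry inserted with index 1.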
	e, closer, reconciled := entry.ReconciledOnIndexSeries()
	require.True(t, reconciled)
	require.Equal(t, uint64(1), e.(*Entry).Index)
	closer.Close()

	// Set the entry's insert time emulating being inserted into the shard.
	// Ensure no reconciliation.
	entry.SetInsertTime(time.Now())
	e, closer, reconciled = entry.ReconciledOnIndexSeries()
	require.False(t, reconciled)
	require.Equal(t, uint64(0), e.(*Entry).Index)
	closer.Close()
}

func TestMergeWithIndexSeries(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	var (
		blockSize  = time.Hour * 2
		numBlocks  = 5
		numEntries = 3
		start      = xtime.Now().
				Truncate(blockSize).
				Add(blockSize * -time.Duration(numEntries*numBlocks)) //nolint: durationcheck

		expectedIndexTimes = make([]xtime.UnixNano, 0, numEntries*numBlocks)
		entries            = make([]*Entry, 0, numEntries)
	)

	for entryIdx := 0; entryIdx < numEntries; entryIdx++ {
		series := newMockSeriesWithID(ctrl, fmt.Sprint("bar", entryIdx))
		entry := NewEntry(NewEntryOptions{Series: series})

		for blockIdx := 0; blockIdx < numBlocks; blockIdx++ {
			blockStart := start.
				Add(blockSize * time.Duration(blockIdx+numBlocks*entryIdx))

			expectedIndexTimes = append(expectedIndexTimes, blockStart)
			entry.OnIndexSuccess(blockStart)
		}

		entries = append(entries, entry)
	}

	mergedEntry := NewEntry(NewEntryOptions{Series: newMockSeries(ctrl)})
	for _, entry := range entries {
		mergedEntry.MergeEntryIndexBlockStates(entry.reverseIndex.states)
	}

	for _, start := range expectedIndexTimes {
		require.True(t, mergedEntry.IndexedForBlockStart(start))
	}

	min, max := mergedEntry.IndexedRange()
	require.Equal(t, min, start)
	require.Equal(t, max, start.Add(blockSize*time.Duration(numEntries*numBlocks-1)))
}

func TestEntryTryMarkIndexGarbageCollected(t *testing.T) {
	for _, tc := range []struct {
		name           string
		entry          *Entry
		hasSeries      bool
		indexed        bool
		indexDuplicate bool
		shardClosed    bool
		hasReaders     bool

		expectCollected                   bool
		expectedNeedsReconcileCounter     int64
		expectedNoNeedsReconcileCounter   int64
		expectedGcShardClosedCounter      int64
		expectedGcEmptyCounter            int64
		expectedNoGcNil                   int64
		expectedNoGcNotEmptySeriesCounter int64
		expectedNoGcHasReadersCounter     int64
	}{
		{
			name: "not indexed entry should not be collected",
			expectCollected: false,
			expectedNoGcNil: 1,
		},
		{
			name: "indexed entry with empty series should be collected",
			indexed: true,
			hasSeries: false,
			hasReaders: false,
			shardClosed: false,
			expectCollected: true,
			expectedNoNeedsReconcileCounter: 1,
			expectedGcEmptyCounter: 1,
		},
		{
			name: "indexed 2 empty entries need reconcile",
			indexed: true,
			indexDuplicate: true,
			hasSeries: false,
			hasReaders: false,
			shardClosed: false,
			expectCollected: true,
			expectedNoNeedsReconcileCounter: 0,
			expectedNeedsReconcileCounter: 1,
			expectedGcEmptyCounter: 1,
		},
		{
			name: "indexed 2 non empty entries",
			indexed: true,
			indexDuplicate: true,
			hasSeries: true,
			hasReaders: false,
			shardClosed: false,
			expectCollected: false,
			expectedNoGcNotEmptySeriesCounter: 1,
		},
		{
			name: "indexed entry with series should not be collected",
			indexed: true,
			hasSeries: true,
			hasReaders: false,
			shardClosed: false,
			expectCollected: false,
			expectedNoGcNotEmptySeriesCounter: 1,
		},
		{
			name: "empty indexed entry with readers should not be collected",
			indexed: true,
			hasSeries: false,
			hasReaders: true,
			shardClosed: false,
			expectCollected: false,
			expectedNoGcHasReadersCounter: 1,
		},
		{
			name: "indexed entry with readers and series should not be collected",
			indexed: true,
			hasSeries: true,
			hasReaders: true,
			shardClosed: false,
			expectCollected: false,
			expectedNoGcHasReadersCounter: 1,
		},
		{
			name: "indexed entry with non empty series should be collected when the shard is closed",
			indexed: true,
			hasSeries: true,
			hasReaders: false,
			shardClosed: true,
			expectCollected: true,
			expectedGcShardClosedCounter: 1,
		},
		{
			name: "indexed entry with readers should be collected when the shard is closed",
			indexed: true,
			hasSeries: false,
			hasReaders: true,
			shardClosed: true,
			expectCollected: true,
			expectedGcShardClosedCounter: 1,
		},
		{
			name: "indexed entry with readers and series should be collected when the shard is closed",
			indexed: true,
			hasSeries: true,
			hasReaders: true,
			shardClosed: true,
			expectCollected: true,
			expectedGcShardClosedCounter: 1,
		},
	} {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctrl := xtest.NewController(t)
			defer ctrl.Finish()

			opts := DefaultTestOptions()
			ctx := opts.ContextPool().Get()
			defer ctx.Close()

			shard := testDatabaseShard(t, opts)
			if !tc.shardClosed {
				defer func() {
					require.NoError(t, shard.Close())
				}()
			}

			// Create the mock series backing the entry created below with index 0.
			s := series.NewMockDatabaseSeries(ctrl)
			s.EXPECT().ID().Return(id).AnyTimes()
			s.EXPECT().Close().Return().AnyTimes()
			s.EXPECT().IsEmpty().Return(!tc.hasSeries).AnyTimes()

			scope := tally.NewTestScope("test", nil)
			metrics := NewEntryMetrics(scope)
			entry := NewEntry(NewEntryOptions{
				Index:        0,
				Shard:        shard,
				Series:       s,
				EntryMetrics: metrics,
			})
			if tc.indexed {
				shard.Lock()
				shard.insertNewShardEntryWithLock(entry)
				if tc.indexDuplicate {
					shard.insertNewShardEntryWithLock(NewEntry(NewEntryOptions{
						Index:        1,
						Shard:        shard,
						Series:       s,
						EntryMetrics: metrics,
					}))
				}
				shard.Unlock()
			}

			if tc.hasReaders {
				entry.IncrementReaderWriterCount()
			}
			if tc.shardClosed {
				require.NoError(t, shard.Close())
			}
			collected := entry.TryMarkIndexGarbageCollected()
			require.Equal(t, tc.expectCollected, collected, "collected")
			if tc.indexDuplicate {
				assert.False(t, entry.IndexGarbageCollected.Load(), "IndexGarbageCollected")
			} else {
				assert.Equal(t, tc.expectCollected, entry.IndexGarbageCollected.Load(), "IndexGarbageCollected")
			}
			if tc.hasReaders {
				entry.DecrementReaderWriterCount()
			}

			tallytest.AssertCounterValue(t, tc.expectedNeedsReconcileCounter, scope.Snapshot(), "test.count",
				map[string]string{
					"reconcile": "needs_reconcile",
					"path":      "gc",
				})
			tallytest.AssertCounterValue(t, tc.expectedNoNeedsReconcileCounter, scope.Snapshot(), "test.count",
				map[string]string{
					"reconcile": "no_reconcile",
					"path":      "gc",
				})
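
			// For each case exactly one of the gc_count/no_gc_count reasons below
			// is expected to be non-zero; the remaining reason counters stay at zero.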
			tallytest.AssertCounterValue(t, tc.expectedGcShardClosedCounter, scope.Snapshot(), "test.gc_count",
				map[string]string{
					"reason": "shard_closed",
					"path":   "gc",
				})
			tallytest.AssertCounterValue(t, tc.expectedGcEmptyCounter, scope.Snapshot(), "test.gc_count",
				map[string]string{
					"reason": "empty",
					"path":   "gc",
				})

			tallytest.AssertCounterValue(t, tc.expectedNoGcNil, scope.Snapshot(), "test.no_gc_count",
				map[string]string{
					"reason": "nil",
					"path":   "gc",
				})
			tallytest.AssertCounterValue(t, 0, scope.Snapshot(), "test.no_gc_count",
				map[string]string{
					"reason": "error",
					"path":   "gc",
				})
			tallytest.AssertCounterValue(t, tc.expectedNoGcHasReadersCounter, scope.Snapshot(), "test.no_gc_count",
				map[string]string{
					"reason": "has_readers",
					"path":   "gc",
				})
			tallytest.AssertCounterValue(t, tc.expectedNoGcNotEmptySeriesCounter, scope.Snapshot(), "test.no_gc_count",
				map[string]string{
					"reason": "not_empty_series",
					"path":   "gc",
				})
		})
	}
}

func TestTryReconcileDuplicates(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	var (
		shard  = NewMockShard(ctrl)
		scope  = tally.NewTestScope("test", nil)
		series = series.NewMockDatabaseSeries(ctrl)
	)

	series.EXPECT().ID().Return(id)
	entry := NewEntry(NewEntryOptions{
		Series:       series,
		Shard:        shard,
		EntryMetrics: NewEntryMetrics(scope),
	})

	shard.EXPECT().TryRetrieveSeriesAndIncrementReaderWriterCount(id).DoAndReturn(
		func(ident.ID) (*Entry, WritableSeriesOptions, error) {
			// NB: TryRetrieveSeriesAndIncrementReaderWriterCount increments rw count
			// so emulate this here.
			entry.IncrementReaderWriterCount()
			return entry, WritableSeriesOptions{}, nil
		})

	entry.TryReconcileDuplicates()
	tallytest.AssertCounterValue(t, 1, scope.Snapshot(), "test.count", map[string]string{
		"reconcile": "no_reconcile",
		"path":      "duplicate",
	})
	tallytest.AssertCounterValue(t, 0, scope.Snapshot(), "test.count", map[string]string{
		"reconcile": "needs_reconcile",
		"path":      "duplicate",
	})

	states := doc.EntryIndexBlockStates{1: doc.EntryIndexBlockState{}}
	entry.reverseIndex = entryIndexState{states: states}
	e := &Entry{reverseIndex: newEntryIndexState()}
	shard.EXPECT().TryRetrieveSeriesAndIncrementReaderWriterCount(id).
		Return(e, WritableSeriesOptions{}, nil)

	entry.TryReconcileDuplicates()
	require.Equal(t, states, e.reverseIndex.states)
	tallytest.AssertCounterValue(t, 1, scope.Snapshot(), "test.count", map[string]string{
		"reconcile": "no_reconcile",
		"path":      "duplicate",
	})
	tallytest.AssertCounterValue(t, 1, scope.Snapshot(), "test.count", map[string]string{
		"reconcile": "needs_reconcile",
		"path":      "duplicate",
	})
}

func TestMergeOnReconcile(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	var (
		shard  = NewMockShard(ctrl)
		series = series.NewMockDatabaseSeries(ctrl)
	)

	series.EXPECT().ID().Return(id)
	entry := NewEntry(NewEntryOptions{
		Series: series,
		Shard:  shard,
	})

	shard.EXPECT().TryRetrieveSeriesAndIncrementReaderWriterCount(id).DoAndReturn(
		func(ident.ID) (*Entry, WritableSeriesOptions, error) {
			// NB: TryRetrieveSeriesAndIncrementReaderWriterCount increments rw count
			// so emulate this here.
			entry.IncrementReaderWriterCount()
			return entry, WritableSeriesOptions{}, nil
		})

	onIndexed, closer, needsReconcile := entry.ReconciledOnIndexSeries()
	require.Equal(t, entry, onIndexed)
	require.True(t, needsReconcile)
	closer.Close()

	states := doc.EntryIndexBlockStates{1: doc.EntryIndexBlockState{}}
	entry.reverseIndex = entryIndexState{states: states}
	otherEntry := &Entry{reverseIndex: newEntryIndexState()}
	shard.EXPECT().TryRetrieveSeriesAndIncrementReaderWriterCount(id).
		Return(otherEntry, WritableSeriesOptions{}, nil)

	onIndexed, closer, needsReconcile = entry.ReconciledOnIndexSeries()
	require.True(t, needsReconcile)
	e, ok := onIndexed.(*Entry)
	require.True(t, ok)
	require.Equal(t, states, e.reverseIndex.states)
	closer.Close()
}