github.com/m3db/m3@v1.5.0/src/dbnode/storage/index_queue_forward_write_test.go

// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package storage

import (
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/runtime"
	"github.com/m3db/m3/src/dbnode/storage/index"
	idxconvert "github.com/m3db/m3/src/dbnode/storage/index/convert"
	"github.com/m3db/m3/src/dbnode/storage/series"
	"github.com/m3db/m3/src/dbnode/ts/writes"
	xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
	"github.com/m3db/m3/src/m3ninx/doc"
	m3ninxidx "github.com/m3db/m3/src/m3ninx/idx"
	"github.com/m3db/m3/src/m3ninx/index/segment/fst/encoding/docs"
	"github.com/m3db/m3/src/x/clock"
	"github.com/m3db/m3/src/x/context"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/resource"
	xsync "github.com/m3db/m3/src/x/sync"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/fortytw2/leaktest"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
	"github.com/uber-go/tally"
)

// generateOptionsNowAndBlockSize returns test options with forward indexing
// always enabled (probability and threshold set to 1) and a clock pinned close
// enough to the next index block boundary that writes are forward-indexed into it.
func generateOptionsNowAndBlockSize() (Options, xtime.UnixNano, time.Duration) {
	idxOpts := testNamespaceIndexOptions().
		SetInsertMode(index.InsertSync).
		SetForwardIndexProbability(1).
		SetForwardIndexThreshold(1)

	opts := DefaultTestOptions().
		SetIndexOptions(idxOpts)

	var (
		retOpts        = opts.SeriesOptions().RetentionOptions()
		blockSize      = retOpts.BlockSize()
		bufferFuture   = retOpts.BufferFuture()
		bufferFragment = blockSize - time.Duration(float64(bufferFuture)*0.5)
		now            = xtime.Now().Truncate(blockSize).Add(bufferFragment)

		clockOptions = opts.ClockOptions()
	)

	clockOptions = clockOptions.SetNowFn(func() time.Time { return now.ToTime() })
	opts = opts.SetClockOptions(clockOptions)

	return opts, now, blockSize
}

// setupForwardIndex builds a namespace index and writes a single series
// positioned so that it is indexed into both the current and the next index
// block, with matching mock lifecycle expectations.
func setupForwardIndex(
	t *testing.T,
	ctrl *gomock.Controller,
	expectAggregateQuery bool,
) (NamespaceIndex, xtime.UnixNano, time.Duration) {
	newFn := func(
		fn nsIndexInsertBatchFn,
		md namespace.Metadata,
		nowFn clock.NowFn,
		coreFn xsync.CoreFn,
		s tally.Scope,
	) namespaceIndexInsertQueue {
		q := newNamespaceIndexInsertQueue(fn, md, nowFn, coreFn, s)
		q.(*nsIndexInsertQueue).indexBatchBackoff = 10 * time.Millisecond
		return q
	}

	md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
	require.NoError(t, err)

	opts, now, blockSize := generateOptionsNowAndBlockSize()
	idx, err := newNamespaceIndexWithInsertQueueFn(md,
		namespace.NewRuntimeOptionsManager(md.ID().String()),
		testShardSet, newFn, opts)
	require.NoError(t, err)

	var (
		ts        = idx.(*nsIndex).state.latestBlock.StartTime()
		nextTS    = ts.Add(blockSize)
		current   = ts.Truncate(blockSize)
		next      = current.Add(blockSize)
		id        = ident.StringID("foo") // ID queried for by the tests below
		tags      = ident.NewTags(
			ident.StringTag("name", "value"),
		)
		lifecycle = doc.NewMockOnIndexSeries(ctrl)
	)

	gomock.InOrder(
		lifecycle.EXPECT().IfAlreadyIndexedMarkIndexSuccessAndFinalize(gomock.Any()).Return(false),

		lifecycle.EXPECT().NeedsIndexUpdate(next).Return(true),
		lifecycle.EXPECT().OnIndexPrepare(next),

		lifecycle.EXPECT().OnIndexSuccess(ts),
		lifecycle.EXPECT().OnIndexFinalize(ts),

		lifecycle.EXPECT().OnIndexSuccess(nextTS),
		lifecycle.EXPECT().OnIndexFinalize(nextTS),
	)

	if !expectAggregateQuery {
		lifecycle.EXPECT().ReconciledOnIndexSeries().Return(
			lifecycle, resource.SimpleCloserFn(func() {}), false,
		).AnyTimes()

		lifecycle.EXPECT().IndexedRange().Return(ts, ts)
		lifecycle.EXPECT().IndexedForBlockStart(ts).Return(true)

		lifecycle.EXPECT().IndexedRange().Return(next, next)
		lifecycle.EXPECT().IndexedForBlockStart(next).Return(true)
	}

	entry, doc := testWriteBatchEntry(id, tags, now, lifecycle)
	batch := testWriteBatch(entry, doc, testWriteBatchBlockSizeOption(blockSize))
	require.NoError(t, idx.WriteBatch(batch))

	return idx, now, blockSize
}

func TestNamespaceForwardIndexInsertQuery(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()
	defer leaktest.CheckTimeout(t, 2*time.Second)()

	ctx := context.NewBackground()
	defer ctx.Close()

	idx, now, blockSize := setupForwardIndex(t, ctrl, false)
	defer idx.Close()

	reQuery, err := m3ninxidx.NewRegexpQuery([]byte("name"), []byte("val.*"))
	require.NoError(t, err)

	// NB: query both the current and the next index block to ensure that the
	// write was correctly indexed to both.
	nextBlockTime := now.Add(blockSize)
	queryTimes := []xtime.UnixNano{now, nextBlockTime}
	reader := docs.NewEncodedDocumentReader()
	for _, ts := range queryTimes {
		res, err := idx.Query(ctx, index.Query{Query: reQuery}, index.QueryOptions{
			StartInclusive: ts.Add(-1 * time.Minute),
			EndExclusive:   ts.Add(1 * time.Minute),
		})
		require.NoError(t, err)

		require.True(t, res.Exhaustive)
		results := res.Results
		require.Equal(t, "testns1", results.Namespace().String())

		d, ok := results.Map().Get(ident.BytesID("foo"))
		md, err := docs.MetadataFromDocument(d, reader)
		require.NoError(t, err)
		tags := idxconvert.ToSeriesTags(md, idxconvert.Opts{NoClone: true})

		require.True(t, ok)
		require.True(t, ident.NewTagIterMatcher(
			ident.MustNewTagStringsIterator("name", "value")).Matches(
			tags))
	}
}

func TestNamespaceForwardIndexAggregateQuery(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()
	defer leaktest.CheckTimeout(t, 2*time.Second)()

	ctx := context.NewBackground()
	defer ctx.Close()

	idx, now, blockSize := setupForwardIndex(t, ctrl, true)
	defer idx.Close()

	reQuery, err := m3ninxidx.NewRegexpQuery([]byte("name"), []byte("val.*"))
	require.NoError(t, err)

	// NB: query both the current and the next index block to ensure that the
	// write was correctly indexed to both.
	nextBlockTime := now.Add(blockSize)
	queryTimes := []xtime.UnixNano{now, nextBlockTime}
	for _, ts := range queryTimes {
		res, err := idx.AggregateQuery(ctx, index.Query{Query: reQuery},
			index.AggregationOptions{
				QueryOptions: index.QueryOptions{
					StartInclusive: ts.Add(-1 * time.Minute),
					EndExclusive:   ts.Add(1 * time.Minute),
				},
			},
		)
		require.NoError(t, err)

		require.True(t, res.Exhaustive)
		results := res.Results
		require.Equal(t, "testns1", results.Namespace().String())

		rMap := results.Map()
		require.Equal(t, 1, rMap.Len())
		seenIters, found := rMap.Get(ident.StringID("name"))
		require.True(t, found)

		vMap := seenIters.Map()
		require.Equal(t, 1, vMap.Len())
		require.True(t, vMap.Contains(ident.StringID("value")))
	}
}

func setupMockBlock(
	t *testing.T,
	bl *index.MockBlock,
	ts xtime.UnixNano,
	id ident.ID,
	tag ident.Tag,
	lifecycle doc.OnIndexSeries,
) {
	bl.EXPECT().
		WriteBatch(gomock.Any()).
		Return(index.WriteBatchResult{}, nil).
		Do(func(batch *index.WriteBatch) {
			docs := batch.PendingDocs()
			require.Equal(t, 1, len(docs), id.String())
			require.Equal(t, doc.Metadata{
				ID:     id.Bytes(),
				Fields: doc.Fields{{Name: tag.Name.Bytes(), Value: tag.Value.Bytes()}},
			}, docs[0])
			entries := batch.PendingEntries()
			require.Equal(t, 1, len(entries))
			require.True(t, entries[0].Timestamp.Equal(ts))
			require.True(t, entries[0].OnIndexSeries == lifecycle) // Just ptr equality
		}).Times(1)
}

// createMockBlocks returns the mock active block and a newBlockFn that serves
// mock blocks for the active block, the current block start, and the next block
// start, erroring if any of them is requested more than once.
func createMockBlocks(
	ctrl *gomock.Controller,
	blockStart xtime.UnixNano,
	nextBlockStart xtime.UnixNano,
) (*index.MockBlock, index.NewBlockFn) {
	activeBlock := index.NewMockBlock(ctrl)
	activeBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
	activeBlock.EXPECT().Close().Return(nil)
	activeBlock.EXPECT().StartTime().Return(blockStart).AnyTimes()

	block := index.NewMockBlock(ctrl)
	block.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
	block.EXPECT().Close().Return(nil)
	block.EXPECT().StartTime().Return(blockStart).AnyTimes()

	futureBlock := index.NewMockBlock(ctrl)
	futureBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
	futureBlock.EXPECT().StartTime().Return(nextBlockStart).AnyTimes()

	var madeActive, madeBlock, madeFuture bool
	newBlockFn := func(
		ts xtime.UnixNano,
		md namespace.Metadata,
		opts index.BlockOptions,
		_ namespace.RuntimeOptionsManager,
		io index.Options,
	) (index.Block, error) {
		if opts.ActiveBlock && ts.Equal(xtime.UnixNano(0)) {
			if madeActive {
				return activeBlock, errors.New("already created active block")
			}
			madeActive = true
			return activeBlock, nil
		}
		if ts.Equal(blockStart) {
			if madeBlock {
				return block, errors.New("already created initial block")
			}
			madeBlock = true
			return block, nil
		} else if ts.Equal(nextBlockStart) {
			if madeFuture {
				return nil, errors.New("already created forward block")
			}
			madeFuture = true
			return futureBlock, nil
		}
		return nil, fmt.Errorf("no block starting at %s; must start at %s or %s",
			ts, blockStart, nextBlockStart)
	}

	return activeBlock, newBlockFn
}

func TestNamespaceIndexForwardWrite(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	opts, now, blockSize := generateOptionsNowAndBlockSize()
	blockStart := now.Truncate(blockSize)
	futureStart := blockStart.Add(blockSize)
	activeBlock, newBlockFn := createMockBlocks(ctrl, blockStart, futureStart)

	md := testNamespaceMetadata(blockSize, 4*time.Hour)
	idx, err := newNamespaceIndexWithNewBlockFn(md,
		namespace.NewRuntimeOptionsManager(md.ID().String()),
		testShardSet, newBlockFn, opts)
	require.NoError(t, err)

	defer func() {
		require.NoError(t, idx.Close())
	}()

	id := ident.StringID("foo")
	tag := ident.StringTag("name", "value")
	tags := ident.NewTags(tag)
	lifecycle := doc.NewMockOnIndexSeries(ctrl)

	var (
		ts   = idx.(*nsIndex).state.latestBlock.StartTime()
		next = ts.Truncate(blockSize).Add(blockSize)
	)

	lifecycle.EXPECT().NeedsIndexUpdate(next).Return(true)
	lifecycle.EXPECT().OnIndexPrepare(next)
	lifecycle.EXPECT().IfAlreadyIndexedMarkIndexSuccessAndFinalize(gomock.Any()).Return(false)

	setupMockBlock(t, activeBlock, now, id, tag, lifecycle)
	setupMockBlock(t, activeBlock, futureStart, id, tag, lifecycle)

	batch := index.NewWriteBatch(index.WriteBatchOptions{
		IndexBlockSize: blockSize,
	})
	batch.Append(testWriteBatchEntry(id, tags, now, lifecycle))
	require.NoError(t, idx.WriteBatch(batch))
}

func TestNamespaceIndexForwardWriteCreatesBlock(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	opts, now, blockSize := generateOptionsNowAndBlockSize()
	blockStart := now.Truncate(blockSize)
	futureStart := blockStart.Add(blockSize)
	activeBlock, newBlockFn := createMockBlocks(ctrl, blockStart, futureStart)

	md := testNamespaceMetadata(blockSize, 4*time.Hour)
	idx, err := newNamespaceIndexWithNewBlockFn(md,
		namespace.NewRuntimeOptionsManager(md.ID().String()),
		testShardSet, newBlockFn, opts)
	require.NoError(t, err)

	defer func() {
		require.NoError(t, idx.Close())
	}()

	id := ident.StringID("foo")
	tag := ident.StringTag("name", "value")
	tags := ident.NewTags(tag)
	lifecycle := doc.NewMockOnIndexSeries(ctrl)

	var (
		ts   = idx.(*nsIndex).state.latestBlock.StartTime()
		next = ts.Truncate(blockSize).Add(blockSize)
	)

	lifecycle.EXPECT().NeedsIndexUpdate(next).Return(true)
	lifecycle.EXPECT().OnIndexPrepare(next)
	lifecycle.EXPECT().IfAlreadyIndexedMarkIndexSuccessAndFinalize(gomock.Any()).Return(false)

	setupMockBlock(t, activeBlock, now, id, tag, lifecycle)
	setupMockBlock(t, activeBlock, futureStart, id, tag, lifecycle)

	entry, doc := testWriteBatchEntry(id, tags, now, lifecycle)
	batch := testWriteBatch(entry, doc, testWriteBatchBlockSizeOption(blockSize))
	require.NoError(t, idx.WriteBatch(batch))
}

func TestShardForwardWriteTaggedSyncRefCountSyncIndex(t *testing.T) {
	testShardForwardWriteTaggedRefCountIndex(t, index.InsertSync, false)
}

func TestShardForwardWriteTaggedAsyncRefCountSyncIndex(t *testing.T) {
	testShardForwardWriteTaggedRefCountIndex(t, index.InsertAsync, true)
}

func testShardForwardWriteTaggedRefCountIndex(
	t *testing.T,
	syncType index.InsertMode,
	async bool,
) {
	defer leaktest.CheckTimeout(t, 10*time.Second)()
	newFn := func(
		fn nsIndexInsertBatchFn,
		md namespace.Metadata,
		nowFn clock.NowFn,
		coreFn xsync.CoreFn,
		s tally.Scope,
	) namespaceIndexInsertQueue {
		q := newNamespaceIndexInsertQueue(fn, md, nowFn, coreFn, s)
		q.(*nsIndexInsertQueue).indexBatchBackoff = 10 * time.Millisecond
		return q
	}
	md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
	require.NoError(t, err)

	opts, now, blockSize := generateOptionsNowAndBlockSize()
	opts = opts.SetIndexOptions(opts.IndexOptions().SetInsertMode(syncType))

	idx, err := newNamespaceIndexWithInsertQueueFn(md,
		namespace.NewRuntimeOptionsManager(md.ID().String()),
		testShardSet, newFn, opts)
	require.NoError(t, err)

	defer func() {
		require.NoError(t, idx.Close())
	}()

	next := now.Truncate(blockSize).Add(blockSize)
	if async {
		testShardForwardWriteTaggedAsyncRefCount(t, now, next, idx, opts)
	} else {
		testShardForwardWriteTaggedSyncRefCount(t, now, next, idx, opts)
	}
}

func writeToShard(
	ctx context.Context,
	t *testing.T,
	shard *dbShard,
	idx NamespaceIndex,
	now xtime.UnixNano,
	id string,
	shouldWrite bool,
) {
	tag := ident.Tag{Name: ident.StringID(id), Value: ident.StringID("")}
	idTags := ident.NewTags(tag)
	iter := ident.NewTagsIterator(idTags)
	seriesWrite, err := shard.WriteTagged(ctx, ident.StringID(id),
		idxconvert.NewTagsIterMetadataResolver(iter), now,
		1.0, xtime.Second, nil, series.WriteOptions{
			TruncateType: series.TypeBlock,
			TransformOptions: series.WriteTransformOptions{
				ForceValueEnabled: true,
				ForceValue:        1,
			},
		})
	require.NoError(t, err)
	require.Equal(t, shouldWrite, seriesWrite.WasWritten)
	if seriesWrite.NeedsIndex {
		err = idx.WritePending([]writes.PendingIndexInsert{
			seriesWrite.PendingIndexInsert,
		})
		require.NoError(t, err)
	}
}

// verifyShard waits until the given ID is returned exactly once when querying
// the current index block, the next index block, and the range spanning both.
func verifyShard(
	ctx context.Context,
	t *testing.T,
	idx NamespaceIndex,
	now xtime.UnixNano,
	next xtime.UnixNano,
	id string,
) {
	allQueriesSuccess := clock.WaitUntil(func() bool {
		query := m3ninxidx.NewFieldQuery([]byte(id))
		// check current index block for series
		res, err := idx.Query(ctx, index.Query{Query: query}, index.QueryOptions{
			StartInclusive: now,
			EndExclusive:   next,
		})
		require.NoError(t, err)
		if res.Results.Size() != 1 {
			return false
		}

		// check next index block for series
		res, err = idx.Query(ctx, index.Query{Query: query}, index.QueryOptions{
			StartInclusive: next.Add(1 * time.Minute),
			EndExclusive:   next.Add(5 * time.Minute),
		})
		require.NoError(t, err)
		if res.Results.Size() != 1 {
			return false
		}

		// check across both index blocks to ensure only a single ID is returned.
		res, err = idx.Query(ctx, index.Query{Query: query}, index.QueryOptions{
			StartInclusive: now,
			EndExclusive:   next.Add(5 * time.Minute),
		})
		require.NoError(t, err)
		if res.Results.Size() != 1 {
			return false
		}

		return true
	}, 5*time.Second)
	require.True(t, allQueriesSuccess)
}

func writeToShardAndVerify(
	ctx context.Context,
	t *testing.T,
	shard *dbShard,
	idx NamespaceIndex,
	now xtime.UnixNano,
	next xtime.UnixNano,
	id string,
	shouldWrite bool,
) {
	writeToShard(ctx, t, shard, idx, now, id, shouldWrite)
	verifyShard(ctx, t, idx, now, next, id)
}

func testShardForwardWriteTaggedSyncRefCount(
	t *testing.T,
	now xtime.UnixNano,
	next xtime.UnixNano,
	idx NamespaceIndex,
	opts Options,
) {
	shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
	shard.SetRuntimeOptions(runtime.NewOptions().
		SetWriteNewSeriesAsync(false))
	defer shard.Close()

	ctx := context.NewBackground()
	defer ctx.Close()

	writeToShardAndVerify(ctx, t, shard, idx, now, next, "foo", true)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "bar", true)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "baz", true)

	// ensure all entries have no references left
	for _, id := range []string{"foo", "bar", "baz"} {
		shard.Lock()
		entry, err := shard.lookupEntryWithLock(ident.StringID(id))
		shard.Unlock()
		require.NoError(t, err)
		require.Equal(t, int32(0), entry.ReaderWriterCount(), id)
	}

	// move forward the time at which the point is written to ensure truncation works.
	now = now.Add(1)
	// write already inserted series
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "foo", false)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "bar", false)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "baz", false)

	// ensure all entries have no references left
	for _, id := range []string{"foo", "bar", "baz"} {
		shard.Lock()
		entry, err := shard.lookupEntryWithLock(ident.StringID(id))
		shard.Unlock()
		require.NoError(t, err)
		require.Equal(t, int32(0), entry.ReaderWriterCount(), id)
	}
}

func testShardForwardWriteTaggedAsyncRefCount(
	t *testing.T,
	now xtime.UnixNano,
	next xtime.UnixNano,
	idx NamespaceIndex,
	opts Options,
) {
	testReporterOpts := xmetrics.NewTestStatsReporterOptions()
	testReporter := xmetrics.NewTestStatsReporter(testReporterOpts)
	scope, closer := tally.NewRootScope(tally.ScopeOptions{
		Reporter: testReporter,
	}, 100*time.Millisecond)
	defer closer.Close()
	opts = opts.SetInstrumentOptions(
		opts.InstrumentOptions().
			SetMetricsScope(scope).
			SetReportInterval(100 * time.Millisecond))

	shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
	shard.SetRuntimeOptions(runtime.NewOptions().
		SetWriteNewSeriesAsync(true))
	defer shard.Close()

	ctx := context.NewBackground()
	defer ctx.Close()

	writeToShard(ctx, t, shard, idx, now, "foo", true)
	writeToShard(ctx, t, shard, idx, now, "bar", true)
	writeToShard(ctx, t, shard, idx, now, "baz", true)

	verifyShard(ctx, t, idx, now, next, "foo")
	verifyShard(ctx, t, idx, now, next, "bar")
	verifyShard(ctx, t, idx, now, next, "baz")

	// ensure all entries have no references left
	for _, id := range []string{"foo", "bar", "baz"} {
		shard.Lock()
		entry, err := shard.lookupEntryWithLock(ident.StringID(id))
		shard.Unlock()
		require.NoError(t, err)
		require.Equal(t, int32(0), entry.ReaderWriterCount(), id)
	}

	// write already inserted series. This should have no effect.
	now = now.Add(1)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "foo", false)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "bar", false)
	writeToShardAndVerify(ctx, t, shard, idx, now, next, "baz", false)

	// ensure all entries have no references left
	for _, id := range []string{"foo", "bar", "baz"} {
		shard.Lock()
		entry, err := shard.lookupEntryWithLock(ident.StringID(id))
		shard.Unlock()
		require.NoError(t, err)
		require.Equal(t, int32(0), entry.ReaderWriterCount(), id)
	}
}