github.com/m3db/m3@v1.5.0/src/dbnode/integration/disk_snapshot_test.go

// +build integration

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist/fs"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

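// TestDiskSnapshotSimple writes data spanning three consecutive blocks
// (previous, current, and next, all kept mutable by the generous bufferPast
// and bufferFuture settings below), waits for snapshot files covering all of
// those writes to appear on disk and verifies their contents, then advances
// time far enough that the older blocks are flushed and their snapshots are
// cleaned up.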
func TestDiskSnapshotSimple(t *testing.T) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}
	// Test setup
	var (
		nOpts = namespace.NewOptions().
			SetSnapshotEnabled(true)
		bufferPast   = 50 * time.Minute
		bufferFuture = 50 * time.Minute
		blockSize    = time.Hour
	)

	nOpts = nOpts.
		SetRetentionOptions(nOpts.RetentionOptions().
			SetBufferFuture(bufferFuture).
			SetBufferPast(bufferPast).
			SetBlockSize(blockSize))
	md1, err := namespace.NewMetadata(testNamespaces[0], nOpts)
	require.NoError(t, err)
	md2, err := namespace.NewMetadata(testNamespaces[1], nOpts)
	require.NoError(t, err)

	testOpts := NewTestOptions(t).
		SetTickMinimumInterval(time.Second).
		SetNamespaces([]namespace.Metadata{md1, md2})
	testSetup, err := NewTestSetup(t, testOpts, nil)
	require.NoError(t, err)
	defer testSetup.Close()

	shardSet := testSetup.ShardSet()

	// Start the server
	log := testSetup.StorageOpts().InstrumentOptions().Logger()
	log.Debug("disk snapshot test")
	require.NoError(t, testSetup.StartServer())
	log.Debug("server is now up")

	// Stop the server
	defer func() {
		require.NoError(t, testSetup.StopServer())
		log.Debug("server is now down")
	}()

	// Write test data
	var (
		currBlock                         = testSetup.NowFn()().Truncate(blockSize)
		now                               = currBlock.Add(11 * time.Minute)
		assertTimeAllowsWritesToAllBlocks = func(ti xtime.UnixNano) {
			// Make sure ti is early enough in its block that the previous block
			// is still writable (ti is within bufferPast of that block's end).
			require.True(t, ti.Before(ti.Truncate(blockSize).Add(bufferPast)))
			// Make sure ti is late enough in its block that the next block is
			// already writable (ti is within bufferFuture of that block's start).
			require.True(t, ti.After(ti.Truncate(blockSize).Add(blockSize).Add(-bufferFuture)))
		}
	)

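	// With the values above, currBlock.Add(11 * time.Minute) satisfies both
	// checks: 11min is less than bufferPast (50min), so the previous block is
	// still writable, and 11min is more than blockSize-bufferFuture (10min),
	// so the next block is already writable.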
	assertTimeAllowsWritesToAllBlocks(now)
	testSetup.SetNowFn(now)

	var (
		seriesMaps = make(map[xtime.UnixNano]generate.SeriesBlock)
		inputData  = []generate.BlockConfig{
			// Writes in the previous block which should be mutable due to bufferPast
			{IDs: []string{"foo", "bar", "baz"}, NumPoints: 5, Start: currBlock.Add(-10 * time.Minute)},
			// Writes in the current block
			{IDs: []string{"a", "b", "c"}, NumPoints: 5, Start: currBlock},
			// Writes in the next block which should be mutable due to bufferFuture
			{IDs: []string{"1", "2", "3"}, NumPoints: 5, Start: currBlock.Add(blockSize)},
		}
	)
	for _, input := range inputData {
		testData := generate.Block(input)
		seriesMaps[input.Start.Truncate(blockSize)] = testData
		for _, ns := range testSetup.Namespaces() {
			require.NoError(t, testSetup.WriteBatch(ns.ID(), testData))
		}
	}

	// Now that the writes are complete, we need to wait for the next snapshot so
	// that it is guaranteed to contain all of them. We do this by recording the
	// current highest snapshot volume index for each block start, advancing the
	// clock (so that another snapshot run is allowed despite the configurable
	// minimum time between snapshots), and then waiting for snapshot files with
	// the recorded volume index + 1.
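	// Note: getLatestSnapshotVolumeIndex and waitUntilSnapshotFilesFlushed are
	// helpers defined elsewhere in this integration package; based on how they
	// are used here, the former presumably scans the filesystem for the highest
	// snapshot volume index per block start across the shards, while the latter
	// polls until snapshot files newer than each snapshotID's minVolume exist
	// (both are assumptions about helpers not shown in this file).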
	var (
		snapshotsToWaitForByNS = make([][]snapshotID, 0, len(testSetup.Namespaces()))
		fsOpts                 = testSetup.StorageOpts().
					CommitLogOptions().
					FilesystemOptions()
	)
	for _, ns := range testSetup.Namespaces() {
		snapshotsToWaitForByNS = append(snapshotsToWaitForByNS, []snapshotID{
			{
				blockStart: currBlock.Add(-blockSize),
				minVolume: getLatestSnapshotVolumeIndex(
					fsOpts, shardSet, ns.ID(), currBlock.Add(-blockSize)),
			},
			{
				blockStart: currBlock,
				minVolume: getLatestSnapshotVolumeIndex(
					fsOpts, shardSet, ns.ID(), currBlock),
			},
			{
				blockStart: currBlock.Add(blockSize),
				minVolume: getLatestSnapshotVolumeIndex(
					fsOpts, shardSet, ns.ID(), currBlock.Add(blockSize)),
			},
		})
	}

	now = testSetup.NowFn()().Add(2 * time.Minute)
	assertTimeAllowsWritesToAllBlocks(now)
	testSetup.SetNowFn(now)

	maxWaitTime := time.Minute
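	// verifySnapshottedDataFiles below is another helper from this package;
	// presumably it reads the snapshot filesets back from disk for every shard
	// and asserts that they contain the data recorded in seriesMaps (again an
	// assumption based on usage, not on the helper's implementation).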
	for i, ns := range testSetup.Namespaces() {
		log.Info("waiting for snapshot files to flush",
			zap.Any("ns", ns.ID()))
		_, err := waitUntilSnapshotFilesFlushed(fsOpts, shardSet, ns.ID(), snapshotsToWaitForByNS[i], maxWaitTime)
		require.NoError(t, err)
		log.Info("verifying snapshot files",
			zap.Any("ns", ns.ID()))
		verifySnapshottedDataFiles(t, shardSet, testSetup.StorageOpts(), ns.ID(), seriesMaps)
	}

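	// Jump two full block sizes ahead. The two older block starts written above
	// (currBlock-blockSize and currBlock) are now cold enough to be flushed to
	// immutable filesets, while currBlock+blockSize is still active and keeps
	// being snapshotted; the rest of the test waits for that new snapshot and
	// for the flushed blocks' snapshots to be cleaned up.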
	newTime := testSetup.NowFn()().Add(blockSize * 2)
	testSetup.SetNowFn(newTime)

	for _, ns := range testSetup.Namespaces() {
		log.Info("waiting for new snapshot files to be written out",
			zap.Any("ns", ns.ID()))
		snapshotsToWaitFor := []snapshotID{{blockStart: currBlock.Add(blockSize)}}
		// NB(bodu): We need to check whether a specific snapshot ID was deleted, since the snapshotting
		// logic has changed to always snapshot every block start within retention.
		snapshotID, err := waitUntilSnapshotFilesFlushed(fsOpts, shardSet, ns.ID(), snapshotsToWaitFor, maxWaitTime)
		require.NoError(t, err)
		log.Info("waiting for old snapshot files to be deleted",
			zap.Any("ns", ns.ID()))
		// These block starts should have been flushed to disk and their snapshots cleaned up.
		flushedBlockStarts := []xtime.UnixNano{
			currBlock.Add(-blockSize),
			currBlock,
		}
		for _, shard := range shardSet.All() {
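			// waitUntil is a package helper; presumably it re-invokes the closure until it
			// returns true or maxWaitTime elapses (an assumption based on its use here). The
			// closure reports success only once no snapshot fileset remains for any flushed
			// block start.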
			waitUntil(func() bool {
				// Advance the time on each check so that the filesystem processes can make progress
				// (some of them throttle themselves based on the time elapsed since their previous run).
				testSetup.SetNowFn(testSetup.NowFn()().Add(10 * time.Second))
				// Ensure that snapshots for flushed data blocks no longer exist.
				for _, blockStart := range flushedBlockStarts {
					exists, err := fs.SnapshotFileSetExistsAt(fsOpts.FilePathPrefix(), ns.ID(), snapshotID, shard.ID(), blockStart)
					require.NoError(t, err)
					if exists {
						return false
					}
				}
				return true
			}, maxWaitTime)
		}
	}
}