github.com/m3db/m3@v1.5.0/src/dbnode/integration/peers_bootstrap_partial_data_test.go

// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"
)

// This test simulates a case where a node fails or reboots while fetching data
// from its peers. When the bootstrap process is restarted and retried, some
// data will already be on disk, which the filesystem bootstrapper can fulfill.
func TestPeersBootstrapPartialData(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setup
	log := xtest.NewLogger(t)
	blockSize := 2 * time.Hour
	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(5 * blockSize).
		SetBlockSize(blockSize).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)
	idxOpts := namespace.NewIndexOptions().
		SetEnabled(true).
		SetBlockSize(blockSize)
	nsOpts := namespace.NewOptions().SetRetentionOptions(retentionOpts).SetIndexOptions(idxOpts)
	namesp, err := namespace.NewMetadata(testNamespaces[0], nsOpts)
	require.NoError(t, err)
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{namesp}).
		// Use TChannel clients for writing / reading because we want to target individual nodes at a time
		// and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)

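	// Configure bootstrappers per node: the first node may only bootstrap from
	// local sources (peers disabled), while the second node bootstraps from its
	// peer and from its own disk, with the commit log bootstrapper disabled.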
	setupOpts := []BootstrappableTestSetupOptions{
		{DisablePeersBootstrapper: true},
		{
			DisableCommitLogBootstrapper: true,
			DisablePeersBootstrapper:     false,
		},
	}
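	// Create a two-node cluster; both nodes serve the namespace configured
	// above and replicate the same shards.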
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts) //nolint:govet
	defer closeFn()

	// Write test data for six consecutive block starts to the first node
	now := setups[0].NowFn()()
	inputData := []generate.BlockConfig{
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-5 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-4 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-3 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-2 * blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-blockSize)},
		{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now},
	}
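	// Group the generated data by block start; the same map is written to the
	// first node's disk and later used to verify both nodes' in-memory data.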
	seriesMaps := generate.BlocksByStart(inputData)
	require.NoError(t, writeTestDataToDiskWithIndex(namesp, setups[0], seriesMaps))

	// Write a subset of blocks to the second node, simulating an incomplete peer bootstrap.
	partialBlockStarts := map[xtime.UnixNano]struct{}{
		inputData[0].Start: {},
		inputData[1].Start: {},
		inputData[2].Start: {},
	}
	partialSeriesMaps := make(generate.SeriesBlocksByStart)
	for blockStart, series := range seriesMaps {
		if _, ok := partialBlockStarts[blockStart]; ok {
			partialSeriesMaps[blockStart] = series
		}
	}
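	// SetWriteEmptyShards(false) skips fileset files for shards that hold no
	// data, so only shards that actually received series are flushed to the
	// second node's disk.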
	require.NoError(t, writeTestDataToDisk(namesp, setups[1], partialSeriesMaps, 0,
		func(gOpts generate.Options) generate.Options {
			return gOpts.SetWriteEmptyShards(false)
		}))

	// Start the first server with filesystem bootstrapper
	require.NoError(t, setups[0].StartServer())

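	// Start the second server with peers and filesystem bootstrappers: the
	// filesystem bootstrapper fulfills the three blocks already on its disk,
	// and the peers bootstrapper fetches the remaining blocks from the first node.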
	require.NoError(t, setups[1].StartServer())
	log.Debug("servers are now up")

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

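	// Both nodes should now hold the complete data set: the first node had it
	// written to disk directly, and the second node combined its partial
	// on-disk data with blocks streamed from its peer.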
	// Verify that the in-memory data on each node matches what we expect
	for _, setup := range setups {
		verifySeriesMaps(t, setup, namesp.ID(), seriesMaps)
	}
}