github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/peers_bootstrap_high_concurrency_test.go

//go:build integration
// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"fmt"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	"github.com/m3db/m3/src/dbnode/storage/index"
	"github.com/m3db/m3/src/m3ninx/idx"
	idxpersist "github.com/m3db/m3/src/m3ninx/persist"
	"github.com/m3db/m3/src/x/ident"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

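// The two variants below drive the peers bootstrapper with inverse
// batch-size/concurrency shapes: small batches across many workers, and
// large batches across few workers. Both generate the same total number
// of series (BatchesPerWorker * Concurrency * BatchSize).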
func TestPeersBootstrapHighConcurrencyBatch16Workers64(t *testing.T) {
	testPeersBootstrapHighConcurrency(t,
		testPeersBootstrapHighConcurrencyOptions{
			BatchSize:        16,
			Concurrency:      64,
			BatchesPerWorker: 8,
		})
}

func TestPeersBootstrapHighConcurrencyBatch64Workers16(t *testing.T) {
	testPeersBootstrapHighConcurrency(t,
		testPeersBootstrapHighConcurrencyOptions{
			BatchSize:        64,
			Concurrency:      16,
			BatchesPerWorker: 8,
		})
}

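// testPeersBootstrapHighConcurrencyOptions sets the fetch-blocks batch size
// and concurrency used by the peers bootstrapper under test; BatchesPerWorker
// scales the total number of series generated.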
type testPeersBootstrapHighConcurrencyOptions struct {
	BatchSize        int
	Concurrency      int
	BatchesPerWorker int
}

func testPeersBootstrapHighConcurrency(
	t *testing.T,
	testOpts testPeersBootstrapHighConcurrencyOptions,
) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setups
	log := xtest.NewLogger(t)

	blockSize := 2 * time.Hour

	idxOpts := namespace.NewIndexOptions().
		SetEnabled(true).
		SetBlockSize(blockSize)

	rOpts := retention.NewOptions().
		SetRetentionPeriod(6 * time.Hour).
		SetBlockSize(blockSize).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)

	nOpts := namespace.NewOptions().
		SetRetentionOptions(rOpts).
		SetIndexOptions(idxOpts)

	namesp, err := namespace.NewMetadata(testNamespaces[0], nOpts)
	require.NoError(t, err)

	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{namesp}).
		// Use TChannel clients for writing / reading because we want to target individual nodes at a time
		// and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)

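	// The first node is the data source and bootstraps from the filesystem
	// only; the second node bootstraps from its peer using the batch size
	// and concurrency under test.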
	setupOpts := []BootstrappableTestSetupOptions{
		{
			DisablePeersBootstrapper: true,
		},
		{
			DisableCommitLogBootstrapper: true,
			DisablePeersBootstrapper:     false,
			BootstrapBlocksBatchSize:     testOpts.BatchSize,
			BootstrapBlocksConcurrency:   testOpts.Concurrency,
		},
	}
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Write test data for first node
	numSeries := testOpts.BatchesPerWorker * testOpts.Concurrency * testOpts.BatchSize
	log.Sugar().Debugf("testing a total of %d IDs with batch size %d and concurrency %d",
		numSeries, testOpts.BatchSize, testOpts.Concurrency)
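	// Both variants yield the same dataset size:
	// 8 batches/worker * 64 workers * 16/batch = 8 * 16 * 64 = 8192 series.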

	now := setups[0].NowFn()()
	commonTags := []ident.Tag{
		{
			Name:  ident.StringID("fruit"),
			Value: ident.StringID("apple"),
		},
	}
	numPoints := 10
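	// Generate identical series for four consecutive block starts: the three
	// most recent complete blocks plus the current block.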
	blockConfigs := blockConfigs(
		generateTaggedBlockConfigs(generateTaggedBlockConfig{
			series:     numSeries,
			numPoints:  numPoints,
			commonTags: commonTags,
			blockStart: now.Add(-3 * blockSize),
		}),
		generateTaggedBlockConfigs(generateTaggedBlockConfig{
			series:     numSeries,
			numPoints:  numPoints,
			commonTags: commonTags,
			blockStart: now.Add(-2 * blockSize),
		}),
		generateTaggedBlockConfigs(generateTaggedBlockConfig{
			series:     numSeries,
			numPoints:  numPoints,
			commonTags: commonTags,
			blockStart: now.Add(-1 * blockSize),
		}),
		generateTaggedBlockConfigs(generateTaggedBlockConfig{
			series:     numSeries,
			numPoints:  numPoints,
			commonTags: commonTags,
			blockStart: now,
		}),
	)
	seriesMaps := generate.BlocksByStart(blockConfigs)
	err = writeTestDataToDisk(namesp, setups[0], seriesMaps, 0)
	require.NoError(t, err)

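	// Write the corresponding index data to disk for each block start so the
	// first node's filesystem bootstrapper also recovers the index.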
	for blockStart, series := range seriesMaps {
		docs := generate.ToDocMetadata(series)
		require.NoError(t, writeTestIndexDataToDisk(
			namesp,
			setups[0].StorageOpts(),
			idxpersist.DefaultIndexVolumeType,
			blockStart,
			setups[0].ShardSet().AllIDs(),
			docs,
		))
	}

	// Start the first server with filesystem bootstrapper
	require.NoError(t, setups[0].StartServer())

	// Start the second server with peers and filesystem bootstrappers
	bootstrapStart := time.Now()
	require.NoError(t, setups[1].StartServer())
	log.Debug("servers are now up", zap.Duration("took", time.Since(bootstrapStart)))

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

	// Verify in-memory data matches what we expect
	for _, setup := range setups {
		verifySeriesMaps(t, setup, namesp.ID(), seriesMaps)
	}

	// Issue some index queries to the second node, which bootstrapped the metadata
	session, err := setups[1].M3DBClient().DefaultSession()
	require.NoError(t, err)

	start := now.Add(-rOpts.RetentionPeriod())
	end := now.Add(blockSize)
	queryOpts := index.QueryOptions{StartInclusive: start, EndExclusive: end}

	// Match on common tags
	termQuery := idx.NewTermQuery(commonTags[0].Name.Bytes(), commonTags[0].Value.Bytes())
	iter, _, err := session.FetchTaggedIDs(ContextWithDefaultTimeout(),
		namesp.ID(), index.Query{Query: termQuery}, queryOpts)
	require.NoError(t, err)
	defer iter.Finalize()

	count := 0
	for iter.Next() {
		count++
	}
	require.NoError(t, iter.Err())
	require.Equal(t, numSeries, count)
}

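// generateTaggedBlockConfig describes a batch of tagged series to generate
// for a single block start.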
type generateTaggedBlockConfig struct {
	series     int
	numPoints  int
	commonTags []ident.Tag
	blockStart xtime.UnixNano
}

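// generateTaggedBlockConfigs expands a config into one generate.BlockConfig
// per series, tagging each series with a unique "series" tag plus the
// common tags.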
func generateTaggedBlockConfigs(
	cfg generateTaggedBlockConfig,
) []generate.BlockConfig {
	results := make([]generate.BlockConfig, 0, cfg.series)
	for i := 0; i < cfg.series; i++ {
		id := fmt.Sprintf("series_%d", i)
		tags := make([]ident.Tag, 0, 1+len(cfg.commonTags))
		tags = append(tags, ident.Tag{
			Name:  ident.StringID("series"),
			Value: ident.StringID(fmt.Sprintf("%d", i)),
		})
		tags = append(tags, cfg.commonTags...)
		results = append(results, generate.BlockConfig{
			IDs:       []string{id},
			Tags:      ident.NewTags(tags...),
			NumPoints: cfg.numPoints,
			Start:     cfg.blockStart,
		})
	}
	return results
}

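// blockConfigs flattens multiple slices of block configs into one slice.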
func blockConfigs(cfgs ...[]generate.BlockConfig) []generate.BlockConfig {
	var results []generate.BlockConfig
	for _, elem := range cfgs {
		results = append(results, elem...)
	}
	return results
}