github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/aggregator/integration/metadata_change_test.go

//go:build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"sort"
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/placement"
	"github.com/m3db/m3/src/metrics/metric/aggregated"

	"github.com/stretchr/testify/require"
)

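// TestMetadataChangeWithStagedMetadatas exercises a mid-stream metadata change:
// each metric ID is written first with testStagedMetadatas and then with
// testUpdatedStagedMetadatas, and the aggregated output must cover both.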
func TestMetadataChangeWithStagedMetadatas(t *testing.T) {
	oldMetadataFn := func(int) metadataUnion {
		return metadataUnion{
			mType:           stagedMetadatasType,
			stagedMetadatas: testStagedMetadatas,
		}
	}
	newMetadataFn := func(int) metadataUnion {
		return metadataUnion{
			mType:           stagedMetadatasType,
			stagedMetadatas: testUpdatedStagedMetadatas,
		}
	}
	testMetadataChange(t, oldMetadataFn, newMetadataFn)
}

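// testMetadataChange stands up a single-instance aggregator, writes the same set
// of untimed metric IDs first with the old metadata and then with the new
// metadata, and verifies the flushed results match the expected aggregations for
// both datasets.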
func testMetadataChange(t *testing.T, oldMetadataFn, newMetadataFn metadataFn) {
	if testing.Short() {
		t.SkipNow()
	}

	serverOpts := newTestServerOptions(t)

	// Clock setup.
	clock := newTestClock(time.Now().Truncate(time.Hour))
	serverOpts = serverOpts.SetClockOptions(clock.Options())

	// Placement setup.
	numShards := 1024
	cfg := placementInstanceConfig{
		instanceID:          serverOpts.InstanceID(),
		shardSetID:          serverOpts.ShardSetID(),
		shardStartInclusive: 0,
		shardEndExclusive:   uint32(numShards),
	}
	instance := cfg.newPlacementInstance()
	placement := newPlacement(numShards, []placement.Instance{instance})
	placementKey := serverOpts.PlacementKVKey()
	setPlacement(t, placementKey, serverOpts.ClusterClient(), placement)

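	// Topic setup.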
	serverOpts = setupTopic(t, serverOpts, placement)

	// Create server.
	testServer := newTestServerSetup(t, serverOpts)
	defer testServer.close()

	// Start the server.
	log := testServer.aggregatorOpts.InstrumentOptions().Logger()
	log.Info("test metadata change")
	require.NoError(t, testServer.startServer())
	log.Info("server is now up")
	require.NoError(t, testServer.waitUntilLeader())
	log.Info("server is now the leader")

	var (
		idPrefix = "foo"
		numIDs   = 100
		start    = clock.Now()
		middle   = start.Add(4 * time.Second)
		end      = start.Add(10 * time.Second)
		interval = time.Second
	)
	client := testServer.newClient(t)
	require.NoError(t, client.connect())

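	// Generate two datasets over the same IDs: the first covers [start, middle)
	// with the old metadata, the second covers [middle, end) with the new metadata.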
	ids := generateTestIDs(idPrefix, numIDs)
	inputs := []testDataset{
		mustGenerateTestDataset(t, datasetGenOpts{
			start:        start,
			stop:         middle,
			interval:     interval,
			ids:          ids,
			category:     untimedMetric,
			typeFn:       roundRobinMetricTypeFn,
			valueGenOpts: defaultValueGenOpts,
			metadataFn:   oldMetadataFn,
		}),
		mustGenerateTestDataset(t, datasetGenOpts{
			start:        middle,
			stop:         end,
			interval:     interval,
			ids:          ids,
			category:     untimedMetric,
			typeFn:       roundRobinMetricTypeFn,
			valueGenOpts: defaultValueGenOpts,
			metadataFn:   newMetadataFn,
		}),
	}
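	// Write each datapoint, advancing the shared test clock to its timestamp so
	// the server attributes the writes to the correct aggregation windows.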
	for _, dataset := range inputs {
		for _, data := range dataset {
			clock.SetNow(data.timestamp)
			for _, mm := range data.metricWithMetadatas {
				require.NoError(t, client.writeUntimedMetricWithMetadatas(mm.metric.untimed, mm.metadata.stagedMetadatas))
			}
			require.NoError(t, client.flush())

			// Give server some time to process the incoming packets.
			time.Sleep(100 * time.Millisecond)
		}
	}

	// Move time forward and wait for ticking to happen. The sleep time
	// must be longer than the lowest resolution across all policies.
	finalTime := end.Add(6 * time.Second)
	clock.SetNow(finalTime)
	time.Sleep(waitForDataToFlush)

	require.NoError(t, client.close())

	// Stop the server.
	require.NoError(t, testServer.stopServer())
	log.Info("server is now down")

	// Validate results.
	var expected []aggregated.MetricWithStoragePolicy
	for _, input := range inputs {
		expected = append(expected, mustComputeExpectedResults(t, finalTime, input, testServer.aggregatorOpts)...)
	}
	sort.Sort(byTimeIDPolicyAscending(expected))
	actual := testServer.sortedResults()
	require.Equal(t, expected, actual)
}