//go:build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"sort"
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/placement"
	"github.com/m3db/m3/src/metrics/aggregation"
	"github.com/m3db/m3/src/metrics/metric/aggregated"

	"github.com/stretchr/testify/require"
)

func TestCustomAggregationWithStagedMetadatas(t *testing.T) {
	metadataFns := [4]metadataFn{
		func(int) metadataUnion {
			return metadataUnion{
				mType:           stagedMetadatasType,
				stagedMetadatas: testStagedMetadatas,
			}
		},
		func(int) metadataUnion {
			return metadataUnion{
				mType:           stagedMetadatasType,
				stagedMetadatas: testStagedMetadatasWithCustomAggregation1,
			}
		},
		func(int) metadataUnion {
			return metadataUnion{
				mType:           stagedMetadatasType,
				stagedMetadatas: testStagedMetadatasWithCustomAggregation2,
			}
		},
		func(int) metadataUnion {
			return metadataUnion{
				mType:           stagedMetadatasType,
				stagedMetadatas: testUpdatedStagedMetadatas,
			}
		},
	}
	testCustomAggregations(t, metadataFns)
}

func testCustomAggregations(t *testing.T, metadataFns [4]metadataFn) {
	if testing.Short() {
		t.SkipNow()
	}
	aggTypesOpts := aggregation.NewTypesOptions().
		SetCounterTypeStringTransformFn(aggregation.SuffixTransform).
		SetTimerTypeStringTransformFn(aggregation.SuffixTransform).
		SetGaugeTypeStringTransformFn(aggregation.SuffixTransform)
	serverOpts := newTestServerOptions(t).SetAggregationTypesOptions(aggTypesOpts)

	// Clock setup.
	clock := newTestClock(time.Now().Truncate(time.Hour))
	serverOpts = serverOpts.SetClockOptions(clock.Options())

	// Placement setup.
	numShards := 1024
	cfg := placementInstanceConfig{
		instanceID:          serverOpts.InstanceID(),
		shardSetID:          serverOpts.ShardSetID(),
		shardStartInclusive: 0,
		shardEndExclusive:   uint32(numShards),
	}
	instance := cfg.newPlacementInstance()
	placement := newPlacement(numShards, []placement.Instance{instance})
	placementKey := serverOpts.PlacementKVKey()
	setPlacement(t, placementKey, serverOpts.ClusterClient(), placement)

	serverOpts = setupTopic(t, serverOpts, placement)

	// Create server.
	testServer := newTestServerSetup(t, serverOpts)
	defer testServer.close()

	// Start the server.
	log := testServer.aggregatorOpts.InstrumentOptions().Logger()
	log.Info("test custom aggregations")
	require.NoError(t, testServer.startServer())
	log.Info("server is now up")
	require.NoError(t, testServer.waitUntilLeader())
	log.Info("server is now the leader")

	var (
		idPrefix = "foo"
		numIDs   = 100
		start    = clock.Now()
		t1       = start.Add(2 * time.Second)
		t2       = start.Add(4 * time.Second)
		t3       = start.Add(6 * time.Second)
		end      = start.Add(8 * time.Second)
		interval = time.Second
	)
	client := testServer.newClient(t)
	require.NoError(t, client.connect())

	// Generate four consecutive datasets covering [start, end), each tagged
	// with a different metadata function.
	ids := generateTestIDs(idPrefix, numIDs)
	inputs := []testDataset{
		mustGenerateTestDataset(t, datasetGenOpts{
			start:        start,
			stop:         t1,
			interval:     interval,
			ids:          ids,
			category:     untimedMetric,
			typeFn:       roundRobinMetricTypeFn,
			valueGenOpts: defaultValueGenOpts,
			metadataFn:   metadataFns[0],
		}),
		mustGenerateTestDataset(t, datasetGenOpts{
			start:        t1,
			stop:         t2,
			interval:     interval,
			ids:          ids,
			category:     untimedMetric,
			typeFn:       roundRobinMetricTypeFn,
			valueGenOpts: defaultValueGenOpts,
			metadataFn:   metadataFns[1],
		}),
		mustGenerateTestDataset(t, datasetGenOpts{
			start:        t2,
			stop:         t3,
			interval:     interval,
			ids:          ids,
			category:     untimedMetric,
			typeFn:       roundRobinMetricTypeFn,
			valueGenOpts: defaultValueGenOpts,
			metadataFn:   metadataFns[2],
		}),
		mustGenerateTestDataset(t, datasetGenOpts{
			start:        t3,
			stop:         end,
			interval:     interval,
			ids:          ids,
			category:     untimedMetric,
			typeFn:       roundRobinMetricTypeFn,
			valueGenOpts: defaultValueGenOpts,
			metadataFn:   metadataFns[3],
		}),
	}
	// Write each dataset, advancing the test clock to each datapoint's timestamp.
	for _, dataset := range inputs {
		for _, data := range dataset {
			clock.SetNow(data.timestamp)
			for _, mm := range data.metricWithMetadatas {
				require.NoError(t, client.writeUntimedMetricWithMetadatas(mm.metric.untimed, mm.metadata.stagedMetadatas))
			}
			require.NoError(t, client.flush())

			// Give the server some time to process the incoming packets.
			time.Sleep(100 * time.Millisecond)
		}
	}

	// Move time forward and wait for ticking to happen. The sleep time
	// must be longer than the lowest resolution across all policies.
	finalTime := end.Add(6 * time.Second)
	clock.SetNow(finalTime)
	time.Sleep(waitForDataToFlush)

	require.NoError(t, client.close())

	// Stop the server.
	require.NoError(t, testServer.stopServer())
	log.Info("server is now down")

	// Validate results.
	var expected []aggregated.MetricWithStoragePolicy
	for _, input := range inputs {
		expected = append(expected, mustComputeExpectedResults(t, finalTime, input, testServer.aggregatorOpts)...)
	}
	sort.Sort(byTimeIDPolicyAscending(expected))
	actual := testServer.sortedResults()
	require.Equal(t, expected, actual)
}