github.com/m3db/m3@v1.5.0/src/aggregator/integration/custom_aggregations_test.go

// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
    "sort"
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/m3db/m3/src/cluster/placement"
    "github.com/m3db/m3/src/metrics/aggregation"
    "github.com/m3db/m3/src/metrics/metric/aggregated"
)

func TestCustomAggregationWithStagedMetadatas(t *testing.T) {
    metadataFns := [4]metadataFn{
        func(int) metadataUnion {
            return metadataUnion{
                mType:           stagedMetadatasType,
                stagedMetadatas: testStagedMetadatas,
            }
        },
        func(int) metadataUnion {
            return metadataUnion{
                mType:           stagedMetadatasType,
                stagedMetadatas: testStagedMetadatasWithCustomAggregation1,
            }
        },
        func(int) metadataUnion {
            return metadataUnion{
                mType:           stagedMetadatasType,
                stagedMetadatas: testStagedMetadatasWithCustomAggregation2,
            }
        },
        func(int) metadataUnion {
            return metadataUnion{
                mType:           stagedMetadatasType,
                stagedMetadatas: testUpdatedStagedMetadatas,
            }
        },
    }
    testCustomAggregations(t, metadataFns)
}

func testCustomAggregations(t *testing.T, metadataFns [4]metadataFn) {
    if testing.Short() {
        t.SkipNow()
    }

    aggTypesOpts := aggregation.NewTypesOptions().
        SetCounterTypeStringTransformFn(aggregation.SuffixTransform).
        SetTimerTypeStringTransformFn(aggregation.SuffixTransform).
        SetGaugeTypeStringTransformFn(aggregation.SuffixTransform)
    serverOpts := newTestServerOptions(t).SetAggregationTypesOptions(aggTypesOpts)

    // Clock setup.
    clock := newTestClock(time.Now().Truncate(time.Hour))
    serverOpts = serverOpts.SetClockOptions(clock.Options())

    // Placement setup.
    numShards := 1024
    cfg := placementInstanceConfig{
        instanceID:          serverOpts.InstanceID(),
        shardSetID:          serverOpts.ShardSetID(),
        shardStartInclusive: 0,
        shardEndExclusive:   uint32(numShards),
    }
    instance := cfg.newPlacementInstance()
    placement := newPlacement(numShards, []placement.Instance{instance})
    placementKey := serverOpts.PlacementKVKey()
    setPlacement(t, placementKey, serverOpts.ClusterClient(), placement)

    serverOpts = setupTopic(t, serverOpts, placement)

    // Create server.
    testServer := newTestServerSetup(t, serverOpts)
    defer testServer.close()

    // Start the server.
    log := testServer.aggregatorOpts.InstrumentOptions().Logger()
    log.Info("test custom aggregations")
    require.NoError(t, testServer.startServer())
    log.Info("server is now up")
    require.NoError(t, testServer.waitUntilLeader())
    log.Info("server is now the leader")

    var (
        idPrefix = "foo"
        numIDs   = 100
        start    = clock.Now()
        t1       = start.Add(2 * time.Second)
        t2       = start.Add(4 * time.Second)
        t3       = start.Add(6 * time.Second)
        end      = start.Add(8 * time.Second)
        interval = time.Second
    )
    client := testServer.newClient(t)
    require.NoError(t, client.connect())

    ids := generateTestIDs(idPrefix, numIDs)
    inputs := []testDataset{
        mustGenerateTestDataset(t, datasetGenOpts{
            start:        start,
            stop:         t1,
            interval:     interval,
            ids:          ids,
            category:     untimedMetric,
            typeFn:       roundRobinMetricTypeFn,
            valueGenOpts: defaultValueGenOpts,
            metadataFn:   metadataFns[0],
        }),
        mustGenerateTestDataset(t, datasetGenOpts{
            start:        t1,
            stop:         t2,
            interval:     interval,
            ids:          ids,
            category:     untimedMetric,
            typeFn:       roundRobinMetricTypeFn,
            valueGenOpts: defaultValueGenOpts,
            metadataFn:   metadataFns[1],
        }),
        mustGenerateTestDataset(t, datasetGenOpts{
            start:        t2,
            stop:         t3,
            interval:     interval,
            ids:          ids,
            category:     untimedMetric,
            typeFn:       roundRobinMetricTypeFn,
            valueGenOpts: defaultValueGenOpts,
            metadataFn:   metadataFns[2],
        }),
        mustGenerateTestDataset(t, datasetGenOpts{
            start:        t3,
            stop:         end,
            interval:     interval,
            ids:          ids,
            category:     untimedMetric,
            typeFn:       roundRobinMetricTypeFn,
            valueGenOpts: defaultValueGenOpts,
            metadataFn:   metadataFns[3],
        }),
    }
    for _, dataset := range inputs {
        for _, data := range dataset {
            clock.SetNow(data.timestamp)
            for _, mm := range data.metricWithMetadatas {
                require.NoError(t, client.writeUntimedMetricWithMetadatas(mm.metric.untimed, mm.metadata.stagedMetadatas))
            }
            require.NoError(t, client.flush())

            // Give the server some time to process the incoming packets.
            time.Sleep(100 * time.Millisecond)
        }
    }

    // Move time forward and wait for ticking to happen. The sleep time
    // must be longer than the lowest resolution across all policies.
    finalTime := end.Add(6 * time.Second)
    clock.SetNow(finalTime)
    time.Sleep(6 * time.Second)

    require.NoError(t, client.close())

    // Stop the server.
    require.NoError(t, testServer.stopServer())
    log.Info("server is now down")

    // Validate results.
    var expected []aggregated.MetricWithStoragePolicy
    for _, input := range inputs {
        expected = append(expected, mustComputeExpectedResults(t, finalTime, input, testServer.aggregatorOpts)...)
    }
    sort.Sort(byTimeIDPolicyAscending(expected))
    actual := testServer.sortedResults()
    require.Equal(t, expected, actual)
}