github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ts/model_test.go

// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package ts

import (
    "math"
    "math/rand"
    "testing"
    "time"

    "github.com/cockroachdb/cockroach/pkg/ts/tspb"
    "github.com/cockroachdb/cockroach/pkg/util/leaktest"
    "github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

// The data for the model test is generated for two metrics with four sources
// each. The values are randomly generated for each timestamp, but we do input
// a bitmap of "gaps" which determine where each data series is missing data.
// This allows us to exercise the various gap-filling strategies of the time
// series system.
//
// In the bitmap below, each column represents one of the eight series. Data
// points will be generated for each sample period in order, with each row
// representing one sample period. A 1 in a column means that a series gets a
// point for that sample period; a zero represents a gap.
var modelTestGapBitmap = [][8]int{
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 0, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 0, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 0, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 0, 0, 1, 0, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 0, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {0, 0, 0, 0, 0, 0, 0, 0},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 0, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {0, 0, 0, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 0, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 0, 0, 1, 0, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 0, 0, 0},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 0, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {0, 0, 0, 0, 0, 0, 0, 0},
    {0, 0, 0, 0, 0, 0, 0, 0},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 0, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {1, 1, 1, 1, 1, 1, 1, 1},
    {0, 0, 0, 1, 1, 1, 1, 1},
    {1, 0, 1, 1, 1, 1, 1, 1},
    {1, 1, 0, 1, 1, 0, 1, 1},
    {1, 1, 1, 1, 0, 0, 1, 1},
    {1, 1, 1, 1, 1, 0, 1, 1},
    {1, 1, 1, 1, 1, 0, 1, 1},
    {1, 1, 1, 1, 1, 0, 1, 1},
    {1, 1, 0, 1, 1, 0, 1, 1},
}

var modelTestSourceNames = []string{
    "source1",
    "source2",
    "source3",
    "source4",
}

var modelTestMetricNames = []string{
    "metric1",
    "metric2",
}
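// gapBitmapColumn is an illustrative helper added for clarity; it is a sketch
// rather than part of the original test flow and is not called below. It makes
// the bitmap's column layout explicit: the eight columns are ordered
// metric-major, so metric1 occupies columns 0-3 and metric2 occupies columns
// 4-7, with one column per source. The tests inline this computation as
// 4*metricNum+sourceNum.
func gapBitmapColumn(metricNum, sourceNum int) int {
    return len(modelTestSourceNames)*metricNum + sourceNum
}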
// modelTestDownsamplers are the downsamplers that will be exercised during
// the model test.
var modelTestDownsamplers = []tspb.TimeSeriesQueryAggregator{
    tspb.TimeSeriesQueryAggregator_AVG,
    tspb.TimeSeriesQueryAggregator_SUM,
    tspb.TimeSeriesQueryAggregator_MAX,
    tspb.TimeSeriesQueryAggregator_MIN,
}

// modelTestAggregators are the source aggregators that will be exercised during
// the model test.
var modelTestAggregators = []tspb.TimeSeriesQueryAggregator{
    tspb.TimeSeriesQueryAggregator_AVG,
    tspb.TimeSeriesQueryAggregator_SUM,
    tspb.TimeSeriesQueryAggregator_MAX,
    tspb.TimeSeriesQueryAggregator_MIN,
}

// modelTestDerivatives are the derivative options that will be exercised during
// the model test.
var modelTestDerivatives = []tspb.TimeSeriesQueryDerivative{
    tspb.TimeSeriesQueryDerivative_NONE,
    tspb.TimeSeriesQueryDerivative_DERIVATIVE,
    tspb.TimeSeriesQueryDerivative_NON_NEGATIVE_DERIVATIVE,
}

// modelTestInterpolationLimits are the various interpolation limits that will
// be exercised.
var modelTestInterpolationLimits = []int64{
    time.Second.Nanoseconds() * 1,
    time.Minute.Nanoseconds(),     // Only two gaps in the bitmap exceed this limit.
    5 * time.Minute.Nanoseconds(), // No gaps exceed this length.
}

// modelTestSampleDurations are the various sample durations that will be
// exercised.
var modelTestSampleDurations = []int64{
    Resolution10s.SampleDuration(),
    Resolution10s.SampleDuration() * 3,
    Resolution10s.SlabDuration(),
}

// modelTestRowCount is the number of sample periods that will be filled for
// the test.
var modelTestRowCount = len(modelTestGapBitmap)

// modelTestStartTime, the first time at which data is recorded in the model, is
// set at a reasonably modern time and configured to overlap a slab boundary.
// This is accomplished by computing an anchor time, and then putting the start
// time earlier than the anchor time such that half of recorded samples occur
// before the anchor time.
var modelTestAnchorTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()
var modelTestStartTime = modelTestAnchorTime - int64(modelTestRowCount/2)*Resolution10s.SampleDuration()

// modelTestQueryTimes are the bounds that will be queried for the tests.
var modelTestQueryTimes = []struct {
    start int64
    end   int64
}{
    {
        start: modelTestStartTime,
        end:   modelTestStartTime + time.Hour.Nanoseconds(),
    },
    {
        start: modelTestStartTime + 20*Resolution10s.SampleDuration(),
        end:   modelTestStartTime + 35*Resolution10s.SampleDuration(),
    },
}
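// Spelled out (an illustrative note, not load-bearing for the tests): the
// bitmap has 60 rows, so modelTestStartTime sits 30 sample periods, i.e. 300
// seconds, before the midnight 2018-01-01 UTC anchor. Since the anchor
// coincides with a slab boundary for Resolution10s, the recorded samples
// straddle that boundary, and the first query range above (start to start+1h)
// comfortably covers the entire ~10 minutes of written data.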
func TestTimeSeriesModelTest(t *testing.T) {
    defer leaktest.AfterTest(t)()
    tm := newTestModelRunner(t)
    tm.Start()
    defer tm.Stop()

    s1 := rand.NewSource(timeutil.Now().UnixNano())
    r1 := rand.New(s1)

    // populate model with random values according to gap bitmap.
    for rowNum := 0; rowNum <= modelTestRowCount; rowNum++ {
        for metricNum, metric := range modelTestMetricNames {
            for sourceNum, source := range modelTestSourceNames {
                // Check the gap bitmap to see if this sample period and source get a
                // data point.
                if modelTestGapBitmap[rowNum%len(modelTestGapBitmap)][4*metricNum+sourceNum] > 0 {
                    tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
                        tsd(metric, source,
                            tsdp(getSampleTime(rowNum), math.Floor(r1.Float64()*10000)),
                        ),
                    })
                }
            }
        }
    }

    tm.assertModelCorrect()
    {
        // Sanity check: model should contain a datapoint for all but two sample
        // periods (one period is fully missing from the gap bitmap).
        query := tm.makeQuery(
            modelTestMetricNames[0], Resolution10s, modelTestStartTime, modelTestStartTime+time.Hour.Nanoseconds(),
        )
        query.assertSuccess(len(modelTestGapBitmap)-2, 4)
    }

    executeQueryMatrix(t, tm)
}

func TestTimeSeriesRollupModelTest(t *testing.T) {
    defer leaktest.AfterTest(t)()
    tm := newTestModelRunner(t)
    tm.Start()
    defer tm.Stop()

    s1 := rand.NewSource(timeutil.Now().UnixNano())
    r1 := rand.New(s1)

    // populate model with random values according to gap bitmap.
    for rowNum := 0; rowNum <= modelTestRowCount; rowNum++ {
        for metricNum, metric := range modelTestMetricNames {
            for sourceNum, source := range modelTestSourceNames {
                // Check the gap bitmap to see if this sample period and source get a
                // data point.
                if modelTestGapBitmap[rowNum%len(modelTestGapBitmap)][4*metricNum+sourceNum] > 0 {
                    tm.storeTimeSeriesData(Resolution10s, []tspb.TimeSeriesData{
                        tsd(metric, source,
                            tsdp(getSampleTime(rowNum), math.Floor(r1.Float64()*10000)),
                        ),
                    })
                }
            }
        }
    }

    tm.maintain(modelTestAnchorTime + resolution10sDefaultRollupThreshold.Nanoseconds())

    tm.assertModelCorrect()
    {
        // Sanity check: after the rollup, the 10s resolution should only have half
        // of its data points.
        query := tm.makeQuery(
            modelTestMetricNames[0], Resolution10s, modelTestStartTime, modelTestStartTime+time.Hour.Nanoseconds(),
        )
        query.assertSuccess(modelTestRowCount/2-1, 4)
    }
    {
        // Sanity check: after the rollup, the 30m resolution should contain a
        // single data point.
        query := tm.makeQuery(
            modelTestMetricNames[0], Resolution30m, modelTestStartTime, modelTestStartTime+time.Hour.Nanoseconds(),
        )
        query.assertSuccess(1, 4)
    }

    executeQueryMatrix(t, tm)
}

// getSampleTime returns the timestamp for the numbered sample period in the
// model test.
func getSampleTime(n int) time.Duration {
    return time.Duration(modelTestStartTime + int64(n)*Resolution10s.SampleDuration())
}

func executeQueryMatrix(t *testing.T, tm testModelRunner) {
    for _, metric := range modelTestMetricNames {
        for _, downsampler := range modelTestDownsamplers {
            for _, aggregator := range modelTestAggregators {
                for _, derivative := range modelTestDerivatives {
                    for _, interpolationLimit := range modelTestInterpolationLimits {
                        for _, sampleDuration := range modelTestSampleDurations {
                            for _, queryRange := range modelTestQueryTimes {
                                for srcCount := 0; srcCount <= len(modelTestSourceNames); srcCount++ {
                                    query := tm.makeQuery(
                                        metric, Resolution10s, queryRange.start, queryRange.end,
                                    )
                                    query.setDownsampler(downsampler)
                                    query.setSourceAggregator(aggregator)
                                    query.setDerivative(derivative)
                                    query.Sources = modelTestSourceNames[:srcCount]
                                    query.InterpolationLimitNanos = interpolationLimit
                                    query.SampleDurationNanos = sampleDuration
                                    query.assertMatchesModel()
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
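// executeQueryMatrix issues every combination of the option slices defined at
// the top of the file: 2 metrics x 4 downsamplers x 4 aggregators x
// 3 derivatives x 3 interpolation limits x 3 sample durations x 2 query ranges
// x 5 source subsets, i.e. 8,640 queries per test, each verified against the
// in-memory model. To run just these tests, an illustrative invocation
// (assuming a standard repository checkout) is:
//
//	go test ./pkg/ts -run 'TestTimeSeries(Rollup)?ModelTest'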