go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/common/tsmon/monitor/serialize.go

// Copyright 2016 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package monitor

import (
	"time"

	"github.com/golang/protobuf/proto"
	"go.chromium.org/luci/common/tsmon/distribution"
	"go.chromium.org/luci/common/tsmon/field"
	"go.chromium.org/luci/common/tsmon/target"
	"go.chromium.org/luci/common/tsmon/types"
	"google.golang.org/protobuf/types/known/timestamppb"

	pb "go.chromium.org/luci/common/tsmon/ts_mon_proto"
)

// dataSetKey identifies a unique (target, metric) pair.
type dataSetKey struct {
	targetHash uint64
	metricName string
}

// SerializeCells creates MetricsCollection messages from a slice of cells,
// grouping the cells into one collection per unique target.
func SerializeCells(cells []types.Cell, now time.Time) []*pb.MetricsCollection {
	collections := map[uint64]*pb.MetricsCollection{}
	dataSets := map[dataSetKey]*pb.MetricsDataSet{}

	// TODO(1026140): the hash and proto of a Target object should be created
	// at the time of the object's creation to avoid unnecessary invocations
	// of Target.Hash() and Target.PopulateProto().
	for _, c := range cells {
		// Find the collection for this cell's target; add it if it doesn't
		// exist.
		targetHash := c.Target.Hash()
		collection, ok := collections[targetHash]
		if !ok {
			collection = &pb.MetricsCollection{}
			collections[targetHash] = collection
			c.Target.PopulateProto(collection)

			// Add is_tsmon to indicate that this target is a tsmon schema.
			collection.RootLabels = append(collection.RootLabels, target.RootLabel("is_tsmon", true))
		}

		// Find the data set; add it if it doesn't exist.
		key := dataSetKey{targetHash, c.Name}
		dataSet, ok := dataSets[key]
		if !ok {
			dataSet = SerializeDataSet(c)
			dataSets[key] = dataSet
			collection.MetricsDataSet = append(collection.MetricsDataSet, dataSet)
		}

		// Add this cell's value to the data set.
		dataSet.Data = append(dataSet.Data, SerializeValue(c, now))
	}

	// Flatten the map of collections into a slice and return it.
	ret := make([]*pb.MetricsCollection, 0, len(collections))
	for _, collection := range collections {
		ret = append(ret, collection)
	}
	return ret
}

// SerializeDataSet creates a new MetricsDataSet with no data points; only the
// metric metadata fields are populated.
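//
// A minimal usage sketch (cell is a hypothetical types.Cell value; real
// callers pair this with SerializeValue for each data point):
//
//	ds := SerializeDataSet(cell)
//	ds.Data = append(ds.Data, SerializeValue(cell, time.Now()))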
func SerializeDataSet(c types.Cell) *pb.MetricsDataSet {
	d := pb.MetricsDataSet{}
	d.MetricName = proto.String(MetricNamePrefix + c.Name)
	d.FieldDescriptor = field.SerializeDescriptor(c.Fields)
	d.Description = proto.String(c.Description)

	if c.ValueType.IsCumulative() {
		d.StreamKind = pb.StreamKind_CUMULATIVE.Enum()
	} else {
		d.StreamKind = pb.StreamKind_GAUGE.Enum()
	}

	switch c.ValueType {
	case types.NonCumulativeIntType, types.CumulativeIntType:
		d.ValueType = pb.ValueType_INT64.Enum()
	case types.NonCumulativeFloatType, types.CumulativeFloatType:
		d.ValueType = pb.ValueType_DOUBLE.Enum()
	case types.NonCumulativeDistributionType, types.CumulativeDistributionType:
		d.ValueType = pb.ValueType_DISTRIBUTION.Enum()
	case types.StringType:
		d.ValueType = pb.ValueType_STRING.Enum()
	case types.BoolType:
		d.ValueType = pb.ValueType_BOOL.Enum()
	}

	if c.Units.IsSpecified() {
		d.Annotations = &pb.Annotations{
			Unit: proto.String(string(c.Units)),
			// Annotations.Timestamp may be true only for Int and Float value
			// types. A distribution is a collection of Float values, often
			// used to track the durations of certain events, such as RPC
			// duration.
			//
			// However, a distribution itself is not a time value, and,
			// therefore, Annotations.Timestamp must not be true for any
			// metric with ValueType_DISTRIBUTION.
			//
			// Setting Timestamp to false for distribution metrics means the
			// monitoring UI will still display the time unit string beside
			// the Y-axis, but it won't be able to adjust the time scale.
			Timestamp: proto.Bool(
				*d.ValueType != pb.ValueType_DISTRIBUTION && c.Units.IsTime(),
			),
		}
	}
	return &d
}

// SerializeValue creates a new MetricsData representing this cell's value.
func SerializeValue(c types.Cell, now time.Time) *pb.MetricsData {
	d := pb.MetricsData{}
	d.Field = field.Serialize(c.Fields, c.FieldVals)

	// A cumulative stream starts at the cell's reset time; a gauge is a
	// point-in-time sample.
	if c.ValueType.IsCumulative() {
		d.StartTimestamp = timestamppb.New(c.ResetTime)
	} else {
		d.StartTimestamp = timestamppb.New(now)
	}
	d.EndTimestamp = timestamppb.New(now)

	switch c.ValueType {
	case types.NonCumulativeIntType, types.CumulativeIntType:
		d.Value = &pb.MetricsData_Int64Value{Int64Value: c.Value.(int64)}
	case types.NonCumulativeFloatType, types.CumulativeFloatType:
		d.Value = &pb.MetricsData_DoubleValue{DoubleValue: c.Value.(float64)}
	case types.CumulativeDistributionType, types.NonCumulativeDistributionType:
		d.Value = &pb.MetricsData_DistributionValue{DistributionValue: serializeDistribution(c.Value.(*distribution.Distribution))}
	case types.StringType:
		d.Value = &pb.MetricsData_StringValue{StringValue: c.Value.(string)}
	case types.BoolType:
		d.Value = &pb.MetricsData_BoolValue{BoolValue: c.Value.(bool)}
	}
	return &d
}

// serializeDistribution copies a distribution.Distribution into a
// MetricsData_Distribution message, including the bucketer parameters and
// the per-bucket counts.
func serializeDistribution(d *distribution.Distribution) *pb.MetricsData_Distribution {
	ret := pb.MetricsData_Distribution{
		Count: proto.Int64(d.Count()),
	}

	// The mean is only well-defined when there is at least one sample.
	if d.Count() > 0 {
		ret.Mean = proto.Float64(d.Sum() / float64(d.Count()))
	}

	// Copy the bucketer params.
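	// A zero bucket width indicates a geometric bucketer (e.g. one made with
	// distribution.GeometricBucketer), whose parameters map to exponential
	// bucket options; any other bucketer is fixed-width and maps to linear
	// bucket options.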
	if d.Bucketer().Width() == 0 {
		ret.BucketOptions = &pb.MetricsData_Distribution_ExponentialBuckets{
			ExponentialBuckets: &pb.MetricsData_Distribution_ExponentialOptions{
				NumFiniteBuckets: proto.Int32(int32(d.Bucketer().NumFiniteBuckets())),
				GrowthFactor:     proto.Float64(d.Bucketer().GrowthFactor()),
				Scale:            proto.Float64(1.0),
			},
		}
	} else {
		ret.BucketOptions = &pb.MetricsData_Distribution_LinearBuckets{
			LinearBuckets: &pb.MetricsData_Distribution_LinearOptions{
				NumFiniteBuckets: proto.Int32(int32(d.Bucketer().NumFiniteBuckets())),
				Width:            proto.Float64(d.Bucketer().Width()),
				Offset:           proto.Float64(0.0),
			},
		}
	}

	// Copy the distribution bucket values, including the underflow and
	// overflow buckets on either end.
	ret.BucketCount = d.Buckets()

	return &ret
}
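
// A minimal end-to-end sketch of how the helpers above fit together. The
// cells slice is hypothetical; in practice the Monitor implementations in
// this package obtain cells from a tsmon store and call SerializeCells when
// sending:
//
//	collections := SerializeCells(cells, time.Now())
//	for _, coll := range collections {
//		upload(coll) // hypothetical transport; each collection covers one target
//	}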