github.com/alibaba/ilogtail/pkg@v0.0.0-20250526110833-c53b480d046c/protocol/decoder/opentelemetry/otlpDataToSLSProto.go (about)

     1  package opentelemetry
     2  
     3  import (
     4  	"encoding/json"
     5  	"fmt"
     6  	"math"
     7  	"strconv"
     8  	"time"
     9  
    10  	"go.opentelemetry.io/collector/pdata/pcommon"
    11  	"go.opentelemetry.io/collector/pdata/plog"
    12  	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
    13  	"go.opentelemetry.io/collector/pdata/pmetric"
    14  	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
    15  	"go.opentelemetry.io/collector/pdata/ptrace"
    16  	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
    17  
    18  	"github.com/alibaba/ilogtail/pkg/helper"
    19  	"github.com/alibaba/ilogtail/pkg/protocol"
    20  	"github.com/alibaba/ilogtail/pkg/protocol/otlp"
    21  )
    22  
    23  const (
    24  	metricNameKey      = "__name__"
    25  	labelsKey          = "__labels__"
    26  	timeNanoKey        = "__time_nano__"
    27  	valueKey           = "__value__"
    28  	infinityBoundValue = "+Inf"
    29  	bucketLabelKey     = "le"
    30  	summaryLabelKey    = "quantile"
    31  )
    32  
    33  const (
    34  	metricNameSuffixSum       = "_sum"
    35  	metricNameSuffixCount     = "_count"
    36  	metricNameSuffixMax       = "_max"
    37  	metricNameSuffixMin       = "_min"
    38  	metricNameSuffixBucket    = "_bucket"
    39  	metricNameSuffixExemplars = "_exemplars"
    40  )
    41  
    42  func min(l, r int) int {
    43  	if l < r {
    44  		return l
    45  	}
    46  	return r
    47  }
    48  
    49  func formatMetricName(name string) string {
    50  	var newName []byte
    51  	for i := 0; i < len(name); i++ {
    52  		b := name[i]
    53  		if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9') || b == '_' || b == ':' {
    54  			continue
    55  		} else {
    56  			if newName == nil {
    57  				newName = []byte(name)
    58  			}
    59  			newName[i] = '_'
    60  		}
    61  	}
    62  	if newName == nil {
    63  		return name
    64  	}
    65  	return string(newName)
    66  }
    67  
// newMetricLogFromRaw builds an SLS metric log from a metric name, its label
// set, a timestamp in nanoseconds, and a float64 sample value. It is a thin
// wrapper around helper.NewMetricLog that only reorders the arguments.
func newMetricLogFromRaw(name string, labels *helper.MetricLabels, nsec int64, value float64) *protocol.Log {
	return helper.NewMetricLog(name, nsec, value, labels)
}
    71  
// attrs2Labels appends every attribute in attrs to labels, stringifying each
// value with pcommon.Value.AsString. Existing labels are not deduplicated.
func attrs2Labels(labels *helper.MetricLabels, attrs pcommon.Map) {
	attrs.Range(func(k string, v pcommon.Value) bool {
		labels.Append(k, v.AsString())
		return true
	})
}
    78  
    79  func newExemplarMetricLogFromRaw(name string, exemplar pmetric.Exemplar, labels *helper.MetricLabels) *protocol.Log {
    80  	metricName := name + metricNameSuffixExemplars
    81  	if !exemplar.TraceID().IsEmpty() {
    82  		labels.Append("traceId", exemplar.TraceID().String())
    83  	}
    84  
    85  	if !exemplar.SpanID().IsEmpty() {
    86  		labels.Append("spanId", exemplar.SpanID().String())
    87  	}
    88  
    89  	filterAttributeMap := pcommon.NewMap()
    90  	exemplar.FilteredAttributes().CopyTo(filterAttributeMap)
    91  
    92  	for key, value := range filterAttributeMap.AsRaw() {
    93  		labels.Append(key, fmt.Sprintf("%v", value))
    94  	}
    95  
    96  	log := &protocol.Log{
    97  		Contents: []*protocol.Log_Content{
    98  			{
    99  				Key:   metricNameKey,
   100  				Value: formatMetricName(metricName),
   101  			},
   102  			{
   103  				Key:   timeNanoKey,
   104  				Value: strconv.FormatInt(exemplar.Timestamp().AsTime().Unix(), 10),
   105  			},
   106  			{
   107  				Key:   labelsKey,
   108  				Value: labels.String(),
   109  			},
   110  			{
   111  				Key:   valueKey,
   112  				Value: strconv.FormatFloat(exemplar.DoubleValue(), 'g', -1, 64),
   113  			},
   114  		},
   115  	}
   116  	protocol.SetLogTimeWithNano(log, uint32(exemplar.Timestamp()/1e9), uint32(exemplar.Timestamp()%1e9))
   117  	return log
   118  }
   119  
   120  func GaugeToLogs(name string, data pmetric.NumberDataPointSlice, defaultLabels *helper.MetricLabels) (logs []*protocol.Log) {
   121  	for i := 0; i < data.Len(); i++ {
   122  		dataPoint := data.At(i)
   123  
   124  		labels := defaultLabels.Clone()
   125  		attrs2Labels(labels, dataPoint.Attributes())
   126  
   127  		for j := 0; j < dataPoint.Exemplars().Len(); j++ {
   128  			logs = append(logs, newExemplarMetricLogFromRaw(name, dataPoint.Exemplars().At(j), labels.Clone()))
   129  		}
   130  
   131  		value := dataPoint.DoubleValue()
   132  		if dataPoint.IntValue() != 0 {
   133  			value = float64(dataPoint.IntValue())
   134  		}
   135  		logs = append(logs, newMetricLogFromRaw(name, labels, int64(dataPoint.Timestamp()), value))
   136  	}
   137  	return logs
   138  }
   139  
   140  func SumToLogs(name string, aggregationTemporality pmetric.AggregationTemporality, isMonotonic string, data pmetric.NumberDataPointSlice, defaultLabels *helper.MetricLabels) (logs []*protocol.Log) {
   141  	for i := 0; i < data.Len(); i++ {
   142  		dataPoint := data.At(i)
   143  
   144  		labels := defaultLabels.Clone()
   145  		attrs2Labels(labels, dataPoint.Attributes())
   146  		labels.Append(otlp.TagKeyMetricIsMonotonic, isMonotonic)
   147  		labels.Append(otlp.TagKeyMetricAggregationTemporality, aggregationTemporality.String())
   148  
   149  		for j := 0; j < dataPoint.Exemplars().Len(); j++ {
   150  			logs = append(logs, newExemplarMetricLogFromRaw(name, dataPoint.Exemplars().At(j), labels.Clone()))
   151  		}
   152  
   153  		value := dataPoint.DoubleValue()
   154  		if dataPoint.IntValue() != 0 {
   155  			value = float64(dataPoint.IntValue())
   156  		}
   157  		logs = append(logs, newMetricLogFromRaw(name, labels, int64(dataPoint.Timestamp()), value))
   158  	}
   159  	return logs
   160  }
   161  
   162  func SummaryToLogs(name string, data pmetric.SummaryDataPointSlice, defaultLabels *helper.MetricLabels) (logs []*protocol.Log) {
   163  	for i := 0; i < data.Len(); i++ {
   164  		dataPoint := data.At(i)
   165  
   166  		labels := defaultLabels.Clone()
   167  		attrs2Labels(labels, dataPoint.Attributes())
   168  
   169  		logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixSum, labels, int64(dataPoint.Timestamp()), dataPoint.Sum()))
   170  		logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixCount, labels, int64(dataPoint.Timestamp()), float64(dataPoint.Count())))
   171  
   172  		summaryLabels := labels.Clone()
   173  		summaryLabels.Append(summaryLabelKey, "")
   174  
   175  		values := dataPoint.QuantileValues()
   176  		for j := 0; j < values.Len(); j++ {
   177  			value := values.At(j)
   178  			summaryLabels.Replace(summaryLabelKey, strconv.FormatFloat(value.Quantile(), 'g', -1, 64))
   179  			logs = append(logs, newMetricLogFromRaw(name, summaryLabels, int64(dataPoint.Timestamp()), value.Value()))
   180  		}
   181  	}
   182  	return logs
   183  }
   184  
   185  func HistogramToLogs(name string, data pmetric.HistogramDataPointSlice, aggregationTemporality pmetric.AggregationTemporality, defaultLabels *helper.MetricLabels) (logs []*protocol.Log) {
   186  	for i := 0; i < data.Len(); i++ {
   187  		dataPoint := data.At(i)
   188  
   189  		labels := defaultLabels.Clone()
   190  		attrs2Labels(labels, dataPoint.Attributes())
   191  		labels.Append(otlp.TagKeyMetricAggregationTemporality, aggregationTemporality.String())
   192  		labels.Append(otlp.TagKeyMetricHistogramType, pmetric.MetricTypeHistogram.String())
   193  
   194  		if dataPoint.HasSum() {
   195  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixSum, labels, int64(dataPoint.Timestamp()), dataPoint.Sum()))
   196  		}
   197  		if dataPoint.HasMin() {
   198  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixMin, labels, int64(dataPoint.Timestamp()), dataPoint.Min()))
   199  		}
   200  		if dataPoint.HasMax() {
   201  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixMax, labels, int64(dataPoint.Timestamp()), dataPoint.Max()))
   202  		}
   203  		logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixCount, labels, int64(dataPoint.Timestamp()), float64(dataPoint.Count())))
   204  
   205  		for j := 0; j < dataPoint.Exemplars().Len(); j++ {
   206  			logs = append(logs, newExemplarMetricLogFromRaw(name, dataPoint.Exemplars().At(j), labels.Clone()))
   207  		}
   208  
   209  		bounds := dataPoint.ExplicitBounds()
   210  		boundsStr := make([]string, bounds.Len()+1)
   211  		for j := 0; j < bounds.Len(); j++ {
   212  			boundsStr[j] = strconv.FormatFloat(bounds.At(j), 'g', -1, 64)
   213  		}
   214  		boundsStr[len(boundsStr)-1] = infinityBoundValue
   215  
   216  		bucketCount := min(len(boundsStr), dataPoint.BucketCounts().Len())
   217  
   218  		bucketLabels := labels.Clone()
   219  		bucketLabels.Append(bucketLabelKey, "")
   220  		sumCount := uint64(0)
   221  		for j := 0; j < bucketCount; j++ {
   222  			bucket := dataPoint.BucketCounts().At(j)
   223  			bucketLabels.Replace(bucketLabelKey, boundsStr[j])
   224  			sumCount += bucket
   225  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixBucket, bucketLabels, int64(dataPoint.Timestamp()), float64(sumCount)))
   226  		}
   227  	}
   228  	return logs
   229  }
   230  
   231  func ExponentialHistogramToLogs(name string, data pmetric.ExponentialHistogramDataPointSlice, aggregationTemporality pmetric.AggregationTemporality, defaultLabels *helper.MetricLabels) (logs []*protocol.Log) {
   232  	for i := 0; i < data.Len(); i++ {
   233  		dataPoint := data.At(i)
   234  
   235  		labels := defaultLabels.Clone()
   236  		attrs2Labels(labels, dataPoint.Attributes())
   237  		labels.Append(otlp.TagKeyMetricAggregationTemporality, aggregationTemporality.String())
   238  		labels.Append(otlp.TagKeyMetricHistogramType, pmetric.MetricTypeExponentialHistogram.String())
   239  
   240  		if dataPoint.HasSum() {
   241  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixSum, labels, int64(dataPoint.Timestamp()), dataPoint.Sum()))
   242  		}
   243  		if dataPoint.HasMin() {
   244  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixMin, labels, int64(dataPoint.Timestamp()), dataPoint.Min()))
   245  		}
   246  		if dataPoint.HasMax() {
   247  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixMax, labels, int64(dataPoint.Timestamp()), dataPoint.Max()))
   248  		}
   249  		logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixCount, labels, int64(dataPoint.Timestamp()), float64(dataPoint.Count())))
   250  
   251  		for j := 0; j < dataPoint.Exemplars().Len(); j++ {
   252  			logs = append(logs, newExemplarMetricLogFromRaw(name, dataPoint.Exemplars().At(j), labels.Clone()))
   253  		}
   254  
   255  		scale := dataPoint.Scale()
   256  		base := math.Pow(2, math.Pow(2, float64(-scale)))
   257  		postiveFields := genExponentialHistogramValues(true, base, dataPoint.Positive())
   258  		negativeFields := genExponentialHistogramValues(false, base, dataPoint.Negative())
   259  
   260  		bucketLabels := labels.Clone()
   261  		bucketLabels.Append(bucketLabelKey, "")
   262  		for k, v := range postiveFields {
   263  			bucketLabels.Replace(bucketLabelKey, k)
   264  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixBucket, bucketLabels, int64(dataPoint.Timestamp()), v))
   265  		}
   266  		bucketLabels.Replace(bucketLabelKey, otlp.FieldZeroCount)
   267  		logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixBucket, bucketLabels, int64(dataPoint.Timestamp()), float64(dataPoint.ZeroCount())))
   268  		for k, v := range negativeFields {
   269  			bucketLabels.Replace(bucketLabelKey, k)
   270  			logs = append(logs, newMetricLogFromRaw(name+metricNameSuffixBucket, bucketLabels, int64(dataPoint.Timestamp()), v))
   271  		}
   272  	}
   273  	return logs
   274  }
   275  
// ConvertOtlpLogRequestV1 unwraps an OTLP log export request and converts its
// payload with ConvertOtlpLogV1.
func ConvertOtlpLogRequestV1(otlpLogReq plogotlp.ExportRequest) (logs []*protocol.Log, err error) {
	return ConvertOtlpLogV1(otlpLogReq.Logs())
}
   279  
   280  func ConvertOtlpLogV1(otlpLogs plog.Logs) (logs []*protocol.Log, err error) {
   281  	resLogs := otlpLogs.ResourceLogs()
   282  	for i := 0; i < resLogs.Len(); i++ {
   283  		resourceLog := resLogs.At(i)
   284  		sLogs := resourceLog.ScopeLogs()
   285  		for j := 0; j < sLogs.Len(); j++ {
   286  			scopeLog := sLogs.At(j)
   287  			lRecords := scopeLog.LogRecords()
   288  			for k := 0; k < lRecords.Len(); k++ {
   289  				logRecord := lRecords.At(k)
   290  
   291  				protoContents := []*protocol.Log_Content{
   292  					{
   293  						Key:   "time_unix_nano",
   294  						Value: strconv.FormatInt(logRecord.Timestamp().AsTime().UnixNano(), 10),
   295  					},
   296  					{
   297  						Key:   "severity_number",
   298  						Value: strconv.FormatInt(int64(logRecord.SeverityNumber()), 10),
   299  					},
   300  					{
   301  						Key:   "severity_text",
   302  						Value: logRecord.SeverityText(),
   303  					},
   304  					{
   305  						Key:   "content",
   306  						Value: logRecord.Body().AsString(),
   307  					},
   308  				}
   309  
   310  				if logRecord.Attributes().Len() != 0 {
   311  					if d, err := json.Marshal(logRecord.Attributes().AsRaw()); err == nil {
   312  						protoContents = append(protoContents, &protocol.Log_Content{
   313  							Key:   "attributes",
   314  							Value: string(d),
   315  						})
   316  					}
   317  				}
   318  
   319  				if resourceLog.Resource().Attributes().Len() != 0 {
   320  					if d, err := json.Marshal(resourceLog.Resource().Attributes().AsRaw()); err == nil {
   321  						protoContents = append(protoContents, &protocol.Log_Content{
   322  							Key:   "resources",
   323  							Value: string(d),
   324  						})
   325  					}
   326  				}
   327  
   328  				protoLog := &protocol.Log{
   329  					Contents: protoContents,
   330  				}
   331  				protocol.SetLogTimeWithNano(protoLog, uint32(logRecord.Timestamp().AsTime().Unix()), uint32(logRecord.Timestamp().AsTime().Nanosecond()))
   332  				logs = append(logs, protoLog)
   333  			}
   334  		}
   335  	}
   336  
   337  	return logs, nil
   338  }
   339  
// ConvertOtlpMetricRequestV1 unwraps an OTLP metric export request and
// converts its payload with ConvertOtlpMetricV1.
func ConvertOtlpMetricRequestV1(otlpMetricReq pmetricotlp.ExportRequest) (logs []*protocol.Log, err error) {
	return ConvertOtlpMetricV1(otlpMetricReq.Metrics())
}
   343  
   344  func ConvertOtlpMetricV1(otlpMetrics pmetric.Metrics) (logs []*protocol.Log, err error) {
   345  	resMetrics := otlpMetrics.ResourceMetrics()
   346  	resMetricsLen := resMetrics.Len()
   347  
   348  	if 0 == resMetricsLen {
   349  		return
   350  	}
   351  
   352  	for i := 0; i < resMetricsLen; i++ {
   353  		resMetricsSlice := resMetrics.At(i)
   354  		var labels helper.MetricLabels
   355  		attrs2Labels(&labels, resMetricsSlice.Resource().Attributes())
   356  
   357  		scopeMetrics := resMetricsSlice.ScopeMetrics()
   358  		for j := 0; j < scopeMetrics.Len(); j++ {
   359  			otMetrics := scopeMetrics.At(j).Metrics()
   360  
   361  			for k := 0; k < otMetrics.Len(); k++ {
   362  				otMetric := otMetrics.At(k)
   363  				metricName := otMetric.Name()
   364  
   365  				switch otMetric.Type() {
   366  				case pmetric.MetricTypeGauge:
   367  					otGauge := otMetric.Gauge()
   368  					otDatapoints := otGauge.DataPoints()
   369  					logs = append(logs, GaugeToLogs(metricName, otDatapoints, &labels)...)
   370  				case pmetric.MetricTypeSum:
   371  					otSum := otMetric.Sum()
   372  					isMonotonic := strconv.FormatBool(otSum.IsMonotonic())
   373  					aggregationTemporality := otSum.AggregationTemporality()
   374  					otDatapoints := otSum.DataPoints()
   375  					logs = append(logs, SumToLogs(metricName, aggregationTemporality, isMonotonic, otDatapoints, &labels)...)
   376  				case pmetric.MetricTypeSummary:
   377  					otSummary := otMetric.Summary()
   378  					otDatapoints := otSummary.DataPoints()
   379  					logs = append(logs, SummaryToLogs(metricName, otDatapoints, &labels)...)
   380  				case pmetric.MetricTypeHistogram:
   381  					otHistogram := otMetric.Histogram()
   382  					aggregationTemporality := otHistogram.AggregationTemporality()
   383  					otDatapoints := otHistogram.DataPoints()
   384  					logs = append(logs, HistogramToLogs(metricName, otDatapoints, aggregationTemporality, &labels)...)
   385  				case pmetric.MetricTypeExponentialHistogram:
   386  					otExponentialHistogram := otMetric.ExponentialHistogram()
   387  					aggregationTemporality := otExponentialHistogram.AggregationTemporality()
   388  					otDatapoints := otExponentialHistogram.DataPoints()
   389  					logs = append(logs, ExponentialHistogramToLogs(metricName, otDatapoints, aggregationTemporality, &labels)...)
   390  				default:
   391  					// TODO:
   392  					// find a better way to handle metric with type MetricTypeEmpty.
   393  					nowTime := time.Now()
   394  					log := &protocol.Log{
   395  						Contents: []*protocol.Log_Content{
   396  							{
   397  								Key:   metricNameKey,
   398  								Value: otMetric.Name(),
   399  							},
   400  							{
   401  								Key:   labelsKey,
   402  								Value: otMetric.Type().String(),
   403  							},
   404  							{
   405  								Key:   timeNanoKey,
   406  								Value: strconv.FormatInt(nowTime.UnixNano(), 10),
   407  							},
   408  							{
   409  								Key:   valueKey,
   410  								Value: otMetric.Description(),
   411  							},
   412  						},
   413  					}
   414  					// Not support for nanosecond of system time
   415  					protocol.SetLogTime(log, uint32(nowTime.Unix()))
   416  					logs = append(logs, log)
   417  				}
   418  			}
   419  		}
   420  	}
   421  
   422  	return logs, err
   423  }
   424  
// ConvertOtlpTraceRequestV1 unwraps an OTLP trace export request and converts
// its payload with ConvertOtlpTraceV1.
func ConvertOtlpTraceRequestV1(otlpTraceReq ptraceotlp.ExportRequest) (logs []*protocol.Log, err error) {
	return ConvertOtlpTraceV1(otlpTraceReq.Traces())
}
   428  
   429  func ConvertOtlpTraceV1(otlpTrace ptrace.Traces) (logs []*protocol.Log, err error) {
   430  	log, _ := ConvertTrace(otlpTrace)
   431  	return log, nil
   432  }