github.com/GuanceCloud/cliutils@v1.1.21/pipeline/ptinput/plmap/aggmap.go (about)

     1  // Unless explicitly stated otherwise all files in this repository are licensed
     2  // under the MIT License.
     3  // This product includes software developed at Guance Cloud (https://www.guance.com/).
     4  // Copyright 2021-present Guance, Inc.
     5  
     6  package plmap
     7  
     8  import (
     9  	"fmt"
    10  	"runtime"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/GuanceCloud/cliutils/logger"
    15  	"github.com/GuanceCloud/cliutils/pipeline/ptinput/utils"
    16  	"github.com/GuanceCloud/cliutils/pkg/hash"
    17  	"github.com/GuanceCloud/cliutils/point"
    18  	"github.com/spf13/cast"
    19  )
    20  
// l is the package-level logger; starts as a default stub and is replaced
// by InitLog once the logging subsystem is configured.
var l = logger.DefaultSLogger("pl-map")
    22  
// InitLog swaps the package logger from the default stub to the configured
// "pl-map" SLogger; call it after logger initialization.
func InitLog() {
	l = logger.SLogger("pl-map")
}
    26  
// AggBuckets is a registry of aggregation buckets, indexed by point
// category and then by bucket name. The embedded RWMutex guards all fields.
type AggBuckets struct {
	// uploadDataFn is handed to each bucket at creation time and used to
	// upload that bucket's aggregated points.
	uploadDataFn UploadFunc
	// data maps category -> bucket name -> bucket.
	data map[point.Category]map[string]*bucket

	// gTags are global tags attached to every point emitted by any bucket.
	gTags [][2]string

	sync.RWMutex
}
    36  
    37  func NewAggBuks(upFn UploadFunc, globalTags [][2]string) *AggBuckets {
    38  	return &AggBuckets{
    39  		uploadDataFn: upFn,
    40  		data:         map[point.Category]map[string]*bucket{},
    41  		gTags:        globalTags,
    42  	}
    43  }
    44  
    45  func (a *AggBuckets) CreateBucket(cat point.Category, name string, interval time.Duration,
    46  	count int, keepValue bool, constTags map[string]string,
    47  ) {
    48  	a.Lock()
    49  	defer a.Unlock()
    50  
    51  	if a.data == nil {
    52  		a.data = map[point.Category]map[string]*bucket{}
    53  	}
    54  
    55  	catBuk, ok := a.data[cat]
    56  	if !ok {
    57  		catBuk = map[string]*bucket{}
    58  		a.data[cat] = catBuk
    59  	}
    60  
    61  	buk, ok := catBuk[name]
    62  	if !ok {
    63  		buk = newBucket(cat, name, interval, count,
    64  			keepValue, a.uploadDataFn, a.gTags)
    65  		catBuk[name] = buk
    66  
    67  		buk.startScan()
    68  	}
    69  
    70  	buk.setExtraTag(constTags)
    71  }
    72  
    73  func (a *AggBuckets) SetUploadFunc(fn UploadFunc) {
    74  	a.Lock()
    75  	defer a.Unlock()
    76  	a.uploadDataFn = fn
    77  }
    78  
    79  func (a *AggBuckets) StopAllBukScanner() {
    80  	a.Lock()
    81  	defer a.Unlock()
    82  
    83  	for _, catBuk := range a.data {
    84  		for name, b := range catBuk {
    85  			b.stopScan()
    86  			delete(catBuk, name)
    87  		}
    88  	}
    89  }
    90  
    91  func (a *AggBuckets) GetBucket(cat point.Category, name string) (*bucket, bool) {
    92  	a.RLock()
    93  	defer a.RUnlock()
    94  
    95  	if a.data == nil {
    96  		return nil, false
    97  	}
    98  	if buks, ok := a.data[cat]; !ok {
    99  		return nil, false
   100  	} else {
   101  		v, ok := buks[name]
   102  		return v, ok
   103  	}
   104  }
   105  
// aggFields is one aggregated series: the tag values that identify it plus
// a running aggregator per field name.
type aggFields struct {
	tags   []string             // tag values, parallel to the bucket's stored tag-name list
	fields map[string]aggMetric // field name -> running aggregator
}
   110  
// ptsGroup holds all series sharing one tag-name set, keyed by the FNV-1a
// hash of each series' tag values.
type ptsGroup struct {
	timeline map[uint64]*aggFields

	// countLimit is copied from the bucket at creation time.
	// NOTE(review): it is never consulted in this file — the bucket itself
	// enforces the count limit in AddMetric.
	countLimit int
}
   116  
   117  func (g *ptsGroup) addMetric(tagsValue []string, name, action string, value any) bool {
   118  	if g.timeline == nil {
   119  		g.timeline = map[uint64]*aggFields{}
   120  	}
   121  
   122  	tagsHash := hash.Fnv1aHash(tagsValue)
   123  
   124  	agg, ok := g.timeline[tagsHash]
   125  	if !ok {
   126  		agg = &aggFields{
   127  			tags:   tagsValue,
   128  			fields: map[string]aggMetric{},
   129  		}
   130  		g.timeline[tagsHash] = agg
   131  	}
   132  
   133  	m, ok := agg.fields[name]
   134  	if !ok {
   135  		m, ok = NewAggMetric(name, action)
   136  		if !ok {
   137  			return false
   138  		}
   139  		agg.fields[name] = m
   140  	}
   141  	m.Append(value)
   142  
   143  	return true
   144  }
   145  
// bucket accumulates metric samples for one (category, name) pair and
// flushes them as points through uploadFn, either periodically (interval)
// or when countLimit samples have been recorded. The embedded Mutex guards
// all mutable fields.
type bucket struct {
	bukName string // name used for emitted points

	category point.Category

	interval   time.Duration // scanner flush period; <= 0 disables the scanner
	keepValue  bool          // when true, aggregated state survives a flush
	countLimit int           // flush once this many samples accumulate; <= 0 disables
	curCount   int           // samples since the last count-triggered flush

	// tagsNameHash: tagsName
	by map[uint64][]string
	// tagsNameHash: pts
	group map[uint64]*ptsGroup

	extraTags  map[string]string // constant tags set via setExtraTag
	globalTags [][2]string       // global tags inherited from AggBuckets

	// stop is non-nil while a scanner goroutine is running; closing it
	// terminates the goroutine.
	stop chan struct{}

	uploadFn UploadFunc

	sync.Mutex
}
   170  
   171  func (buk *bucket) startScan() {
   172  	if buk.stop != nil || buk.interval <= 0 {
   173  		return
   174  	}
   175  
   176  	stop := make(chan struct{})
   177  	buk.stop = stop
   178  
   179  	go func() {
   180  		ticker := time.NewTicker(buk.interval)
   181  		defer ticker.Stop()
   182  
   183  		defer func() {
   184  			if r := recover(); r != nil {
   185  				buf := make([]byte, 4096) //nolint:gomnd
   186  				buf = buf[:runtime.Stack(buf, false)]
   187  
   188  				if e, ok := r.(error); ok {
   189  					buf = append([]byte(fmt.Sprintf("%s\n", e.Error())), buf...)
   190  				}
   191  				l.Error("%s", buf)
   192  			}
   193  		}()
   194  
   195  		for {
   196  			select {
   197  			case <-ticker.C:
   198  				buk.Lock()
   199  				pts := endAgg(buk)
   200  				if len(pts) > 0 && buk.uploadFn != nil {
   201  					_ = buk.uploadFn(buk.category, buk.bukName, pts)
   202  				}
   203  				buk.Unlock()
   204  			case <-stop:
   205  				return
   206  			}
   207  		}
   208  	}()
   209  }
   210  
   211  func (buk *bucket) stopScan() {
   212  	buk.Lock()
   213  	defer buk.Unlock()
   214  
   215  	if buk.stop == nil {
   216  		return
   217  	}
   218  	close(buk.stop)
   219  	buk.stop = nil
   220  
   221  	if buk.uploadFn != nil {
   222  		pts := endAgg(buk)
   223  		_ = buk.uploadFn(buk.category, buk.bukName, pts)
   224  	}
   225  }
   226  
   227  func (buk *bucket) setExtraTag(extra map[string]string) {
   228  	buk.Lock()
   229  	defer buk.Unlock()
   230  
   231  	buk.extraTags = extra
   232  }
   233  
   234  func (buk *bucket) AddMetric(fieldName, action string, tagsName,
   235  	tagsValue []string, aggField any,
   236  ) bool {
   237  	tagNameHash := hash.Fnv1aHash(tagsName)
   238  
   239  	buk.Lock()
   240  	defer buk.Unlock()
   241  
   242  	if buk.by == nil {
   243  		buk.by = map[uint64][]string{}
   244  	}
   245  
   246  	if buk.group == nil {
   247  		buk.group = map[uint64]*ptsGroup{}
   248  	}
   249  
   250  	if _, ok := buk.by[tagNameHash]; !ok {
   251  		t := make([]string, len(tagsValue))
   252  		copy(t, tagsName)
   253  		buk.by[tagNameHash] = t
   254  	}
   255  
   256  	group, ok := buk.group[tagNameHash]
   257  	if !ok {
   258  		group = &ptsGroup{
   259  			countLimit: buk.countLimit,
   260  		}
   261  		buk.group[tagNameHash] = group
   262  	}
   263  
   264  	if ok := group.addMetric(tagsValue, fieldName, action, aggField); ok {
   265  		if buk.countLimit > 0 {
   266  			buk.curCount++
   267  			if buk.curCount >= buk.countLimit {
   268  				if buk.uploadFn != nil {
   269  					pts := endAgg(buk)
   270  					_ = buk.uploadFn(buk.category, buk.bukName, pts)
   271  				}
   272  				buk.curCount = 0
   273  			}
   274  		}
   275  		return true
   276  	}
   277  
   278  	return false
   279  }
   280  
   281  func newBucket(cat point.Category, name string, interval time.Duration,
   282  	count int, keepValue bool, uploadFn UploadFunc, gTags [][2]string,
   283  ) *bucket {
   284  	return &bucket{
   285  		bukName:    name,
   286  		interval:   interval,
   287  		keepValue:  keepValue,
   288  		countLimit: count,
   289  		by:         map[uint64][]string{},
   290  		group:      map[uint64]*ptsGroup{},
   291  		uploadFn:   uploadFn,
   292  		category:   cat,
   293  		globalTags: gTags,
   294  	}
   295  }
   296  
   297  func conv2Pt(b *bucket, tagsName []string, aggTF *aggFields) (*point.Point, bool) {
   298  	if len(tagsName) != len(aggTF.tags) {
   299  		return nil, false
   300  	}
   301  	tags := map[string]string{}
   302  
   303  	for idx := range b.globalTags {
   304  		tags[b.globalTags[idx][0]] = b.globalTags[idx][1]
   305  	}
   306  
   307  	for k, v := range b.extraTags {
   308  		tags[k] = v
   309  	}
   310  
   311  	for i := 0; i < len(tagsName); i++ {
   312  		tags[tagsName[i]] = aggTF.tags[i]
   313  	}
   314  
   315  	fields := map[string]any{}
   316  	for k, v := range aggTF.fields {
   317  		if v != nil {
   318  			fields[k] = v.Value()
   319  		}
   320  	}
   321  
   322  	fieldsKV := point.NewTags(tags)
   323  	fieldsKV = append(fieldsKV, point.NewKVs(fields)...)
   324  
   325  	opt := utils.PtCatOption(b.category)
   326  
   327  	pt := point.NewPointV2(b.bukName, fieldsKV, opt...)
   328  	return pt, true
   329  }
   330  
   331  // 结束聚合.
   332  func endAgg(b *bucket) []*point.Point {
   333  	pts := []*point.Point{}
   334  
   335  	for tagNameHash, group := range b.group {
   336  		if group == nil {
   337  			continue
   338  		}
   339  		tagsName, ok := b.by[tagNameHash]
   340  		if !ok {
   341  			continue
   342  		}
   343  		for _, tl := range group.timeline {
   344  			if pt, ok := conv2Pt(b, tagsName, tl); ok {
   345  				pts = append(pts, pt)
   346  			}
   347  		}
   348  	}
   349  
   350  	if !b.keepValue {
   351  		b.by = map[uint64][]string{}
   352  		b.group = map[uint64]*ptsGroup{}
   353  	}
   354  
   355  	return pts
   356  }
   357  
// aggMetric is a running aggregator for a single field: Append folds one
// sample into the aggregate, Value reports the current result.
type aggMetric interface {
	Append(any)
	Value() any
}
   362  
   363  func NewAggMetric(name, action string) (aggMetric, bool) {
   364  	switch action {
   365  	case "avg":
   366  		return &avgMetric{}, true
   367  	case "sum":
   368  		return &sumMetric{}, true
   369  	case "min":
   370  		return &minMetric{}, true
   371  	case "max":
   372  		return &maxMetric{}, true
   373  	case "set":
   374  		return &setMetric{}, true
   375  	default:
   376  		return nil, false
   377  	}
   378  }
   379  
   380  type avgMetric struct {
   381  	sum   float64
   382  	count float64
   383  }
   384  
   385  func (f *avgMetric) Append(v any) {
   386  	f.sum += cast.ToFloat64(v)
   387  	f.count++
   388  }
   389  
   390  func (f *avgMetric) Value() any {
   391  	return f.sum / f.count
   392  }
   393  
   394  type sumMetric struct {
   395  	sum float64
   396  }
   397  
   398  func (f *sumMetric) Append(v any) {
   399  	f.sum += cast.ToFloat64(v)
   400  }
   401  
   402  func (f *sumMetric) Value() any {
   403  	return f.sum
   404  }
   405  
   406  type minMetric struct {
   407  	inserted bool
   408  	min      float64
   409  }
   410  
   411  func (f *minMetric) Append(v any) {
   412  	min := cast.ToFloat64(v)
   413  
   414  	if f.inserted {
   415  		if f.min > min {
   416  			f.min = min
   417  		}
   418  	} else {
   419  		f.min = min
   420  		f.inserted = false
   421  	}
   422  }
   423  
   424  func (f *minMetric) Value() any {
   425  	return f.min
   426  }
   427  
   428  type maxMetric struct {
   429  	max float64
   430  }
   431  
   432  func (f *maxMetric) Append(v any) {
   433  	if max := cast.ToFloat64(v); f.max < max {
   434  		f.max = max
   435  	}
   436  }
   437  
   438  func (f *maxMetric) Value() any {
   439  	return f.max
   440  }
   441  
   442  type setMetric struct {
   443  	set float64
   444  }
   445  
   446  func (f *setMetric) Append(v any) {
   447  	f.set = cast.ToFloat64(v)
   448  }
   449  
   450  func (f *setMetric) Value() any {
   451  	return f.set
   452  }