github.com/netdata/go.d.plugin@v0.58.1/agent/discovery/sd/pipeline/pipeline.go (about)

     1  // SPDX-License-Identifier: GPL-3.0-or-later
     2  
     3  package pipeline
     4  
     5  import (
     6  	"context"
     7  	"log/slog"
     8  	"time"
     9  
    10  	"github.com/netdata/go.d.plugin/agent/confgroup"
    11  	"github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket"
    12  	"github.com/netdata/go.d.plugin/agent/discovery/sd/kubernetes"
    13  	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
    14  	"github.com/netdata/go.d.plugin/logger"
    15  )
    16  
    17  func New(cfg Config) (*Pipeline, error) {
    18  	if err := validateConfig(cfg); err != nil {
    19  		return nil, err
    20  	}
    21  
    22  	p := &Pipeline{
    23  		Logger: logger.New().With(
    24  			slog.String("component", "discovery sd pipeline"),
    25  		),
    26  		accum:       newAccumulator(),
    27  		discoverers: make([]model.Discoverer, 0),
    28  		items:       make(map[string]map[uint64][]confgroup.Config),
    29  	}
    30  
    31  	if err := p.registerDiscoverers(cfg); err != nil {
    32  		return nil, err
    33  	}
    34  
    35  	return p, nil
    36  }
    37  
type (
	// Pipeline coordinates service discovery: it runs the registered
	// discoverers, classifies and composes discovered targets into job
	// configurations, and emits them as config groups from Run.
	Pipeline struct {
		*logger.Logger

		// discoverers produce target groups; accum merges their updates
		// into the single stream consumed by Run.
		discoverers []model.Discoverer
		accum       *accumulator

		// clr tags targets; cmr turns tagged targets into configs.
		clr classificator
		cmr composer

		// items caches composed configs per group source and target hash,
		// letting processGroup detect added/removed targets between updates.
		items map[string]map[uint64][]confgroup.Config // [source][targetHash]
	}
	// classificator assigns tags to a target; an empty result is treated
	// as a classification failure by processGroup.
	classificator interface {
		classify(model.Target) model.Tags
	}
	// composer builds job configs for an already-classified target.
	composer interface {
		compose(model.Target) []confgroup.Config
	}
)
    57  
    58  func (p *Pipeline) registerDiscoverers(conf Config) error {
    59  	for _, cfg := range conf.Discovery.K8s {
    60  		td, err := kubernetes.NewKubeDiscoverer(cfg)
    61  		if err != nil {
    62  			return err
    63  		}
    64  		p.discoverers = append(p.discoverers, td)
    65  	}
    66  	if conf.Discovery.HostSocket.Net != nil {
    67  		td, err := hostsocket.NewNetSocketDiscoverer(*conf.Discovery.HostSocket.Net)
    68  		if err != nil {
    69  			return err
    70  		}
    71  		p.discoverers = append(p.discoverers, td)
    72  	}
    73  
    74  	return nil
    75  }
    76  
    77  func (p *Pipeline) Run(ctx context.Context, in chan<- []*confgroup.Group) {
    78  	p.Info("instance is started")
    79  	defer p.Info("instance is stopped")
    80  
    81  	p.accum.discoverers = p.discoverers
    82  
    83  	updates := make(chan []model.TargetGroup)
    84  	done := make(chan struct{})
    85  
    86  	go func() { defer close(done); p.accum.run(ctx, updates) }()
    87  
    88  	for {
    89  		select {
    90  		case <-ctx.Done():
    91  			select {
    92  			case <-done:
    93  			case <-time.After(time.Second * 5):
    94  			}
    95  			return
    96  		case <-done:
    97  			return
    98  		case tggs := <-updates:
    99  			p.Infof("received %d target groups", len(tggs))
   100  			send(ctx, in, p.processGroups(tggs))
   101  		}
   102  	}
   103  }
   104  
   105  func (p *Pipeline) processGroups(tggs []model.TargetGroup) []*confgroup.Group {
   106  	var confGroups []*confgroup.Group
   107  	// updates come from the accumulator, this ensures that all groups have different sources
   108  	for _, tgg := range tggs {
   109  		p.Infof("processing group '%s' with %d target(s)", tgg.Source(), len(tgg.Targets()))
   110  		if v := p.processGroup(tgg); v != nil {
   111  			confGroups = append(confGroups, v)
   112  		}
   113  	}
   114  	return confGroups
   115  }
   116  
   117  func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group {
   118  	if len(tgg.Targets()) == 0 {
   119  		if _, ok := p.items[tgg.Source()]; !ok {
   120  			return nil
   121  		}
   122  		delete(p.items, tgg.Source())
   123  		return &confgroup.Group{Source: tgg.Source()}
   124  	}
   125  
   126  	targetsCache, ok := p.items[tgg.Source()]
   127  	if !ok {
   128  		targetsCache = make(map[uint64][]confgroup.Config)
   129  		p.items[tgg.Source()] = targetsCache
   130  	}
   131  
   132  	var changed bool
   133  	seen := make(map[uint64]bool)
   134  
   135  	for _, tgt := range tgg.Targets() {
   136  		if tgt == nil {
   137  			continue
   138  		}
   139  
   140  		hash := tgt.Hash()
   141  		seen[hash] = true
   142  
   143  		if _, ok := targetsCache[hash]; ok {
   144  			continue
   145  		}
   146  
   147  		if tags := p.clr.classify(tgt); len(tags) > 0 {
   148  			tgt.Tags().Merge(tags)
   149  
   150  			if configs := p.cmr.compose(tgt); len(configs) > 0 {
   151  				for _, cfg := range configs {
   152  					cfg.SetProvider(tgg.Provider())
   153  					cfg.SetSource(tgg.Source())
   154  				}
   155  				targetsCache[hash] = configs
   156  				changed = true
   157  			}
   158  		} else {
   159  			p.Infof("target '%s' classify: fail", tgt.TUID())
   160  		}
   161  	}
   162  
   163  	for hash := range targetsCache {
   164  		if seen[hash] {
   165  			continue
   166  		}
   167  		if configs := targetsCache[hash]; len(configs) > 0 {
   168  			changed = true
   169  		}
   170  		delete(targetsCache, hash)
   171  	}
   172  
   173  	if !changed {
   174  		return nil
   175  	}
   176  
   177  	// TODO: deepcopy?
   178  	cfgGroup := &confgroup.Group{Source: tgg.Source()}
   179  	for _, cfgs := range targetsCache {
   180  		cfgGroup.Configs = append(cfgGroup.Configs, cfgs...)
   181  	}
   182  
   183  	return cfgGroup
   184  }
   185  
   186  func send(ctx context.Context, in chan<- []*confgroup.Group, configs []*confgroup.Group) {
   187  	if len(configs) == 0 {
   188  		return
   189  	}
   190  
   191  	select {
   192  	case <-ctx.Done():
   193  		return
   194  	case in <- configs:
   195  	}
   196  }