bitbucket.org/Aishee/synsec@v0.0.0-20210414005726-236fc01a153d/cmd/synsec/synsec.go

package main

import (
	"fmt"
	"sync"

	"bitbucket.org/Aishee/synsec/pkg/acquisition"
	"bitbucket.org/Aishee/synsec/pkg/csconfig"
	"bitbucket.org/Aishee/synsec/pkg/cwhub"
	"bitbucket.org/Aishee/synsec/pkg/exprhelpers"
	leaky "bitbucket.org/Aishee/synsec/pkg/leakybucket"
	"bitbucket.org/Aishee/synsec/pkg/parser"
	"bitbucket.org/Aishee/synsec/pkg/types"
	log "github.com/sirupsen/logrus"
)

func initSynsec(cConfig *csconfig.Config) (*parser.Parsers, error) {
	err := exprhelpers.Init()
	if err != nil {
		return &parser.Parsers{}, fmt.Errorf("failed to init expr helpers: %s", err)
	}

	// Populate cwhub package tools
	if err := cwhub.GetHubIdx(cConfig.Hub); err != nil {
		return &parser.Parsers{}, fmt.Errorf("failed to load hub index: %s", err)
	}

	// Start loading configs
	csParsers := newParsers()
	if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil {
		return &parser.Parsers{}, fmt.Errorf("failed to load parsers: %s", err)
	}

	if err := LoadBuckets(cConfig); err != nil {
		return &parser.Parsers{}, fmt.Errorf("failed to load scenarios: %s", err)
	}

	if err := LoadAcquisition(cConfig); err != nil {
		return &parser.Parsers{}, fmt.Errorf("failed to load acquisition config: %s", err)
	}
	return csParsers, nil
}
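
// initSynsec loads everything the engine needs (expr helpers, hub index,
// parsers, scenarios, acquisition) and returns the parser set that
// serveSynsec consumes. A hypothetical call-site sketch (the configuration
// loading step is an assumption, as it lives outside this file):
//
//	cConfig := ... // *csconfig.Config, loaded elsewhere
//	csParsers, err := initSynsec(cConfig)
//	if err != nil {
//		log.Fatalf("unable to init synsec: %s", err)
//	}
//	serveSynsec(csParsers, cConfig)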

func runSynsec(cConfig *csconfig.Config, parsers *parser.Parsers) error {
	inputLineChan := make(chan types.Event)
	inputEventChan := make(chan types.Event)

	// start goroutines for parsing, bucket pouring and outputs
	parserWg := &sync.WaitGroup{}
	parserWg.Add(1) // Add before launching the goroutine so Wait() cannot return early
	parsersTomb.Go(func() error {
		defer parserWg.Done()
		for i := 0; i < cConfig.Synsec.ParserRoutinesCount; i++ {
			parsersTomb.Go(func() error {
				defer types.CatchPanic("synsec/runParse")
				if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { // this error will never happen, as parser.Parse does not return errors
					log.Fatalf("starting parse error: %s", err)
					return err
				}
				return nil
			})
		}
		return nil
	})
	parserWg.Wait()

	bucketWg := &sync.WaitGroup{}
	bucketWg.Add(1)
	bucketsTomb.Go(func() error {
		defer bucketWg.Done()
		// restore the previous bucket state if one is present
		if cConfig.Synsec.BucketStateFile != "" {
			log.Warningf("Restoring buckets state from %s", cConfig.Synsec.BucketStateFile)
			if err := leaky.LoadBucketsState(cConfig.Synsec.BucketStateFile, buckets, holders); err != nil {
				return fmt.Errorf("unable to restore buckets: %s", err)
			}
		}

		for i := 0; i < cConfig.Synsec.BucketsRoutinesCount; i++ {
			bucketsTomb.Go(func() error {
				defer types.CatchPanic("synsec/runPour")
				if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil {
					log.Fatalf("starting pour error: %s", err)
					return err
				}
				return nil
			})
		}
		return nil
	})
	bucketWg.Wait()

	outputWg := &sync.WaitGroup{}
	outputWg.Add(1)
	outputsTomb.Go(func() error {
		defer outputWg.Done()
		for i := 0; i < cConfig.Synsec.OutputRoutinesCount; i++ {
			outputsTomb.Go(func() error {
				defer types.CatchPanic("synsec/runOutput")
				if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials); err != nil {
					log.Fatalf("starting outputs error: %s", err)
					return err
				}
				return nil
			})
		}
		return nil
	})
	outputWg.Wait()
	log.Warningf("Starting to process data")

	if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil {
		log.Fatalf("starting acquisition error: %s", err)
		return err
	}

	return nil
}
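
// The three blocks in runSynsec above share one startup pattern: a launcher
// goroutine runs under a tomb, spawns N worker goroutines under the same
// tomb, and a sync.WaitGroup lets the caller block until the launcher has
// finished spawning. A minimal, self-contained sketch of that pattern, using
// gopkg.in/tomb.v2 (which the parsersTomb/bucketsTomb/outputsTomb globals are
// presumably built on; the worker body is purely illustrative, not taken from
// this package):
//
//	package main
//
//	import (
//		"log"
//		"sync"
//
//		tomb "gopkg.in/tomb.v2"
//	)
//
//	// startWorkers launches n workers under t and returns once all of them
//	// have been spawned.
//	func startWorkers(t *tomb.Tomb, n int, work func() error) {
//		wg := &sync.WaitGroup{}
//		wg.Add(1) // Add before the goroutine starts, so Wait cannot return early
//		t.Go(func() error {
//			defer wg.Done()
//			for i := 0; i < n; i++ {
//				t.Go(work)
//			}
//			return nil
//		})
//		wg.Wait()
//	}
//
//	func main() {
//		var t tomb.Tomb
//		startWorkers(&t, 4, func() error {
//			<-t.Dying() // placeholder worker: block until shutdown is requested
//			return nil
//		})
//		t.Kill(nil) // request a clean shutdown
//		if err := t.Wait(); err != nil {
//			log.Fatal(err)
//		}
//	}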

func serveSynsec(parsers *parser.Parsers, cConfig *csconfig.Config) {
	synsecTomb.Go(func() error {
		defer types.CatchPanic("synsec/serveSynsec")
		go func() {
			defer types.CatchPanic("synsec/runSynsec")
			if err := runSynsec(cConfig, parsers); err != nil {
				log.Fatalf("unable to start synsec routines: %s", err)
			}
		}()

		/* We should stop in two cases:
		- synsecTomb has been Killed(): a shutdown or a reload was requested, so stop
		- acquisTomb is dead: we were in "cat" mode and the files have been fully read, so quit
		*/
		waitOnTomb()
		log.Debugf("Shutting down synsec routines")
		if err := ShutdownSynsecRoutines(); err != nil {
			log.Fatalf("unable to shutdown synsec routines: %s", err)
		}
		log.Debugf("everything is dead, return synsecTomb")
		return nil
	})
}

func waitOnTomb() {
	for {
		select {
		case <-acquisTomb.Dead():
			/* If acquisition dies, it means we were in "cat" mode.
			While shutting down, we need to give the buckets time to process in-flight data. */
			log.Warningf("Acquisition is finished, shutting down")
			/*
				While it might make sense to shut down parsers/buckets/etc. as soon as acquisition is finished,
				there may still be pending buckets: buckets that overflowed but whose LeakRoutine is still alive
				because it is waiting to "commit" (push to the API). This happens in particular when a large share
				of the logs trigger overflows (i.e. trigger buckets where ~100% of the logs cause an overflow).

				To avoid losing those overflows, we monitor the number of live buckets. However, because of the
				blackhole mechanism, we cannot simply wait for the number of LeakRoutines to reach zero (that could
				take up to $blackhole_duration).

				So: we wait for the number of buckets to stop decreasing before returning. How long to wait is a bit
				of a trick question, as some post-overflow operations (e.g. reverse DNS lookups) can take some time.
			*/

			return

		case <-synsecTomb.Dying():
			log.Infof("Synsec engine shutting down")
			return
		}
	}
}
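
// The comment in waitOnTomb above describes a shutdown heuristic: rather than
// waiting for the number of live bucket routines to reach zero (which the
// blackhole mechanism can delay by up to the blackhole duration), wait until
// the count has stopped decreasing for some grace period. A minimal,
// self-contained sketch of that heuristic; the counter here is a stand-in,
// since the actual live LeakRoutine count is kept in the leakybucket package,
// not in this file:
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//		"time"
//	)
//
//	// waitForStableCount polls count() every tick and returns once the value
//	// has not decreased for the given grace duration.
//	func waitForStableCount(count func() int64, tick, grace time.Duration) {
//		last := count()
//		stableSince := time.Now()
//		for {
//			time.Sleep(tick)
//			cur := count()
//			if cur < last {
//				stableSince = time.Now() // still draining, restart the grace timer
//			}
//			last = cur
//			if time.Since(stableSince) >= grace {
//				return
//			}
//		}
//	}
//
//	func main() {
//		var live int64 = 10 // stand-in for the live LeakRoutine counter
//		go func() {
//			for atomic.LoadInt64(&live) > 3 { // drain down to 3 "blackholed" buckets
//				time.Sleep(200 * time.Millisecond)
//				atomic.AddInt64(&live, -1)
//			}
//		}()
//		waitForStableCount(func() int64 { return atomic.LoadInt64(&live) }, 100*time.Millisecond, time.Second)
//		fmt.Println("bucket count is stable, safe to shut down")
//	}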