bitbucket.org/Aishee/synsec@v0.0.0-20210414005726-236fc01a153d/pkg/leakybucket/bucket.go

package leakybucket

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"bitbucket.org/Aishee/synsec/pkg/time/rate"
	"bitbucket.org/Aishee/synsec/pkg/types"
	"github.com/davecgh/go-spew/spew"
	"github.com/goombaio/namegenerator"
	"github.com/prometheus/client_golang/prometheus"
	log "github.com/sirupsen/logrus"
	"gopkg.in/tomb.v2"
)

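// Bucket processing modes: LIVE handles events as they arrive, while
// TIMEMACHINE replays past events using the timestamps they carry.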
const (
	LIVE = iota
	TIMEMACHINE
)

// Leaky represents one instance of a bucket.
type Leaky struct {
	Name string
	Mode int // LIVE or TIMEMACHINE
	// The limiter holds the actual "leaky" behaviour: it determines when/if we can pour objects.
	Limiter         rate.RateLimiter `json:"-"`
	SerializedState rate.Lstate
	// Queue holds the cache of objects in the bucket; it tells us how many objects are buffered.
	Queue *Queue
	// Leaky buckets receive events through a chan.
	In chan types.Event `json:"-"`
	// Leaky buckets push their overflows through a chan.
	Out chan *Queue `json:"-"`
	// Shared by all buckets (the idea is to get rid of this eventually).
	AllOut chan types.Event `json:"-"`
	// Max capacity (for burst).
	Capacity int
	// CacheSize is the number of elements kept in memory (as opposed to Capacity, which bounds the bucket itself).
	CacheSize int
	// Mapkey is the unique identifier of the bucket (a hash).
	Mapkey string
	// Chan used for signaling.
	Signal       chan bool `json:"-"`
	Reprocess    bool
	Simulated    bool
	Uuid         string
	First_ts     time.Time
	Last_ts      time.Time
	Ovflw_ts     time.Time
	Total_count  int
	Leakspeed    time.Duration
	BucketConfig *BucketFactory
	Duration     time.Duration
	Pour         func(*Leaky, types.Event) `json:"-"`
	// Profiling, when set to true, enables profiling of the bucket.
	Profiling       bool
	timedOverflow   bool
	logger          *log.Entry
	scopeType       types.ScopeType
	hash            string
	scenarioVersion string
	tomb            *tomb.Tomb
	wgPour          *sync.WaitGroup
	wgDumpState     *sync.WaitGroup
	mutex           *sync.Mutex // Used only in TIMEMACHINE mode, to allow garbage collection without races.
}
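
// A minimal lifecycle sketch (illustrative only; `bf` stands for a
// BucketFactory already populated by the scenario loader, and `evt` for a
// hypothetical types.Event):
//
//	leaky := NewLeaky(bf)  // build the bucket from its factory
//	go LeakRoutine(leaky)  // run its life cycle
//	<-leaky.Signal         // the routine signals readiness on Signal
//	leaky.In <- evt        // pour an event
//
// Overflow and underflow events eventually come back on leaky.AllOut.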

var BucketsPour = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_poured_total",
		Help: "Total number of events poured into buckets.",
	},
	[]string{"source", "name"},
)

var BucketsOverflow = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_overflowed_total",
		Help: "Total number of buckets that overflowed.",
	},
	[]string{"name"},
)

var BucketsUnderflow = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_underflowed_total",
		Help: "Total number of buckets that underflowed.",
	},
	[]string{"name"},
)

var BucketsInstanciation = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_bucket_created_total",
		Help: "Total number of buckets instantiated.",
	},
	[]string{"name"},
)

var BucketsCurrentCount = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "cs_buckets",
		Help: "Number of buckets that currently exist.",
	},
	[]string{"name"},
)
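
// Note: these collectors are only defined here; they still have to be
// registered with a Prometheus registry elsewhere. A minimal sketch,
// assuming the default registry is used:
//
//	prometheus.MustRegister(BucketsPour, BucketsOverflow, BucketsUnderflow,
//		BucketsInstanciation, BucketsCurrentCount)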

var LeakyRoutineCount int64

// NewLeaky creates a new leaky bucket from a BucketFactory.
// Events created by the bucket (overflow, bucket empty) are sent to a chan defined by the BucketFactory.
// The leaky bucket implementation is based on a rate limiter (see https://godoc.org/golang.org/x/time/rate).
// A trick is used to emit an event when the bucket becomes empty, so that it can be destroyed.
func NewLeaky(bucketFactory BucketFactory) *Leaky {
	bucketFactory.logger.Tracef("Instantiating live bucket %s", bucketFactory.Name)
	return FromFactory(bucketFactory)
}

func FromFactory(bucketFactory BucketFactory) *Leaky {
	var limiter rate.RateLimiter
	// The underlying golang rate limiter is mainly intended for HTTP rate limiting.
	Qsize := bucketFactory.Capacity
	if bucketFactory.CacheSize > 0 {
		// The cache is smaller than the actual capacity.
		if bucketFactory.CacheSize <= bucketFactory.Capacity {
			Qsize = bucketFactory.CacheSize
			// The bucket might be a counter (infinite capacity); allow the cache to bound it.
		} else if bucketFactory.Capacity == -1 {
			Qsize = bucketFactory.CacheSize
		}
	}
	if bucketFactory.Capacity == -1 {
		// In this case we allow all events to pass.
		// Maybe in the future we could avoid using a limiter entirely.
		limiter = &rate.AlwaysFull{}
	} else {
		limiter = rate.NewLimiter(rate.Every(bucketFactory.leakspeed), bucketFactory.Capacity)
	}
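	// rate.Every converts the leak interval into a rate.Limit: the limiter
	// replenishes one token per leakspeed period, with Capacity as the burst
	// size. This replenishment is what makes the bucket "leak".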
	BucketsInstanciation.With(prometheus.Labels{"name": bucketFactory.Name}).Inc()

	// Create the leaky bucket per se.
	l := &Leaky{
		Name:            bucketFactory.Name,
		Limiter:         limiter,
		Uuid:            namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()).Generate(),
		Queue:           NewQueue(Qsize),
		CacheSize:       bucketFactory.CacheSize,
		Out:             make(chan *Queue, 1),
		AllOut:          bucketFactory.ret,
		Capacity:        bucketFactory.Capacity,
		Leakspeed:       bucketFactory.leakspeed,
		BucketConfig:    &bucketFactory,
		Pour:            Pour,
		Reprocess:       bucketFactory.Reprocess,
		Profiling:       bucketFactory.Profiling,
		Mode:            LIVE,
		scopeType:       bucketFactory.ScopeType,
		scenarioVersion: bucketFactory.ScenarioVersion,
		hash:            bucketFactory.hash,
		Simulated:       bucketFactory.Simulated,
		tomb:            bucketFactory.tomb,
		wgPour:          bucketFactory.wgPour,
		wgDumpState:     bucketFactory.wgDumpState,
		mutex:           &sync.Mutex{},
	}
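	// Duration is the bucket's maximum lifetime. By default it is the time a
	// full bucket needs to leak out entirely ((Capacity+1) leak periods); a
	// configured fixed `duration` overrides it and arms a timed overflow.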
	if l.BucketConfig.Capacity > 0 && l.BucketConfig.leakspeed != time.Duration(0) {
		l.Duration = time.Duration(l.BucketConfig.Capacity+1) * l.BucketConfig.leakspeed
	}
	if l.BucketConfig.duration != time.Duration(0) {
		l.Duration = l.BucketConfig.duration
		l.timedOverflow = true
	}

	return l
}
   183  
   184  /* for now mimic a leak routine */
   185  //LeakRoutine us the life of a bucket. It dies when the bucket underflows or overflows
   186  func LeakRoutine(leaky *Leaky) error {
   187  
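	// durationTicker starts as a channel that never fires; it is armed with a
	// real ticker (durationTimer) the first time an event is poured.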
	var (
		durationTicker <-chan time.Time = make(<-chan time.Time)
		durationTimer  *time.Ticker
	)
	// Stop whichever ticker is live when the routine exits.
	defer func() {
		if durationTimer != nil {
			durationTimer.Stop()
		}
	}()

	defer types.CatchPanic(fmt.Sprintf("synsec/LeakRoutine/%s", leaky.Name))

	BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Inc()
	defer BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Dec()

	// TODO: the logger is created at runtime while we want the leak routine to be up ASAP; this might not be a good idea.
	leaky.logger = leaky.BucketConfig.logger.WithFields(log.Fields{"capacity": leaky.Capacity, "partition": leaky.Mapkey, "bucket_id": leaky.Uuid})

	leaky.Signal <- true
	atomic.AddInt64(&LeakyRoutineCount, 1)
	defer atomic.AddInt64(&LeakyRoutineCount, -1)

	for _, f := range leaky.BucketConfig.processors {
		err := f.OnBucketInit(leaky.BucketConfig)
		if err != nil {
			leaky.logger.Errorf("Problem at bucket initialization, bailing out %T: %v", f, err)
			close(leaky.Signal)
			return fmt.Errorf("problem at bucket initialization, bailing out %T: %v", f, err)
		}
	}

	leaky.logger.Debugf("Leaky routine starting, lifetime: %s", leaky.Duration)
	for {
		select {
		// receiving an event
		case msg := <-leaky.In:
			// Note: msg is shadowed below and redeclared with a different (pointer) type, which is confusing.
			for _, processor := range leaky.BucketConfig.processors {
				msg := processor.OnBucketPour(leaky.BucketConfig)(msg, leaky)
				// If the processor returns nil, the event is dropped and we stop processing it.
				if msg == nil {
					goto End
				}
			}
			if leaky.logger.Level >= log.TraceLevel {
				leaky.logger.Tracef("Pour event: %s", spew.Sdump(msg))
			}
			BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src}).Inc()

			leaky.Pour(leaky, msg) // glue for now
			// Re-arm the bucket lifetime timer on behalf of pour, stopping
			// the previous ticker first so stale tickers don't keep running
			// until the routine returns.
			if durationTimer != nil {
				durationTimer.Stop()
			}
			durationTimer = time.NewTicker(leaky.Duration)
			durationTicker = durationTimer.C
		// we overflowed
		case ofw := <-leaky.Out:
			leaky.overflow(ofw)
			return nil
		// we underflowed or reached the bucket deadline (timer)
		case <-durationTicker:
			var (
				alert types.RuntimeAlert
				err   error
			)
			leaky.Ovflw_ts = time.Now()
			close(leaky.Signal)
			ofw := leaky.Queue
			alert = types.RuntimeAlert{Mapkey: leaky.Mapkey}

			if leaky.timedOverflow {
				BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc()

				alert, err = NewAlert(leaky, ofw)
				if err != nil {
					log.Errorf("%s", err)
				}
				for _, f := range leaky.BucketConfig.processors {
					alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw)
					if ofw == nil {
						leaky.logger.Debugf("Overflow has been discarded (%T)", f)
						break
					}
				}
				leaky.logger.Infof("Timed Overflow")
			} else {
				leaky.logger.Debugf("bucket underflow, destroy")
				BucketsUnderflow.With(prometheus.Labels{"name": leaky.Name}).Inc()
			}
			if leaky.logger.Level >= log.TraceLevel {
				// Don't Sdump if it isn't going to be printed; it's expensive.
				leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: alert}))
			}

			leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW}
			leaky.logger.Tracef("Returning from leaky routine.")
			return nil
		case <-leaky.tomb.Dying():
			leaky.logger.Debugf("Bucket externally killed, return")
			for len(leaky.Out) > 0 {
				ofw := <-leaky.Out
				leaky.overflow(ofw)
			}
			leaky.AllOut <- types.Event{Type: types.OVFLW, Overflow: types.RuntimeAlert{Mapkey: leaky.Mapkey}}
			return nil

		}
	End: // an event was dropped by a processor; loop and wait for the next one
	}
}

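// Pour feeds one event into the bucket: it updates the bucket's counters and
// timestamps, queues the event, and pushes the whole queue to Out when the
// limiter reports that the bucket is full (overflow).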
func Pour(leaky *Leaky, msg types.Event) {
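	// wgDumpState/wgPour appear to serialize pours against bucket-state
	// dumps: a pour waits for any in-progress dump, and the dumper can wait
	// for in-flight pours via wgPour.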
	leaky.wgDumpState.Wait()
	leaky.wgPour.Add(1)
	defer leaky.wgPour.Done()

	leaky.Total_count += 1
	if leaky.First_ts.IsZero() {
		leaky.First_ts = time.Now()
	}
	leaky.Last_ts = time.Now()
	if leaky.Limiter.Allow() {
		leaky.Queue.Add(msg)
	} else {
		leaky.Ovflw_ts = time.Now()
		leaky.logger.Debugf("Last event to be poured, bucket overflow.")
		leaky.Queue.Add(msg)
		leaky.Out <- leaky.Queue
	}
}

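// overflow finalizes an overflowing bucket: it builds the alert, runs the
// OnBucketOverflow hooks (any of which may discard the overflow), and ships
// the resulting event to AllOut.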
func (leaky *Leaky) overflow(ofw *Queue) {
	close(leaky.Signal)
	alert, err := NewAlert(leaky, ofw)
	if err != nil {
		log.Errorf("%s", err)
	}
	leaky.logger.Tracef("Overflow hooks time: %v", leaky.BucketConfig.processors)
	for _, f := range leaky.BucketConfig.processors {
		alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw)
		if ofw == nil {
			leaky.logger.Debugf("Overflow has been discarded (%T)", f)
			break
		}
	}
	if leaky.logger.Level >= log.TraceLevel {
		leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.RuntimeAlert(alert)))
	}
	mt, _ := leaky.Ovflw_ts.MarshalText()
	leaky.logger.Tracef("overflow time: %s", mt)

	BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc()

	leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW, MarshaledTime: string(mt)}
}