github.com/crowdsecurity/crowdsec@v1.6.1/pkg/leakybucket/buckets_test.go (about)

     1  package leakybucket
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/json"
     6  	"errors"
     7  	"fmt"
     8  	"html/template"
     9  	"io"
    10  	"os"
    11  	"path/filepath"
    12  	"reflect"
    13  	"sync"
    14  	"testing"
    15  	"time"
    16  
    17  	"github.com/davecgh/go-spew/spew"
    18  	log "github.com/sirupsen/logrus"
    19  	"gopkg.in/tomb.v2"
    20  	yaml "gopkg.in/yaml.v2"
    21  
    22  	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
    23  	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
    24  	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
    25  	"github.com/crowdsecurity/crowdsec/pkg/parser"
    26  	"github.com/crowdsecurity/crowdsec/pkg/types"
    27  )
    28  
// TestFile describes one bucket test case: the input events to pour into the
// holders ("lines") and the overflow events expected back ("results").
// NOTE(review): despite the yaml tags, testFile decodes this with
// encoding/json — field matching there relies on the Go field names; confirm
// whether the yaml tags are still used elsewhere.
type TestFile struct {
	Lines   []types.Event `yaml:"lines,omitempty"`
	Results []types.Event `yaml:"results,omitempty"`
}
    33  
    34  func TestBucket(t *testing.T) {
    35  	var (
    36  		envSetting = os.Getenv("TEST_ONLY")
    37  		tomb       = &tomb.Tomb{}
    38  	)
    39  
    40  	testdata := "./tests"
    41  
    42  	hubCfg := &csconfig.LocalHubCfg{
    43  		HubDir:         filepath.Join(testdata, "hub"),
    44  		HubIndexFile:   filepath.Join(testdata, "hub", "index.json"),
    45  		InstallDataDir: testdata,
    46  	}
    47  
    48  	hub, err := cwhub.NewHub(hubCfg, nil, false, nil)
    49  	if err != nil {
    50  		t.Fatalf("failed to init hub: %s", err)
    51  	}
    52  
    53  	err = exprhelpers.Init(nil)
    54  	if err != nil {
    55  		log.Fatalf("exprhelpers init failed: %s", err)
    56  	}
    57  
    58  	if envSetting != "" {
    59  		if err := testOneBucket(t, hub, envSetting, tomb); err != nil {
    60  			t.Fatalf("Test '%s' failed : %s", envSetting, err)
    61  		}
    62  	} else {
    63  		wg := new(sync.WaitGroup)
    64  		fds, err := os.ReadDir(testdata)
    65  		if err != nil {
    66  			t.Fatalf("Unable to read test directory : %s", err)
    67  		}
    68  		for _, fd := range fds {
    69  			if fd.Name() == "hub" {
    70  				continue
    71  			}
    72  			fname := filepath.Join(testdata, fd.Name())
    73  			log.Infof("Running test on %s", fname)
    74  			tomb.Go(func() error {
    75  				wg.Add(1)
    76  				defer wg.Done()
    77  				if err := testOneBucket(t, hub, fname, tomb); err != nil {
    78  					t.Fatalf("Test '%s' failed : %s", fname, err)
    79  				}
    80  				return nil
    81  			})
    82  		}
    83  		wg.Wait()
    84  	}
    85  }
    86  
    87  // during tests, we're likely to have only one scenario, and thus only one holder.
    88  // we want to avoid the death of the tomb because all existing buckets have been destroyed.
    89  func watchTomb(tomb *tomb.Tomb) {
    90  	for {
    91  		if tomb.Alive() == false {
    92  			log.Warning("Tomb is dead")
    93  			break
    94  		}
    95  		time.Sleep(100 * time.Millisecond)
    96  	}
    97  }
    98  
    99  func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) error {
   100  
   101  	var (
   102  		holders []BucketFactory
   103  
   104  		stagefiles []byte
   105  		stagecfg   string
   106  		stages     []parser.Stagefile
   107  		err        error
   108  		buckets    *Buckets
   109  	)
   110  	buckets = NewBuckets()
   111  
   112  	/*load the scenarios*/
   113  	stagecfg = dir + "/scenarios.yaml"
   114  	if stagefiles, err = os.ReadFile(stagecfg); err != nil {
   115  		t.Fatalf("Failed to load stage file %s : %s", stagecfg, err)
   116  	}
   117  
   118  	tmpl, err := template.New("test").Parse(string(stagefiles))
   119  	if err != nil {
   120  		return fmt.Errorf("failed to parse template %s : %s", stagefiles, err)
   121  	}
   122  	var out bytes.Buffer
   123  	err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir})
   124  	if err != nil {
   125  		panic(err)
   126  	}
   127  	if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil {
   128  		log.Fatalf("failed unmarshaling %s : %s", stagecfg, err)
   129  	}
   130  	files := []string{}
   131  	for _, x := range stages {
   132  		files = append(files, x.Filename)
   133  	}
   134  
   135  	cscfg := &csconfig.CrowdsecServiceCfg{}
   136  	holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false)
   137  	if err != nil {
   138  		t.Fatalf("failed loading bucket : %s", err)
   139  	}
   140  	tomb.Go(func() error {
   141  		watchTomb(tomb)
   142  		return nil
   143  	})
   144  	if !testFile(t, filepath.Join(dir, "test.json"), filepath.Join(dir, "in-buckets_state.json"), holders, response, buckets) {
   145  		return fmt.Errorf("tests from %s failed", dir)
   146  	}
   147  	return nil
   148  }
   149  
   150  func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event, buckets *Buckets) bool {
   151  
   152  	var results []types.Event
   153  	var dump bool
   154  
   155  	//should we restore
   156  	if _, err := os.Stat(bs); err == nil {
   157  		dump = true
   158  		if err := LoadBucketsState(bs, buckets, holders); err != nil {
   159  			t.Fatalf("Failed to load bucket state : %s", err)
   160  		}
   161  	}
   162  
   163  	/* now we can load the test files */
   164  	//process the yaml
   165  	yamlFile, err := os.Open(file)
   166  	if err != nil {
   167  		t.Errorf("yamlFile.Get err   #%v ", err)
   168  	}
   169  	dec := json.NewDecoder(yamlFile)
   170  	dec.DisallowUnknownFields()
   171  	//dec.SetStrict(true)
   172  	tf := TestFile{}
   173  	err = dec.Decode(&tf)
   174  	if err != nil {
   175  		if errors.Is(err, io.EOF) {
   176  			t.Errorf("Failed to load testfile '%s' yaml error : %v", file, err)
   177  			return false
   178  		}
   179  		log.Warning("end of test file")
   180  	}
   181  	var latest_ts time.Time
   182  	for _, in := range tf.Lines {
   183  		//just to avoid any race during ingestion of funny scenarios
   184  		time.Sleep(50 * time.Millisecond)
   185  		var ts time.Time
   186  		if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil {
   187  			t.Fatalf("Failed to unmarshal time from input event : %s", err)
   188  		}
   189  		if latest_ts.IsZero() {
   190  			latest_ts = ts
   191  		} else if ts.After(latest_ts) {
   192  			latest_ts = ts
   193  		}
   194  
   195  		in.ExpectMode = types.TIMEMACHINE
   196  		log.Infof("Buckets input : %s", spew.Sdump(in))
   197  		ok, err := PourItemToHolders(in, holders, buckets)
   198  		if err != nil {
   199  			t.Fatalf("Failed to pour : %s", err)
   200  		}
   201  		if !ok {
   202  			log.Warning("Event wasn't poured")
   203  		}
   204  	}
   205  	log.Warning("Done pouring !")
   206  
   207  	time.Sleep(1 * time.Second)
   208  
   209  	//Read results from chan
   210  POLL_AGAIN:
   211  	fails := 0
   212  	for fails < 2 {
   213  		select {
   214  		case ret := <-response:
   215  			log.Warning("got one result")
   216  			results = append(results, ret)
   217  			if ret.Overflow.Reprocess {
   218  				log.Errorf("Overflow being reprocessed.")
   219  				ok, err := PourItemToHolders(ret, holders, buckets)
   220  				if err != nil {
   221  					t.Fatalf("Failed to pour : %s", err)
   222  				}
   223  				if !ok {
   224  					log.Warning("Event wasn't poured")
   225  				}
   226  				goto POLL_AGAIN
   227  			}
   228  			fails = 0
   229  		default:
   230  			log.Warning("no more results")
   231  			time.Sleep(1 * time.Second)
   232  			fails += 1
   233  		}
   234  	}
   235  	log.Warningf("Got %d overflows from run", len(results))
   236  	/*
   237  		check the results we got against the expected ones
   238  		only the keys of the expected part are checked against result
   239  	*/
   240  	var tmpFile string
   241  
   242  	for {
   243  		if len(tf.Results) == 0 && len(results) == 0 {
   244  			log.Warning("Test is successful")
   245  			if dump {
   246  				if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
   247  					t.Fatalf("Failed to dump bucket state: %s", err)
   248  				}
   249  				log.Infof("dumped bucket to %s", tmpFile)
   250  			}
   251  			return true
   252  		}
   253  		log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results))
   254  		if len(tf.Results) != len(results) {
   255  			if dump {
   256  				if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
   257  					t.Fatalf("Failed to dump bucket state: %s", err)
   258  				}
   259  				log.Infof("dumped bucket to %s", tmpFile)
   260  			}
   261  			log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results))
   262  			return false
   263  		}
   264  	checkresultsloop:
   265  		for eidx, out := range results {
   266  			for ridx, expected := range tf.Results {
   267  
   268  				log.Tracef("Checking next expected result.")
   269  
   270  				//empty overflow
   271  				if out.Overflow.Alert == nil && expected.Overflow.Alert == nil {
   272  					//match stuff
   273  				} else {
   274  					if out.Overflow.Alert == nil || expected.Overflow.Alert == nil {
   275  						log.Printf("Here ?")
   276  						continue
   277  					}
   278  
   279  					//Scenario
   280  					if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario {
   281  						log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
   282  						continue
   283  					}
   284  					log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
   285  
   286  					//EventsCount
   287  					if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount {
   288  						log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
   289  						continue
   290  					}
   291  					log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
   292  
   293  					//Sources
   294  					if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) {
   295  						log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
   296  						continue
   297  					}
   298  					log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
   299  				}
   300  				//Events
   301  				// if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) {
   302  				// 	log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events))
   303  				// 	valid = false
   304  				// 	continue
   305  				// } else {
   306  				// 	log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events))
   307  				// }
   308  
   309  				//CheckFailed:
   310  
   311  				log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx)
   312  				//don't do this at home : delete current element from list and redo
   313  				results[eidx] = results[len(results)-1]
   314  				results = results[:len(results)-1]
   315  				tf.Results[ridx] = tf.Results[len(tf.Results)-1]
   316  				tf.Results = tf.Results[:len(tf.Results)-1]
   317  				goto checkresultsloop
   318  			}
   319  		}
   320  		if len(results) != 0 && len(tf.Results) != 0 {
   321  			log.Errorf("mismatching entries left")
   322  			log.Errorf("we got: %s", spew.Sdump(results))
   323  			log.Errorf("we expected: %s", spew.Sdump(tf.Results))
   324  			return false
   325  		}
   326  		log.Warning("entry valid at end of loop")
   327  	}
   328  }