bitbucket.org/Aishee/synsec@v0.0.0-20210414005726-236fc01a153d/pkg/leakybucket/buckets_test.go

package leakybucket

import (
	"bytes"
	"encoding/json"
	"fmt"
	"html/template"
	"io"
	"io/ioutil"
	"os"
	"reflect"
	"testing"
	"time"

	"bitbucket.org/Aishee/synsec/pkg/csconfig"
	"bitbucket.org/Aishee/synsec/pkg/exprhelpers"
	"bitbucket.org/Aishee/synsec/pkg/parser"
	"bitbucket.org/Aishee/synsec/pkg/types"
	"github.com/davecgh/go-spew/spew"
	log "github.com/sirupsen/logrus"
	"gopkg.in/tomb.v2"
	yaml "gopkg.in/yaml.v2"
)

// TestFile describes one bucket test: the input events and the expected overflow results.
type TestFile struct {
	Lines   []types.Event `yaml:"lines,omitempty"`
	Results []types.Event `yaml:"results,omitempty"`
}

func TestBucket(t *testing.T) {
	var (
		envSetting            = os.Getenv("TEST_ONLY")
		tomb       *tomb.Tomb = &tomb.Tomb{}
	)
	err := exprhelpers.Init()
	if err != nil {
		log.Fatalf("exprhelpers init failed: %s", err)
	}

	if envSetting != "" {
		if err := testOneBucket(t, envSetting, tomb); err != nil {
			t.Fatalf("Test '%s' failed : %s", envSetting, err)
		}
	} else {
		fds, err := ioutil.ReadDir("./tests/")
		if err != nil {
			t.Fatalf("Unable to read test directory : %s", err)
		}
		for _, fd := range fds {
			fname := "./tests/" + fd.Name()
			log.Infof("Running test on %s", fname)
			tomb.Go(func() error {
				if err := testOneBucket(t, fname, tomb); err != nil {
					t.Fatalf("Test '%s' failed : %s", fname, err)
				}
				return nil
			})
		}
	}
}

// testOneBucket loads the scenarios referenced by dir/scenarios.yaml and replays the
// events from dir/test.json against the resulting buckets.
func testOneBucket(t *testing.T, dir string, tomb *tomb.Tomb) error {

	var (
		holders []BucketFactory

		stagefiles []byte
		stagecfg   string
		stages     []parser.Stagefile
		err        error
		buckets    *Buckets
	)
	buckets = NewBuckets()

	/*load the scenarios*/
	stagecfg = dir + "/scenarios.yaml"
	if stagefiles, err = ioutil.ReadFile(stagecfg); err != nil {
		t.Fatalf("Failed to load stage file %s : %s", stagecfg, err)
	}

	tmpl, err := template.New("test").Parse(string(stagefiles))
	if err != nil {
		return fmt.Errorf("failed to parse template %s : %s", stagefiles, err)
	}
	var out bytes.Buffer
	err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir})
	if err != nil {
		panic(err)
	}
	if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil {
		log.Fatalf("failed unmarshaling %s : %s", stagecfg, err)
	}
	files := []string{}
	for _, x := range stages {
		files = append(files, x.Filename)
	}

	cscfg := &csconfig.SynsecServiceCfg{
		DataDir: "tests",
	}
	holders, response, err := LoadBuckets(cscfg, files, tomb, buckets)
	if err != nil {
		t.Fatalf("failed loading bucket : %s", err)
	}
	if !testFile(t, dir+"/test.json", dir+"/in-buckets_state.json", holders, response, buckets) {
		return fmt.Errorf("tests from %s failed", dir)
	}
	return nil
}

// testFile pours the events from file into the bucket holders, then compares the
// overflows read from the response channel against the expected results.
func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event, buckets *Buckets) bool {

	var results []types.Event
	var dump bool

	//should we restore
	if _, err := os.Stat(bs); err == nil {
		dump = true
		if err := LoadBucketsState(bs, buckets, holders); err != nil {
			t.Fatalf("Failed to load bucket state : %s", err)
		}
	}

	/* now we can load the test files */
	//process the yaml
	yamlFile, err := os.Open(file)
	if err != nil {
		t.Errorf("yamlFile.Get err #%v ", err)
	}
	dec := json.NewDecoder(yamlFile)
	dec.DisallowUnknownFields()
	//dec.SetStrict(true)
	tf := TestFile{}
	err = dec.Decode(&tf)
	if err != nil {
		if err == io.EOF {
			log.Warningf("end of test file")
		} else {
			t.Errorf("Failed to load testfile '%s' yaml error : %v", file, err)
			return false
		}
	}
	var latest_ts time.Time
	for _, in := range tf.Lines {
		//just to avoid any race during ingestion of funny scenarios
		time.Sleep(50 * time.Millisecond)
		var ts time.Time
		if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil {
			t.Fatalf("Failed to unmarshal time from input event : %s", err)
		}
		if latest_ts.IsZero() {
			latest_ts = ts
		} else if ts.After(latest_ts) {
			latest_ts = ts
		}

		in.ExpectMode = TIMEMACHINE
		log.Infof("Buckets input : %s", spew.Sdump(in))
		ok, err := PourItemToHolders(in, holders, buckets)
		if err != nil {
			t.Fatalf("Failed to pour : %s", err)
		}
		if !ok {
			log.Warningf("Event wasn't poured")
		}
	}
	log.Warningf("Done pouring !")

	time.Sleep(1 * time.Second)

	//Read results from chan
POLL_AGAIN:
	fails := 0
	for fails < 2 {
		select {
		case ret := <-response:
			log.Warningf("got one result")
			results = append(results, ret)
			if ret.Overflow.Reprocess {
				log.Errorf("Overflow being reprocessed.")
				ok, err := PourItemToHolders(ret, holders, buckets)
				if err != nil {
					t.Fatalf("Failed to pour : %s", err)
				}
				if !ok {
					log.Warningf("Event wasn't poured")
				}
				goto POLL_AGAIN
			}
			fails = 0
		default:
			log.Warningf("no more results")
			time.Sleep(1 * time.Second)
			fails += 1
		}
	}
	log.Warningf("Got %d overflows from run", len(results))
	/*
		check the results we got against the expected ones
		only the keys of the expected part are checked against result
	*/
	var tmpFile string

	for {
		if len(tf.Results) == 0 && len(results) == 0 {
			log.Warningf("Test is successful")
			if dump {
				if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
					t.Fatalf("Failed dumping bucket state : %s", err)
				}
				log.Infof("dumped bucket to %s", tmpFile)
			}
			return true
		} else {
			log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results))
			if len(tf.Results) != len(results) {
				if dump {
					if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
						t.Fatalf("Failed dumping bucket state : %s", err)
					}
					log.Infof("dumped bucket to %s", tmpFile)

				}
				log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results))
				return false
			}
		}
	checkresultsloop:
		for eidx, out := range results {
			for ridx, expected := range tf.Results {

				log.Tracef("Checking next expected result.")

				//empty overflow
				if out.Overflow.Alert == nil && expected.Overflow.Alert == nil {
					//match stuff
				} else {
					if out.Overflow.Alert == nil || expected.Overflow.Alert == nil {
						log.Printf("Here ?")
						continue
					}
					//Scenario

					if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario {
						log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
						continue
					} else {
						log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
					}
					//EventsCount
					if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount {
						log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
						continue
					} else {
						log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
					}
					//Sources
					if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) {
						log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
						continue
					} else {
						log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
					}

				}
				//Events
				// if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) {
				// 	log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events))
				// 	valid = false
				// 	continue
				// } else {
				// 	log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events))
				// }

				//CheckFailed:

				log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx)
				//don't do this at home : delete current element from list and redo
				results[eidx] = results[len(results)-1]
				results = results[:len(results)-1]
				tf.Results[ridx] = tf.Results[len(tf.Results)-1]
				tf.Results = tf.Results[:len(tf.Results)-1]
				goto checkresultsloop
			}
		}
		if len(results) != 0 && len(tf.Results) != 0 {
			log.Errorf("mismatching entries left")
			log.Errorf("we got: %s", spew.Sdump(results))
			log.Errorf("we expected: %s", spew.Sdump(tf.Results))
			return false
		} else {
			log.Warningf("entry valid at end of loop")
		}
	}
	return false
}
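
// For reference, a rough sketch of how a fixture directory consumed by this harness
// might be laid out under ./tests/. Only scenarios.yaml, test.json and
// in-buckets_state.json are taken from the code above; the directory name, the
// bucket.yaml file name and the "filename" yaml key (assumed from parser.Stagefile)
// are hypothetical, and the exact event fields accepted inside "lines"/"results"
// depend on types.Event and are not shown here:
//
//	tests/<some-test>/
//		scenarios.yaml          templated list of scenario files, e.g. "- filename: {{.TestDirectory}}/bucket.yaml"
//		bucket.yaml             the bucket/scenario definitions under test
//		test.json               {"lines": [ ...input events... ], "results": [ ...expected overflows... ]}
//		in-buckets_state.json   optional: bucket state restored before pouring (its presence also triggers a state dump)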