github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/dashboard/app/app_test.go (about) 1 // Copyright 2017 syzkaller project authors. All rights reserved. 2 // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 3 4 package main 5 6 import ( 7 "errors" 8 "fmt" 9 "net/http" 10 "os" 11 "strconv" 12 "strings" 13 "testing" 14 "time" 15 16 "github.com/google/go-cmp/cmp" 17 "github.com/google/syzkaller/dashboard/dashapi" 18 "github.com/google/syzkaller/pkg/auth" 19 "github.com/google/syzkaller/pkg/subsystem" 20 _ "github.com/google/syzkaller/pkg/subsystem/lists" 21 "github.com/google/syzkaller/sys/targets" 22 "google.golang.org/appengine/v2/user" 23 ) 24 25 func init() { 26 // This is ugly but without this go test hangs with: 27 // panic: Metadata fetch failed for 'instance/attributes/gae_backend_version': 28 // Get http://metadata/computeMetadata/v1/instance/attributes/gae_backend_version: 29 // dial tcp: lookup metadata on 127.0.0.1:53: no such host 30 // It's unclear what's the proper fix for this. 31 os.Setenv("GAE_MODULE_VERSION", "1") 32 os.Setenv("GAE_MINOR_VERSION", "1") 33 34 isBrokenAuthDomainInTest = true 35 obsoleteWhatWontBeFixBisected = true 36 notifyAboutUnsuccessfulBisections = true 37 ensureConfigImmutability = true 38 initMocks() 39 installConfig(testConfig) 40 } 41 42 // Config used in tests. 43 var testConfig = &GlobalConfig{ 44 AccessLevel: AccessPublic, 45 AuthDomain: "@syzkaller.com", 46 Clients: map[string]string{ 47 "reporting": "reportingkeyreportingkeyreportingkey", 48 }, 49 EmailBlocklist: []string{ 50 "\"Bar\" <Blocked@Domain.com>", 51 }, 52 Obsoleting: ObsoletingConfig{ 53 MinPeriod: 80 * 24 * time.Hour, 54 MaxPeriod: 100 * 24 * time.Hour, 55 NonFinalMinPeriod: 40 * 24 * time.Hour, 56 NonFinalMaxPeriod: 60 * 24 * time.Hour, 57 ReproRetestPeriod: 100 * 24 * time.Hour, 58 }, 59 DiscussionEmails: []DiscussionEmailConfig{ 60 {"lore@email.com", dashapi.DiscussionLore}, 61 }, 62 DefaultNamespace: "test1", 63 Namespaces: map[string]*Config{ 64 "test1": { 65 AccessLevel: AccessAdmin, 66 Key: "test1keytest1keytest1key", 67 FixBisectionAutoClose: true, 68 SimilarityDomain: testDomain, 69 Clients: map[string]string{ 70 client1: password1, 71 "oauth": auth.OauthMagic + "111111122222222", 72 }, 73 Repos: []KernelRepo{ 74 { 75 URL: "git://syzkaller.org", 76 Branch: "branch10", 77 Alias: "repo10alias", 78 CC: CCConfig{ 79 Maintainers: []string{"maintainers@repo10.org", "bugs@repo10.org"}, 80 }, 81 }, 82 { 83 URL: "git://github.com/google/syzkaller", 84 Branch: "master", 85 Alias: "repo10alias1", 86 CC: CCConfig{ 87 Maintainers: []string{"maintainers@repo10.org", "bugs@repo10.org"}, 88 }, 89 }, 90 { 91 URL: "git://github.com/google/syzkaller", 92 Branch: "old_master", 93 Alias: "repo10alias2", 94 NoPoll: true, 95 }, 96 }, 97 Managers: map[string]ConfigManager{ 98 "special-obsoleting": { 99 ObsoletingMinPeriod: 10 * 24 * time.Hour, 100 ObsoletingMaxPeriod: 20 * 24 * time.Hour, 101 }, 102 }, 103 Reporting: []Reporting{ 104 { 105 Name: "reporting1", 106 DailyLimit: 5, 107 Embargo: 14 * 24 * time.Hour, 108 Filter: skipWithRepro, 109 Config: &TestConfig{ 110 Index: 1, 111 }, 112 }, 113 { 114 Name: "reporting2", 115 DailyLimit: 5, 116 Config: &TestConfig{ 117 Index: 2, 118 }, 119 }, 120 }, 121 Subsystems: SubsystemsConfig{ 122 Service: subsystem.MustMakeService(testSubsystems), 123 }, 124 }, 125 "test2": { 126 AccessLevel: AccessAdmin, 127 Key: "test2keytest2keytest2key", 128 SimilarityDomain: testDomain, 129 Clients: map[string]string{ 130 client2: password2, 131 }, 132 Repos: []KernelRepo{ 133 { 134 URL: "git://syzkaller.org", 135 Branch: "branch10", 136 Alias: "repo10alias", 137 CC: CCConfig{ 138 Always: []string{"always@cc.me"}, 139 Maintainers: []string{"maintainers@repo10.org", "bugs@repo10.org"}, 140 BuildMaintainers: []string{"build-maintainers@repo10.org"}, 141 }, 142 }, 143 { 144 URL: "git://syzkaller.org", 145 Branch: "branch20", 146 Alias: "repo20", 147 CC: CCConfig{ 148 Maintainers: []string{"maintainers@repo20.org", "bugs@repo20.org"}, 149 }, 150 }, 151 }, 152 Managers: map[string]ConfigManager{ 153 noFixBisectionManager: { 154 FixBisectionDisabled: true, 155 }, 156 specialCCManager: { 157 CC: CCConfig{ 158 Always: []string{"always@manager.org"}, 159 Maintainers: []string{"maintainers@manager.org"}, 160 BuildMaintainers: []string{"build-maintainers@manager.org"}, 161 }, 162 }, 163 }, 164 Reporting: []Reporting{ 165 { 166 Name: "reporting1", 167 DailyLimit: 5, 168 Embargo: 14 * 24 * time.Hour, 169 Filter: skipWithRepro, 170 Config: &EmailConfig{ 171 Email: "test@syzkaller.com", 172 }, 173 }, 174 { 175 Name: "reporting2", 176 DailyLimit: 3, 177 Filter: skipWithRepro2, 178 Config: &EmailConfig{ 179 Email: "bugs@syzkaller.com", 180 DefaultMaintainers: []string{"default@maintainers.com"}, 181 SubjectPrefix: "[syzbot]", 182 MailMaintainers: true, 183 }, 184 }, 185 { 186 Name: "reporting3", 187 DailyLimit: 3, 188 Config: &EmailConfig{ 189 Email: "bugs2@syzkaller.com", 190 DefaultMaintainers: []string{"default2@maintainers.com"}, 191 MailMaintainers: true, 192 }, 193 }, 194 }, 195 }, 196 // Namespaces for access level testing. 197 "access-admin": { 198 AccessLevel: AccessAdmin, 199 Key: "adminkeyadminkeyadminkey", 200 Clients: map[string]string{ 201 clientAdmin: keyAdmin, 202 }, 203 Repos: []KernelRepo{ 204 { 205 URL: "git://syzkaller.org/access-admin.git", 206 Branch: "access-admin", 207 Alias: "access-admin", 208 }, 209 }, 210 Reporting: []Reporting{ 211 { 212 Name: "access-admin-reporting1", 213 DailyLimit: 1000, 214 Config: &TestConfig{Index: 1}, 215 }, 216 { 217 Name: "access-admin-reporting2", 218 DailyLimit: 1000, 219 Config: &TestConfig{Index: 2}, 220 }, 221 }, 222 }, 223 "access-user": { 224 AccessLevel: AccessUser, 225 Key: "userkeyuserkeyuserkey", 226 Clients: map[string]string{ 227 clientUser: keyUser, 228 }, 229 Repos: []KernelRepo{ 230 { 231 URL: "git://syzkaller.org/access-user.git", 232 Branch: "access-user", 233 Alias: "access-user", 234 }, 235 }, 236 Reporting: []Reporting{ 237 { 238 AccessLevel: AccessAdmin, 239 Name: "access-admin-reporting1", 240 DailyLimit: 1000, 241 Config: &TestConfig{Index: 1}, 242 }, 243 { 244 Name: "access-user-reporting2", 245 DailyLimit: 1000, 246 Config: &TestConfig{Index: 2}, 247 }, 248 }, 249 }, 250 "access-public": { 251 AccessLevel: AccessPublic, 252 Key: "publickeypublickeypublickey", 253 Clients: map[string]string{ 254 clientPublic: keyPublic, 255 }, 256 Repos: []KernelRepo{ 257 { 258 URL: "git://syzkaller.org/access-public.git", 259 Branch: "access-public", 260 Alias: "access-public", 261 DetectMissingBackports: true, 262 }, 263 }, 264 Reporting: []Reporting{ 265 { 266 AccessLevel: AccessUser, 267 Name: "access-user-reporting1", 268 DailyLimit: 1000, 269 Config: &TestConfig{Index: 1}, 270 }, 271 { 272 Name: "access-public-reporting2", 273 DailyLimit: 1000, 274 Config: &TestConfig{Index: 2}, 275 }, 276 }, 277 FindBugOriginTrees: true, 278 CacheUIPages: true, 279 RetestRepros: true, 280 }, 281 "access-public-email": { 282 AccessLevel: AccessPublic, 283 Key: "publickeypublickeypublickey", 284 Clients: map[string]string{ 285 clientPublicEmail: keyPublicEmail, 286 }, 287 Managers: map[string]ConfigManager{ 288 restrictedManager: { 289 RestrictedTestingRepo: "git://restricted.git/restricted.git", 290 RestrictedTestingReason: "you should test only on restricted.git", 291 }, 292 }, 293 Repos: []KernelRepo{ 294 { 295 URL: "git://syzkaller.org/access-public-email.git", 296 Branch: "access-public-email", 297 Alias: "access-public-email", 298 }, 299 { 300 // Needed for TestTreeOriginLtsBisection(). 301 URL: "https://upstream.repo/repo", 302 Branch: "upstream-master", 303 Alias: "upstream-master", 304 }, 305 }, 306 Reporting: []Reporting{ 307 { 308 AccessLevel: AccessPublic, 309 Name: "access-public-email-reporting1", 310 DailyLimit: 1000, 311 Config: &EmailConfig{ 312 Email: "test@syzkaller.com", 313 HandleListEmails: true, 314 SubjectPrefix: "[syzbot]", 315 }, 316 }, 317 }, 318 RetestRepros: true, 319 Subsystems: SubsystemsConfig{ 320 Service: subsystem.MustMakeService(testSubsystems), 321 Redirect: map[string]string{ 322 "oldSubsystem": "subsystemA", 323 }, 324 }, 325 }, 326 // The second namespace reporting to the same mailing list. 327 "access-public-email-2": { 328 AccessLevel: AccessPublic, 329 Key: "publickeypublickeypublickey", 330 Clients: map[string]string{ 331 clientPublicEmail2: keyPublicEmail2, 332 }, 333 Repos: []KernelRepo{ 334 { 335 URL: "git://syzkaller.org/access-public-email2.git", 336 Branch: "access-public-email2", 337 Alias: "access-public-email2", 338 }, 339 }, 340 Reporting: []Reporting{ 341 { 342 AccessLevel: AccessPublic, 343 Name: "access-public-email2-reporting1", 344 DailyLimit: 1000, 345 Config: &EmailConfig{ 346 Email: "test@syzkaller.com", 347 HandleListEmails: true, 348 }, 349 }, 350 }, 351 }, 352 "fs-bugs-reporting": { 353 AccessLevel: AccessPublic, 354 Key: "fspublickeypublickeypublickey", 355 Clients: map[string]string{ 356 clientPublicFs: keyPublicFs, 357 }, 358 Repos: []KernelRepo{ 359 { 360 URL: "git://syzkaller.org/fs-bugs.git", 361 Branch: "fs-bugs", 362 Alias: "fs-bugs", 363 }, 364 }, 365 Reporting: []Reporting{ 366 { 367 Name: "wait-repro", 368 DailyLimit: 1000, 369 Filter: func(bug *Bug) FilterResult { 370 if canBeVfsBug(bug) && 371 bug.ReproLevel == dashapi.ReproLevelNone { 372 return FilterReport 373 } 374 return FilterSkip 375 }, 376 Config: &TestConfig{Index: 1}, 377 }, 378 { 379 AccessLevel: AccessPublic, 380 Name: "public", 381 DailyLimit: 1000, 382 Config: &EmailConfig{ 383 Email: "test@syzkaller.com", 384 HandleListEmails: true, 385 DefaultMaintainers: []string{"linux-kernel@vger.kernel.org"}, 386 MailMaintainers: true, 387 SubjectPrefix: "[syzbot]", 388 }, 389 }, 390 }, 391 Subsystems: SubsystemsConfig{ 392 Service: subsystem.ListService("linux"), 393 }, 394 }, 395 "test-decommission": { 396 AccessLevel: AccessAdmin, 397 Key: "testdecommissiontestdecommission", 398 SimilarityDomain: testDomain, 399 Clients: map[string]string{ 400 clientTestDecomm: keyTestDecomm, 401 }, 402 Repos: []KernelRepo{ 403 { 404 URL: "git://syzkaller.org", 405 Branch: "branch10", 406 Alias: "repo10alias", 407 }, 408 }, 409 Reporting: []Reporting{ 410 { 411 Name: "reporting1", 412 DailyLimit: 3, 413 Embargo: 14 * 24 * time.Hour, 414 Filter: skipWithRepro, 415 Config: &TestConfig{ 416 Index: 1, 417 }, 418 }, 419 { 420 Name: "reporting2", 421 DailyLimit: 3, 422 Config: &TestConfig{ 423 Index: 2, 424 }, 425 }, 426 }, 427 }, 428 "test-mgr-decommission": { 429 AccessLevel: AccessAdmin, 430 Key: "testmgrdecommissiontestmgrdecommission", 431 SimilarityDomain: testDomain, 432 Clients: map[string]string{ 433 clientMgrDecommission: keyMgrDecommission, 434 }, 435 Managers: map[string]ConfigManager{ 436 notYetDecommManger: {}, 437 delegateToManager: {}, 438 }, 439 Repos: []KernelRepo{ 440 { 441 URL: "git://syzkaller.org", 442 Branch: "branch10", 443 Alias: "repo10alias", 444 }, 445 }, 446 Reporting: []Reporting{ 447 { 448 Name: "reporting1", 449 DailyLimit: 5, 450 Embargo: 14 * 24 * time.Hour, 451 Filter: skipWithRepro, 452 Config: &EmailConfig{ 453 Email: "test@syzkaller.com", 454 }, 455 }, 456 { 457 Name: "reporting2", 458 DailyLimit: 3, 459 Filter: skipWithRepro2, 460 Config: &EmailConfig{ 461 Email: "bugs@syzkaller.com", 462 DefaultMaintainers: []string{"default@maintainers.com"}, 463 SubjectPrefix: "[syzbot]", 464 MailMaintainers: true, 465 }, 466 }, 467 }, 468 RetestRepros: true, 469 }, 470 "subsystem-reminders": { 471 AccessLevel: AccessPublic, 472 Key: "subsystemreminderssubsystemreminders", 473 Clients: map[string]string{ 474 clientSubsystemRemind: keySubsystemRemind, 475 }, 476 Repos: []KernelRepo{ 477 { 478 URL: "git://syzkaller.org/reminders.git", 479 Branch: "main", 480 Alias: "main", 481 }, 482 }, 483 Reporting: []Reporting{ 484 { 485 // Let's emulate public moderation. 486 AccessLevel: AccessPublic, 487 Name: "moderation", 488 DailyLimit: 1000, 489 Filter: func(bug *Bug) FilterResult { 490 if strings.Contains(bug.Title, "keep in moderation") { 491 return FilterReport 492 } 493 return FilterSkip 494 }, 495 Config: &TestConfig{Index: 1}, 496 }, 497 { 498 AccessLevel: AccessPublic, 499 Name: "public", 500 DailyLimit: 1000, 501 Config: &EmailConfig{ 502 Email: "bugs@syzkaller.com", 503 HandleListEmails: true, 504 MailMaintainers: true, 505 DefaultMaintainers: []string{"default@maintainers.com"}, 506 SubjectPrefix: "[syzbot]", 507 }, 508 }, 509 }, 510 Subsystems: SubsystemsConfig{ 511 Service: subsystem.MustMakeService(testSubsystems), 512 Reminder: &BugListReportingConfig{ 513 SourceReporting: "public", 514 BugsInReport: 6, 515 ModerationConfig: &EmailConfig{ 516 Email: "moderation@syzkaller.com", 517 SubjectPrefix: "[moderation]", 518 }, 519 Config: &EmailConfig{ 520 Email: "bugs@syzkaller.com", 521 MailMaintainers: true, 522 SubjectPrefix: "[syzbot]", 523 }, 524 }, 525 }, 526 }, 527 "tree-tests": { 528 AccessLevel: AccessPublic, 529 FixBisectionAutoClose: true, 530 Key: "treeteststreeteststreeteststreeteststreeteststreetests", 531 Clients: map[string]string{ 532 clientTreeTests: keyTreeTests, 533 }, 534 Repos: []KernelRepo{ 535 { 536 URL: "git://syzkaller.org/test.git", 537 Branch: "main", 538 Alias: "main", 539 DetectMissingBackports: true, 540 }, 541 }, 542 Managers: map[string]ConfigManager{ 543 "better-manager": { 544 Priority: 1, 545 }, 546 }, 547 Reporting: []Reporting{ 548 { 549 AccessLevel: AccessAdmin, 550 Name: "non-public", 551 DailyLimit: 1000, 552 Filter: func(bug *Bug) FilterResult { 553 return FilterReport 554 }, 555 Config: &TestConfig{Index: 1}, 556 }, 557 { 558 AccessLevel: AccessUser, 559 Name: "user", 560 DailyLimit: 1000, 561 Config: &EmailConfig{ 562 Email: "bugs@syzkaller.com", 563 SubjectPrefix: "[syzbot]", 564 }, 565 Labels: map[string]string{ 566 "origin:downstream": "Bug presence analysis results: the bug reproduces only on the downstream tree.", 567 }, 568 }, 569 }, 570 FindBugOriginTrees: true, 571 RetestMissingBackports: true, 572 }, 573 }, 574 } 575 576 var testSubsystems = []*subsystem.Subsystem{ 577 { 578 Name: "subsystemA", 579 PathRules: []subsystem.PathRule{{IncludeRegexp: `a\.c`}}, 580 Lists: []string{"subsystemA@list.com"}, 581 Maintainers: []string{"subsystemA@person.com"}, 582 }, 583 { 584 Name: "subsystemB", 585 PathRules: []subsystem.PathRule{{IncludeRegexp: `b\.c`}}, 586 Lists: []string{"subsystemB@list.com"}, 587 Maintainers: []string{"subsystemB@person.com"}, 588 }, 589 { 590 Name: "subsystemC", 591 PathRules: []subsystem.PathRule{{IncludeRegexp: `c\.c`}}, 592 Lists: []string{"subsystemC@list.com"}, 593 Maintainers: []string{"subsystemC@person.com"}, 594 NoReminders: true, 595 }, 596 } 597 598 const ( 599 client1 = "client1" 600 client2 = "client2" 601 password1 = "client1keyclient1keyclient1key" 602 password2 = "client2keyclient2keyclient2key" 603 clientAdmin = "client-admin" 604 keyAdmin = "clientadminkeyclientadminkey" 605 clientUser = "client-user" 606 keyUser = "clientuserkeyclientuserkey" 607 clientPublic = "client-public" 608 keyPublic = "clientpublickeyclientpublickey" 609 clientPublicEmail = "client-public-email" 610 keyPublicEmail = "clientpublicemailkeyclientpublicemailkey" 611 clientPublicEmail2 = "client-public-email2" 612 keyPublicEmail2 = "clientpublicemailkeyclientpublicemailkey2" 613 clientPublicFs = "client-public-fs" 614 keyPublicFs = "keypublicfskeypublicfskeypublicfs" 615 clientTestDecomm = "client-test-decomm" 616 keyTestDecomm = "keyTestDecommkeyTestDecomm" 617 clientMgrDecommission = "client-mgr-decommission" 618 keyMgrDecommission = "keyMgrDecommissionkeyMgrDecommission" 619 clientSubsystemRemind = "client-subystem-reminders" 620 keySubsystemRemind = "keySubsystemRemindkeySubsystemRemind" 621 clientTreeTests = "clientTreeTestsclientTreeTests" 622 keyTreeTests = "keyTreeTestskeyTreeTestskeyTreeTests" 623 624 restrictedManager = "restricted-manager" 625 noFixBisectionManager = "no-fix-bisection-manager" 626 specialCCManager = "special-cc-manager" 627 notYetDecommManger = "not-yet-decomm-manager" 628 delegateToManager = "delegate-to-manager" 629 630 testDomain = "test" 631 ) 632 633 func skipWithRepro(bug *Bug) FilterResult { 634 if strings.HasPrefix(bug.Title, "skip with repro") && 635 bug.ReproLevel != dashapi.ReproLevelNone { 636 return FilterSkip 637 } 638 return FilterReport 639 } 640 641 func skipWithRepro2(bug *Bug) FilterResult { 642 if strings.HasPrefix(bug.Title, "skip reporting2 with repro") && 643 bug.ReproLevel != dashapi.ReproLevelNone { 644 return FilterSkip 645 } 646 return FilterReport 647 } 648 649 type TestConfig struct { 650 Index int 651 } 652 653 func (cfg *TestConfig) Type() string { 654 return "test" 655 } 656 657 func (cfg *TestConfig) Validate() error { 658 return nil 659 } 660 661 func testBuild(id int) *dashapi.Build { 662 return &dashapi.Build{ 663 Manager: fmt.Sprintf("manager%v", id), 664 ID: fmt.Sprintf("build%v", id), 665 OS: targets.Linux, 666 Arch: targets.AMD64, 667 VMArch: targets.AMD64, 668 SyzkallerCommit: fmt.Sprintf("syzkaller_commit%v", id), 669 CompilerID: fmt.Sprintf("compiler%v", id), 670 KernelRepo: fmt.Sprintf("repo%v", id), 671 KernelBranch: fmt.Sprintf("branch%v", id), 672 KernelCommit: strings.Repeat(fmt.Sprint(id), 40)[:40], 673 KernelCommitTitle: fmt.Sprintf("kernel_commit_title%v", id), 674 KernelCommitDate: buildCommitDate, 675 KernelConfig: []byte(fmt.Sprintf("config%v", id)), 676 } 677 } 678 679 var buildCommitDate = time.Date(1, 2, 3, 4, 5, 6, 0, time.UTC) 680 681 func testCrash(build *dashapi.Build, id int) *dashapi.Crash { 682 return &dashapi.Crash{ 683 BuildID: build.ID, 684 Title: fmt.Sprintf("title%v", id), 685 Log: []byte(fmt.Sprintf("log%v", id)), 686 Report: []byte(fmt.Sprintf("report%v", id)), 687 MachineInfo: []byte(fmt.Sprintf("machine info %v", id)), 688 } 689 } 690 691 func testCrashWithRepro(build *dashapi.Build, id int) *dashapi.Crash { 692 crash := testCrash(build, id) 693 crash.ReproOpts = []byte(fmt.Sprintf("repro opts %v", id)) 694 crash.ReproSyz = []byte(fmt.Sprintf("syncfs(%v)", id)) 695 crash.ReproC = []byte(fmt.Sprintf("int main() { return %v; }", id)) 696 return crash 697 } 698 699 func testCrashID(crash *dashapi.Crash) *dashapi.CrashID { 700 return &dashapi.CrashID{ 701 BuildID: crash.BuildID, 702 Title: crash.Title, 703 } 704 } 705 706 func TestApp(t *testing.T) { 707 c := NewCtx(t) 708 defer c.Close() 709 710 _, err := c.GET("/test1") 711 c.expectOK(err) 712 713 apiClient1 := c.makeClient(client1, password1, false) 714 apiClient2 := c.makeClient(client2, password2, false) 715 c.expectFail("unknown api method", apiClient1.Query("unsupported_method", nil, nil)) 716 c.client.LogError("name", "msg %s", "arg") 717 718 build := testBuild(1) 719 c.client.UploadBuild(build) 720 // Uploading the same build must be OK. 721 c.client.UploadBuild(build) 722 723 // Some bad combinations of client/key. 724 c.expectFail("unauthorized", c.makeClient(client1, "borked", false).Query("upload_build", build, nil)) 725 c.expectFail("unauthorized", c.makeClient("unknown", password1, false).Query("upload_build", build, nil)) 726 c.expectFail("unauthorized", c.makeClient(client1, password2, false).Query("upload_build", build, nil)) 727 728 crash1 := testCrash(build, 1) 729 c.client.ReportCrash(crash1) 730 c.client.pollBug() 731 732 // Test that namespace isolation works. 733 c.expectFail("unknown build", apiClient2.Query("report_crash", crash1, nil)) 734 735 crash2 := testCrashWithRepro(build, 2) 736 c.client.ReportCrash(crash2) 737 c.client.pollBug() 738 739 // Provoke purgeOldCrashes. 740 const purgeTestIters = 30 741 for i := 0; i < purgeTestIters; i++ { 742 // Also test how daily counts work. 743 if i == purgeTestIters/2 { 744 c.advanceTime(48 * time.Hour) 745 } 746 crash := testCrash(build, 3) 747 crash.Log = []byte(fmt.Sprintf("log%v", i)) 748 crash.Report = []byte(fmt.Sprintf("report%v", i)) 749 c.client.ReportCrash(crash) 750 } 751 rep := c.client.pollBug() 752 bug, _, _ := c.loadBug(rep.ID) 753 c.expectNE(bug, nil) 754 c.expectEQ(bug.DailyStats, []BugDailyStats{ 755 {20000101, purgeTestIters / 2}, 756 {20000103, purgeTestIters / 2}, 757 }) 758 759 cid := &dashapi.CrashID{ 760 BuildID: "build1", 761 Title: "title1", 762 } 763 c.client.ReportFailedRepro(cid) 764 765 c.client.ReportingPollBugs("test") 766 767 c.client.ReportingUpdate(&dashapi.BugUpdate{ 768 ID: "id", 769 Status: dashapi.BugStatusOpen, 770 ReproLevel: dashapi.ReproLevelC, 771 }) 772 } 773 774 func TestRedirects(t *testing.T) { 775 c := NewCtx(t) 776 defer c.Close() 777 778 checkRedirect(c, AccessUser, "/", "/test1", http.StatusFound) // redirect to default namespace 779 checkRedirect(c, AccessAdmin, "/", "/admin", http.StatusFound) 780 checkLoginRedirect(c, AccessPublic, "/access-user") // not accessible namespace 781 782 _, err := c.AuthGET(AccessUser, "/access-user") 783 c.expectOK(err) 784 } 785 786 func TestResponseStatusCode(t *testing.T) { 787 tests := []struct { 788 whatURL string 789 wantRespCode int 790 }{ 791 { 792 "/text?tag=CrashLog&x=13354bf5700000", 793 http.StatusNotFound, 794 }, 795 { 796 "/text?tag=CrashReport&x=17a2bedcb00000", 797 http.StatusNotFound, 798 }, 799 { 800 "/text?tag=ReproSyz&x=107e219b700000", 801 http.StatusNotFound, 802 }, 803 { 804 "/text?tag=ReproC&x=1762ad64f00000", 805 http.StatusNotFound, 806 }, 807 { 808 "/text?tag=CrashLog", 809 http.StatusBadRequest, 810 }, 811 { 812 "/text?tag=CrashReport", 813 http.StatusBadRequest, 814 }, 815 { 816 "/text?tag=ReproC", 817 http.StatusBadRequest, 818 }, 819 { 820 "/text?tag=ReproSyz", 821 http.StatusBadRequest, 822 }, 823 } 824 825 c := NewCtx(t) 826 defer c.Close() 827 828 for _, test := range tests { 829 checkResponseStatusCode(c, AccessUser, test.whatURL, test.wantRespCode) 830 } 831 } 832 833 func checkLoginRedirect(c *Ctx, accessLevel AccessLevel, url string) { 834 to, err := user.LoginURL(c.ctx, url) 835 if err != nil { 836 c.t.Fatal(err) 837 } 838 checkRedirect(c, accessLevel, url, to, http.StatusTemporaryRedirect) 839 } 840 841 func checkRedirect(c *Ctx, accessLevel AccessLevel, from, to string, status int) { 842 _, err := c.AuthGET(accessLevel, from) 843 c.expectNE(err, nil) 844 var httpErr *HTTPError 845 c.expectTrue(errors.As(err, &httpErr)) 846 c.expectEQ(httpErr.Code, status) 847 c.expectEQ(httpErr.Headers["Location"], []string{to}) 848 } 849 850 func checkResponseStatusCode(c *Ctx, accessLevel AccessLevel, url string, status int) { 851 _, err := c.AuthGET(accessLevel, url) 852 c.expectNE(err, nil) 853 var httpErr *HTTPError 854 c.expectTrue(errors.As(err, &httpErr)) 855 c.expectEQ(httpErr.Code, status) 856 } 857 858 // Test purging of old crashes for bugs with lots of crashes. 859 func TestPurgeOldCrashes(t *testing.T) { 860 if testing.Short() { 861 t.Skip() 862 } 863 c := NewCtx(t) 864 defer c.Close() 865 866 build := testBuild(1) 867 c.client.UploadBuild(build) 868 869 // First, send 3 crashes that are reported. These need to be preserved regardless. 870 crash := testCrash(build, 1) 871 crash.ReproOpts = []byte("no repro") 872 c.client.ReportCrash(crash) 873 rep := c.client.pollBug() 874 875 crash.ReproSyz = []byte("getpid()") 876 crash.ReproOpts = []byte("syz repro") 877 c.client.ReportCrash(crash) 878 c.client.pollBug() 879 880 crash.ReproC = []byte("int main() {}") 881 crash.ReproOpts = []byte("C repro") 882 c.client.ReportCrash(crash) 883 c.client.pollBug() 884 885 // Now report lots of bugs with/without repros. Some of the older ones should be purged. 886 var totalReported = 3 * maxCrashes() 887 for i := 0; i < totalReported; i++ { 888 c.advanceTime(2 * time.Hour) // This ensures that crashes are saved. 889 crash.ReproSyz = nil 890 crash.ReproC = nil 891 crash.ReproOpts = []byte(fmt.Sprintf("%v", i)) 892 c.client.ReportCrash(crash) 893 894 crash.ReproSyz = []byte("syz repro") 895 crash.ReproC = []byte("C repro") 896 crash.ReproOpts = []byte(fmt.Sprintf("%v", i)) 897 c.client.ReportCrash(crash) 898 } 899 bug, _, _ := c.loadBug(rep.ID) 900 crashes, _, err := queryCrashesForBug(c.ctx, bug.key(c.ctx), 10*totalReported) 901 c.expectOK(err) 902 // First, count how many crashes of different types we have. 903 // We should get all 3 reported crashes + some with repros and some without repros. 904 reported, norepro, repro := 0, 0, 0 905 for _, crash := range crashes { 906 if !crash.Reported.IsZero() { 907 reported++ 908 } else if crash.ReproSyz == 0 { 909 norepro++ 910 } else { 911 repro++ 912 } 913 } 914 c.t.Logf("got reported=%v, norepro=%v, repro=%v, maxCrashes=%v", 915 reported, norepro, repro, maxCrashes()) 916 if reported != 3 || 917 norepro < maxCrashes() || norepro > maxCrashes()+10 || 918 repro < maxCrashes() || repro > maxCrashes()+10 { 919 c.t.Fatalf("bad purged crashes") 920 } 921 // Then, check that latest crashes were preserved. 922 for _, crash := range crashes { 923 if !crash.Reported.IsZero() { 924 continue 925 } 926 idx, err := strconv.Atoi(string(crash.ReproOpts)) 927 c.expectOK(err) 928 count := norepro 929 if crash.ReproSyz != 0 { 930 count = repro 931 } 932 if idx < totalReported-count { 933 c.t.Errorf("preserved bad crash repro=%v: %v", crash.ReproC != 0, idx) 934 } 935 } 936 937 firstCrashExists := func() bool { 938 _, crashKeys, err := queryCrashesForBug(c.ctx, bug.key(c.ctx), 10*totalReported) 939 c.expectOK(err) 940 for _, key := range crashKeys { 941 if key.IntID() == rep.CrashID { 942 return true 943 } 944 } 945 return false 946 } 947 948 // A sanity check for the test itself. 949 if !firstCrashExists() { 950 t.Fatalf("the first reported crash should be present") 951 } 952 953 // Unreport the first crash. 954 reply, _ := c.client.ReportingUpdate(&dashapi.BugUpdate{ 955 ID: rep.ID, 956 Status: dashapi.BugStatusUpdate, 957 ReproLevel: dashapi.ReproLevelC, 958 UnreportCrashIDs: []int64{rep.CrashID}, 959 }) 960 c.expectEQ(reply.OK, true) 961 962 // Trigger more purge events. 963 var moreIterations = maxCrashes() 964 for i := 0; i < moreIterations; i++ { 965 c.advanceTime(2 * time.Hour) // This ensures that crashes are saved. 966 crash.ReproSyz = nil 967 crash.ReproC = nil 968 crash.ReproOpts = []byte(fmt.Sprintf("%v", i)) 969 c.client.ReportCrash(crash) 970 } 971 // Check that the unreported crash was purged. 972 if firstCrashExists() { 973 t.Fatalf("the unreported crash should have been purged") 974 } 975 } 976 977 func TestManagerFailedBuild(t *testing.T) { 978 c := NewCtx(t) 979 defer c.Close() 980 981 // Upload and check first build. 982 build := testBuild(1) 983 c.client.UploadBuild(build) 984 checkManagerBuild(c, build, nil, nil) 985 986 // Upload and check second build. 987 build.ID = "id1" 988 build.KernelCommit = "kern1" 989 build.SyzkallerCommit = "syz1" 990 c.client.UploadBuild(build) 991 checkManagerBuild(c, build, nil, nil) 992 993 // Upload failed kernel build. 994 failedBuild := new(dashapi.Build) 995 *failedBuild = *build 996 failedBuild.ID = "id2" 997 failedBuild.KernelCommit = "kern2" 998 failedBuild.KernelCommitTitle = "failed build 1" 999 failedBuild.SyzkallerCommit = "syz2" 1000 c.expectOK(c.client.ReportBuildError(&dashapi.BuildErrorReq{ 1001 Build: *failedBuild, 1002 Crash: dashapi.Crash{ 1003 Title: "failed build 1", 1004 }, 1005 })) 1006 checkManagerBuild(c, build, failedBuild, nil) 1007 1008 // Now the old good build again, nothing should change. 1009 c.client.UploadBuild(build) 1010 checkManagerBuild(c, build, failedBuild, nil) 1011 1012 // New good kernel build, failed build must reset. 1013 build.ID = "id3" 1014 build.KernelCommit = "kern3" 1015 c.client.UploadBuild(build) 1016 checkManagerBuild(c, build, nil, nil) 1017 1018 // Now more complex scenario: OK -> failed kernel -> failed kernel+syzkaller -> failed syzkaller -> OK. 1019 failedBuild.ID = "id4" 1020 failedBuild.KernelCommit = "kern4" 1021 failedBuild.KernelCommitTitle = "failed build 4" 1022 failedBuild.SyzkallerCommit = "syz4" 1023 c.expectOK(c.client.ReportBuildError(&dashapi.BuildErrorReq{ 1024 Build: *failedBuild, 1025 Crash: dashapi.Crash{ 1026 Title: "failed build 4", 1027 }, 1028 })) 1029 checkManagerBuild(c, build, failedBuild, nil) 1030 1031 failedBuild2 := new(dashapi.Build) 1032 *failedBuild2 = *failedBuild 1033 failedBuild2.ID = "id5" 1034 failedBuild2.KernelCommit = "" 1035 failedBuild2.KernelCommitTitle = "failed build 5" 1036 failedBuild2.SyzkallerCommit = "syz5" 1037 c.expectOK(c.client.ReportBuildError(&dashapi.BuildErrorReq{ 1038 Build: *failedBuild2, 1039 Crash: dashapi.Crash{ 1040 Title: "failed build 5", 1041 }, 1042 })) 1043 checkManagerBuild(c, build, failedBuild, failedBuild2) 1044 1045 build.ID = "id6" 1046 build.KernelCommit = "kern6" 1047 c.client.UploadBuild(build) 1048 checkManagerBuild(c, build, nil, failedBuild2) 1049 1050 build.ID = "id7" 1051 build.KernelCommit = "kern6" 1052 build.SyzkallerCommit = "syz7" 1053 c.client.UploadBuild(build) 1054 checkManagerBuild(c, build, nil, nil) 1055 } 1056 1057 func checkManagerBuild(c *Ctx, build, failedKernelBuild, failedSyzBuild *dashapi.Build) { 1058 mgr, dbBuild := c.loadManager("test1", build.Manager) 1059 c.expectEQ(mgr.CurrentBuild, build.ID) 1060 compareBuilds(c, dbBuild, build) 1061 checkBuildBug(c, mgr.FailedBuildBug, failedKernelBuild) 1062 checkBuildBug(c, mgr.FailedSyzBuildBug, failedSyzBuild) 1063 } 1064 1065 func checkBuildBug(c *Ctx, hash string, build *dashapi.Build) { 1066 if build == nil { 1067 c.expectEQ(hash, "") 1068 return 1069 } 1070 c.expectNE(hash, "") 1071 bug, _, dbBuild := c.loadBugByHash(hash) 1072 c.expectEQ(bug.Title, build.KernelCommitTitle) 1073 compareBuilds(c, dbBuild, build) 1074 } 1075 1076 func compareBuilds(c *Ctx, dbBuild *Build, build *dashapi.Build) { 1077 c.expectEQ(dbBuild.ID, build.ID) 1078 c.expectEQ(dbBuild.KernelCommit, build.KernelCommit) 1079 c.expectEQ(dbBuild.SyzkallerCommit, build.SyzkallerCommit) 1080 } 1081 1082 func TestLinkifyReport(t *testing.T) { 1083 input := ` 1084 tipc_topsrv_stop net/tipc/topsrv.c:694 [inline] 1085 tipc_topsrv_exit_net+0x149/0x340 net/tipc/topsrv.c:715 1086 kernel BUG at fs/ext4/inode.c:2753! 1087 pkg/sentry/fsimpl/fuse/fusefs.go:278 +0x384 1088 kvm_vcpu_release+0x4d/0x70 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3713 1089 arch/x86/entry/entry_64.S:298 1090 [<81751700>] (show_stack) from [<8176d3e0>] (dump_stack_lvl+0x48/0x54 lib/dump_stack.c:106) 1091 ` 1092 // nolint: lll 1093 output := ` 1094 tipc_topsrv_stop <a href='https://github.com/google/syzkaller/blob/111222/net/tipc/topsrv.c#L694'>net/tipc/topsrv.c:694</a> [inline] 1095 tipc_topsrv_exit_net+0x149/0x340 <a href='https://github.com/google/syzkaller/blob/111222/net/tipc/topsrv.c#L715'>net/tipc/topsrv.c:715</a> 1096 kernel BUG at <a href='https://github.com/google/syzkaller/blob/111222/fs/ext4/inode.c#L2753'>fs/ext4/inode.c:2753</a>! 1097 <a href='https://github.com/google/syzkaller/blob/111222/pkg/sentry/fsimpl/fuse/fusefs.go#L278'>pkg/sentry/fsimpl/fuse/fusefs.go:278</a> +0x384 1098 kvm_vcpu_release+0x4d/0x70 <a href='https://github.com/google/syzkaller/blob/111222/arch/x86/kvm/../../../virt/kvm/kvm_main.c#L3713'>arch/x86/kvm/../../../virt/kvm/kvm_main.c:3713</a> 1099 <a href='https://github.com/google/syzkaller/blob/111222/arch/x86/entry/entry_64.S#L298'>arch/x86/entry/entry_64.S:298</a> 1100 [<81751700>] (show_stack) from [<8176d3e0>] (dump_stack_lvl+0x48/0x54 <a href='https://github.com/google/syzkaller/blob/111222/lib/dump_stack.c#L106'>lib/dump_stack.c:106</a>) 1101 ` 1102 got := linkifyReport([]byte(input), "https://github.com/google/syzkaller", "111222") 1103 if diff := cmp.Diff(output, string(got)); diff != "" { 1104 t.Fatal(diff) 1105 } 1106 }