// Copyright 2017 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package main

import (
	"context"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/google/syzkaller/dashboard/dashapi"
	"github.com/google/syzkaller/pkg/hash"
	"github.com/google/syzkaller/pkg/subsystem"
	db "google.golang.org/appengine/v2/datastore"
)

// This file contains definitions of entities stored in datastore.

const (
	maxTextLen   = 200
	MaxStringLen = 1024

	// How many days of per-bug daily crash stats we keep (see Bug.DailyStats).
	maxBugHistoryDays = 365 * 5
)

// Manager represents a single syz-ci manager instance within a namespace.
type Manager struct {
	Namespace         string
	Name              string
	Link              string
	CurrentBuild      string
	FailedBuildBug    string
	FailedSyzBuildBug string
	LastAlive         time.Time
	CurrentUpTime     time.Duration
	LastGeneratedJob  time.Time
}

// ManagerStats holds per-day manager runtime stats.
// Has Manager as parent entity. Keyed by Date.
type ManagerStats struct {
	Date              int // YYYYMMDD
	MaxCorpus         int64
	MaxPCs            int64 // coverage
	MaxCover          int64 // what we call feedback signal everywhere else
	TotalFuzzingTime  time.Duration
	TotalCrashes      int64
	CrashTypes        int64 // unique crash types
	SuppressedCrashes int64
	TotalExecs        int64
	// These are only recorded once right after corpus is triaged.
	TriagedCoverage int64
	TriagedPCs      int64
}

// Asset describes a downloadable artifact attached to a build or a crash.
type Asset struct {
	Type        dashapi.AssetType
	DownloadURL string
	CreateDate  time.Time
	FsckLog     int64 // reference to the fsck log Text entity - 0 if fsck wasn't run
	FsIsClean   bool  // undefined value if FsckLog is 0
}

// Build describes one kernel build produced by a manager.
type Build struct {
	Namespace           string
	Manager             string
	ID                  string // unique ID generated by syz-ci
	Type                BuildType
	Time                time.Time
	OS                  string
	Arch                string
	VMArch              string
	SyzkallerCommit     string
	SyzkallerCommitDate time.Time
	CompilerID          string
	KernelRepo          string
	KernelBranch        string
	KernelCommit        string
	KernelCommitTitle   string    `datastore:",noindex"`
	KernelCommitDate    time.Time `datastore:",noindex"`
	KernelConfig        int64     // reference to KernelConfig text entity
	Assets              []Asset   // build-related assets
	AssetsLastCheck     time.Time // the last time we checked the assets for deprecation
}

// Bug is the central entity: one deduplicated kernel bug with all its state.
type Bug struct {
	Namespace    string
	Seq          int64 // sequences of the bug with the same title
	Title        string
	MergedTitles []string // crash titles that we already merged into this bug
	AltTitles    []string // alternative crash titles that we may merge into this bug
	Status       int
	StatusReason dashapi.BugStatusReason // e.g. if the bug status is "invalid", here's the reason why
	DupOf        string
	NumCrashes   int64
	NumRepro     int64
	// ReproLevel is the best ever found repro level for this bug.
	// HeadReproLevel is best known repro level that still works on the HEAD commit.
	ReproLevel      dashapi.ReproLevel
	HeadReproLevel  dashapi.ReproLevel `datastore:"HeadReproLevel"`
	BisectCause     BisectStatus
	BisectFix       BisectStatus
	HasReport       bool
	NeedCommitInfo  bool
	FirstTime       time.Time
	LastTime        time.Time
	LastSavedCrash  time.Time
	LastReproTime   time.Time
	LastCauseBisect time.Time
	FixTime         time.Time // when we become aware of the fixing commit
	LastActivity    time.Time // last time we observed any activity related to the bug
	Closed          time.Time
	SubsystemsTime  time.Time // when we have updated subsystems last time
	SubsystemsRev   int
	Reporting       []BugReporting
	Commits         []string // titles of fixing commits
	CommitInfo      []Commit // additional info for commits (for historical reasons parallel array to Commits)
	HappenedOn      []string // list of managers
	PatchedOn       []string `datastore:",noindex"` // list of managers
	UNCC            []string // don't CC these emails on this bug
	// Kcidb publishing status bitmask:
	// bit 0 - the bug is published
	// bit 1 - don't want to publish it (syzkaller build/test errors)
	KcidbStatus    int64
	DailyStats     []BugDailyStats
	Labels         []BugLabel
	DiscussionInfo []BugDiscussionInfo
	TreeTests      BugTreeTestInfo
	// FixCandidateJob holds the key of the latest successful cross-tree fix bisection job.
	FixCandidateJob string
	ReproAttempts   []BugReproAttempt
}

// BugTreeTestInfo aggregates the state of cross-tree patch testing for a bug.
type BugTreeTestInfo struct {
	// NeedPoll is set to true if this bug needs to be considered ASAP.
	NeedPoll bool
	// NextPoll can be used to delay the next inspection of the bug.
	NextPoll time.Time
	// List contains latest data about cross-tree patch tests.
	List []BugTreeTest
}

// BugTreeTest describes patch testing of one crash on one repo/branch.
type BugTreeTest struct {
	CrashID int64
	Repo    string
	Branch  string // May be also equal to a commit.
	// If the values below are set, the testing was done on a merge base.
	MergeBaseRepo   string
	MergeBaseBranch string
	// Below are job keys.
	First      string // The first job that finished successfully.
	FirstOK    string
	FirstCrash string
	Last       string
	Error      string // If some job succeeds afterwards, it should be cleared.
	Pending    string
}

type BugLabelType string

type BugLabel struct {
	Label BugLabelType
	// Either empty (for flags) or contains the value.
	Value string
	// The email of the user who manually set this subsystem tag.
	// If empty, the label was set automatically.
	SetBy string
	// Link to the message.
	Link string
}

// String renders the label as "label" or "label:value".
func (label BugLabel) String() string {
	if label.Value == "" {
		return string(label.Label)
	}
	return string(label.Label) + ":" + label.Value
}

// BugReproAttempt describes a single attempt to generate a repro for a bug.
type BugReproAttempt struct {
	Time    time.Time
	Manager string
	Log     int64
}

// SetAutoSubsystems replaces the automatically assigned subsystem labels of the
// bug and records the subsystem list revision and update time.
func (bug *Bug) SetAutoSubsystems(c context.Context, list []*subsystem.Subsystem, now time.Time, rev int) {
	bug.SubsystemsRev = rev
	bug.SubsystemsTime = now
	var objects []BugLabel
	for _, item := range list {
		objects = append(objects, BugLabel{Label: SubsystemLabel, Value: item.Name})
	}
	bug.SetLabels(makeLabelSet(c, bug.Namespace), objects)
}

// updateSingleBug reads the bug behind bugKey, applies transform to it and
// stores the result back, all within a single transaction.
func updateSingleBug(c context.Context, bugKey *db.Key, transform func(*Bug) error) error {
	tx := func(c context.Context) error {
		bug := new(Bug)
		if err := db.Get(c, bugKey, bug); err != nil {
			return fmt.Errorf("failed to get bug: %w", err)
		}
		err := transform(bug)
		if err != nil {
			return err
		}
		if _, err := db.Put(c, bugKey, bug); err != nil {
			return fmt.Errorf("failed to put bug: %w", err)
		}
		return nil
	}
	return runInTransaction(c, tx, nil)
}

// hasUserSubsystems reports whether any subsystem label was set manually by a user.
func (bug *Bug) hasUserSubsystems() bool {
	return bug.HasUserLabel(SubsystemLabel)
}

// Initially, subsystem labels were stored as Tags.Subsystems, but over time
// it turned out that we'd better store all labels together.
// Let's keep this conversion code until "Tags" are removed from all bugs.
// Then it can be removed.

// Bug202304 is the legacy on-disk representation of subsystem tags.
type Bug202304 struct {
	Tags BugTags202304
}

type BugTags202304 struct {
	Subsystems []BugTag202304
}

type BugTag202304 struct {
	Name  string
	SetBy string
}

// Load implements the db.PropertyLoadSaver interface. On top of the plain
// struct loading it:
//  1. converts legacy "Tags.*" properties into Labels entries,
//  2. defaults HeadReproLevel to ReproLevel for entities written before
//     the HeadReproLevel field existed.
func (bug *Bug) Load(origProps []db.Property) error {
	// First filter out Tag properties.
	var tags, ps []db.Property
	for _, p := range origProps {
		if strings.HasPrefix(p.Name, "Tags.") {
			tags = append(tags, p)
		} else {
			ps = append(ps, p)
		}
	}
	if err := db.LoadStruct(bug, ps); err != nil {
		return err
	}
	if len(tags) > 0 {
		old := Bug202304{}
		if err := db.LoadStruct(&old, tags); err != nil {
			return err
		}
		for _, entry := range old.Tags.Subsystems {
			bug.Labels = append(bug.Labels, BugLabel{
				Label: SubsystemLabel,
				SetBy: entry.SetBy,
				Value: entry.Name,
			})
		}
	}
	headReproFound := false
	for _, p := range ps {
		if p.Name == "HeadReproLevel" {
			headReproFound = true
			break
		}
	}
	if !headReproFound {
		// The field is new, so it won't be set in all entities.
		// Assume it to be equal to the best found repro for the bug.
		bug.HeadReproLevel = bug.ReproLevel
	}
	return nil
}

// Save implements the db.PropertyLoadSaver interface.
func (bug *Bug) Save() ([]db.Property, error) {
	return db.SaveStruct(bug)
}

// BugDailyStats holds the number of crashes of one bug on one day.
type BugDailyStats struct {
	Date       int // YYYYMMDD
	CrashCount int
}

type Commit struct {
	Hash       string
	Title      string
	Author     string
	AuthorName string
	CC         string `datastore:",noindex"` // (|-delimited list)
	Date       time.Time
}

// toDashapi converts the stored commit to its dashapi counterpart (CC is dropped).
func (com Commit) toDashapi() *dashapi.Commit {
	return &dashapi.Commit{
		Hash:       com.Hash,
		Title:      com.Title,
		Author:     com.Author,
		AuthorName: com.AuthorName,
		Date:       com.Date,
	}
}

type BugDiscussionInfo struct {
	Source  string
	Summary DiscussionSummary
}

type DiscussionSummary struct {
	AllMessages      int
	ExternalMessages int
	LastMessage      time.Time
	LastPatchMessage time.Time
}

// BugReporting is the per-reporting-stage state of a bug.
type BugReporting struct {
	Name    string // refers to Reporting.Name
	ID      string // unique ID per BUG/BugReporting used in communication with external systems
	ExtID   string // arbitrary reporting ID that is passed back in dashapi.BugReport
	Link    string
	CC      string // additional emails added to CC list (|-delimited list)
	CrashID int64  // crash that we've last reported in this reporting
	Auto    bool   // was it auto-upstreamed/obsoleted?
	// If Dummy is true, the corresponding Reporting stage was introduced later and the object was just
	// inserted to preserve consistency across the system. Even though it's indicated as Closed and Reported,
	// it never actually was.
	Dummy      bool
	ReproLevel dashapi.ReproLevel // may be less than bug.ReproLevel if repro arrived but we didn't report it yet
	Labels     string             // a comma-separated string of already reported labels
	OnHold     time.Time          // if set, the bug must not be upstreamed
	Reported   time.Time
	Closed     time.Time
}

// GetLabels returns the already reported labels as a list.
func (r *BugReporting) GetLabels() []string {
	return strings.Split(r.Labels, ",")
}

// AddLabel records label as reported (the stored list stays duplicate-free).
func (r *BugReporting) AddLabel(label string) {
	newList := unique(append(r.GetLabels(), label))
	r.Labels = strings.Join(newList, ",")
}

// Crash describes a single crash of a bug. Has Bug as parent entity.
type Crash struct {
	// May be different from bug.Title due to AltTitles.
	// May be empty for old bugs, in such case bug.Title is the right title.
	Title           string
	Manager         string
	BuildID         string
	Time            time.Time
	Reported        time.Time // set if this crash was ever reported
	References      []CrashReference
	Maintainers     []string            `datastore:",noindex"`
	Log             int64               // reference to CrashLog text entity
	Flags           int64               // properties of the Crash
	Report          int64               // reference to CrashReport text entity
	ReportElements  CrashReportElements // parsed parts of the crash report
	ReproOpts       []byte              `datastore:",noindex"`
	ReproSyz        int64               // reference to ReproSyz text entity
	ReproC          int64               // reference to ReproC text entity
	ReproIsRevoked  bool                // the repro no longer triggers the bug on HEAD
	ReproLog        int64               // reference to ReproLog text entity
	LastReproRetest time.Time           // the last time when the repro was re-checked
	MachineInfo     int64               // Reference to MachineInfo text entity.
	// Custom crash priority for reporting (greater values are higher priority).
	// For example, a crash in mainline kernel has higher priority than a crash in a side branch.
	// For historical reasons this is called ReportLen.
	ReportLen       int64
	Assets          []Asset   // crash-related assets
	AssetsLastCheck time.Time // the last time we checked the assets for deprecation
}

type CrashReportElements struct {
	GuiltyFiles []string // guilty files as determined during the crash report parsing
}

type CrashReferenceType string

const (
	CrashReferenceReporting = "reporting"
	CrashReferenceJob       = "job"
	// This one is needed for backward compatibility.
	crashReferenceUnknown = "unknown"
)

// CrashReference is one reason to keep the crash around (ref counting).
type CrashReference struct {
	Type CrashReferenceType
	// For CrashReferenceReporting, it refers to Reporting.Name
	// For CrashReferenceJob, it refers to extJobID(jobKey)
	Key  string
	Time time.Time
}

// AddReference adds (or refreshes the timestamp of) a reference to the crash
// and bumps Reported to the reference time.
func (crash *Crash) AddReference(newRef CrashReference) {
	crash.Reported = newRef.Time
	for i, ref := range crash.References {
		if ref.Type != newRef.Type || ref.Key != newRef.Key {
			continue
		}
		crash.References[i].Time = newRef.Time
		return
	}
	crash.References = append(crash.References, newRef)
}

// ClearReference drops the (t, key) reference and recalculates Reported as the
// latest time among the remaining references (zero if none remain).
func (crash *Crash) ClearReference(t CrashReferenceType, key string) {
	newRefs := []CrashReference{}
	crash.Reported = time.Time{}
	for _, ref := range crash.References {
		if ref.Type == t && ref.Key == key {
			continue
		}
		if ref.Time.After(crash.Reported) {
			crash.Reported = ref.Time
		}
		newRefs = append(newRefs, ref)
	}
	crash.References = newRefs
}

// Load implements the db.PropertyLoadSaver interface.
func (crash *Crash) Load(ps []db.Property) error {
	if err := db.LoadStruct(crash, ps); err != nil {
		return err
	}
	// Earlier we only relied on Reported, which does not let us reliably unreport a crash.
	// We need some means of ref counting, so let's create a dummy reference to keep the
	// crash from being purged.
	if !crash.Reported.IsZero() && len(crash.References) == 0 {
		crash.References = append(crash.References, CrashReference{
			Type: crashReferenceUnknown,
			Time: crash.Reported,
		})
	}
	return nil
}

// Save implements the db.PropertyLoadSaver interface.
func (crash *Crash) Save() ([]db.Property, error) {
	return db.SaveStruct(crash)
}

// Discussion is an external discussion (e.g. an email thread) related to some bugs.
type Discussion struct {
	ID      string // the base message ID
	Source  string
	Type    string
	Subject string
	BugKeys []string
	// Messages contains last N messages.
	// N is supposed to be big enough, so that in almost all cases
	// AllMessages == len(Messages) holds true.
	Messages []DiscussionMessage
	// Since Messages could be trimmed, we have to keep aggregate stats.
	Summary DiscussionSummary
}

// discussionKey constructs the datastore key of a Discussion entity.
func discussionKey(c context.Context, source, id string) *db.Key {
	return db.NewKey(c, "Discussion", fmt.Sprintf("%v-%v", source, id), 0, nil)
}

func (d *Discussion) key(c context.Context) *db.Key {
	return discussionKey(c, d.Source, d.ID)
}

type DiscussionMessage struct {
	ID string
	// External is true if the message is not from the bot itself.
	// Let's use a shorter name to save space.
	External bool      `datastore:"e"`
	Time     time.Time `datastore:",noindex"`
}

// ReportingState holds dynamic info associated with reporting.
type ReportingState struct {
	Entries []ReportingStateEntry
}

type ReportingStateEntry struct {
	Namespace string
	Name      string
	// Current reporting quota consumption.
	Sent int
	Date int // YYYYMMDD
}

// Subsystem holds the history of grouped per-subsystem open bug reminders.
type Subsystem struct {
	Namespace string
	Name      string
	// ListsQueried is the last time bug lists were queried for the subsystem.
	ListsQueried time.Time
	// LastBugList is the last time we have actually managed to generate a bug list.
	LastBugList time.Time
}

// SubsystemReport holds a single report about open bugs in a subsystem.
// There'll be one record for moderation (if it's needed) and one for actual reporting.
type SubsystemReport struct {
	Created     time.Time
	BugKeys     []string `datastore:",noindex"`
	TotalStats  SubsystemReportStats
	PeriodStats SubsystemReportStats
	Stages      []SubsystemReportStage
}

// getBugKeys decodes the stored (string-encoded) bug keys back into datastore keys.
func (r *SubsystemReport) getBugKeys() ([]*db.Key, error) {
	ret := []*db.Key{}
	for _, encoded := range r.BugKeys {
		key, err := db.DecodeKey(encoded)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %#v: %w", encoded, err)
		}
		ret = append(ret, key)
	}
	return ret, nil
}

// findStage returns the stage with the given ID, or nil if there's none.
func (r *SubsystemReport) findStage(id string) *SubsystemReportStage {
	for j := range r.Stages {
		stage := &r.Stages[j]
		if stage.ID == id {
			return stage
		}
	}
	return nil
}

type SubsystemReportStats struct {
	Reported int
	LowPrio  int
	Fixed    int
}

// toDashapi converts the stats to their dashapi counterpart.
func (s *SubsystemReportStats) toDashapi() dashapi.BugListReportStats {
	return dashapi.BugListReportStats{
		Reported: s.Reported,
		LowPrio:  s.LowPrio,
		Fixed:    s.Fixed,
	}
}

// There can be at most two stages.
// One has Moderation=true, the other one has Moderation=false.
type SubsystemReportStage struct {
	ID         string
	ExtID      string
	Link       string
	Reported   time.Time
	Closed     time.Time
	Moderation bool
}

// Job represents a single patch testing or bisection job for syz-ci.
// Later we may want to extend this to other types of jobs:
// - test of a committed fix
// - reproduce crash
// - test that crash still happens on HEAD
//
// Job has Bug as parent entity.
type Job struct {
	Type      JobType
	Created   time.Time
	User      string
	CC        []string
	Reporting string
	ExtID     string // email Message-ID
	Link      string // web link for the job (e.g. email in the group)
	Namespace string
	Manager   string
	BugTitle  string
	CrashID   int64

	// Provided by user:
	KernelRepo   string
	KernelBranch string
	Patch        int64 // reference to Patch text entity
	KernelConfig int64 // reference to the kernel config entity

	Attempts    int       // number of times we tried to execute this job
	IsRunning   bool      // the job might have been started, but never finished
	LastStarted time.Time `datastore:"Started"`
	Finished    time.Time // if set, job is finished
	TreeOrigin  bool      // whether the job is related to tree origin detection

	// If patch test should be done on the merge base between two branches.
	MergeBaseRepo   string
	MergeBaseBranch string

	// By default, bisection starts from the revision of the associated crash.
	// The BisectFrom field can override this.
	BisectFrom string

	// Result of execution:
	CrashTitle  string // if empty, we did not hit crash during testing
	CrashLog    int64  // reference to CrashLog text entity
	CrashReport int64  // reference to CrashReport text entity
	Commits     []Commit
	BuildID     string
	Log         int64 // reference to Log text entity
	Error       int64 // reference to Error text entity, if set job failed
	Flags       dashapi.JobDoneFlags

	Reported         bool   // have we reported result back to user?
	InvalidatedBy    string // user who marked this bug as invalid, empty by default
	BackportedCommit Commit
}

// IsBisection reports whether the job is a cause or fix bisection.
func (job *Job) IsBisection() bool {
	return job.Type == JobBisectCause || job.Type == JobBisectFix
}

// IsFinished reports whether the job has completed (the Finished time is set).
func (job *Job) IsFinished() bool {
	return !job.Finished.IsZero()
}

type JobType int

const (
	JobTestPatch JobType = iota
	JobBisectCause
	JobBisectFix
)

// toDashapiReportType maps the job type to the corresponding report type.
// Panics on an unknown type (programmer error).
func (typ JobType) toDashapiReportType() dashapi.ReportType {
	switch typ {
	case JobTestPatch:
		return dashapi.ReportTestPatch
	case JobBisectCause:
		return dashapi.ReportBisectCause
	case JobBisectFix:
		return dashapi.ReportBisectFix
	default:
		panic(fmt.Sprintf("unknown job type %v", typ))
	}
}

// isUnreliableBisect reports whether the bisection result should be treated as
// untrustworthy. Must only be called on bisection jobs.
func (job *Job) isUnreliableBisect() bool {
	if job.Type != JobBisectCause && job.Type != JobBisectFix {
		panic(fmt.Sprintf("bad job type %v", job.Type))
	}
	// If a bisection points to a merge or a commit that does not affect the kernel binary,
	// it is considered an unreliable/wrong result and should not be reported in emails.
	return job.Flags&dashapi.BisectResultMerge != 0 ||
		job.Flags&dashapi.BisectResultNoop != 0 ||
		job.Flags&dashapi.BisectResultRelease != 0 ||
		job.Flags&dashapi.BisectResultIgnore != 0
}

// IsCrossTree reports whether this is a cross-tree bisection (done on a merge base).
func (job *Job) IsCrossTree() bool {
	return job.MergeBaseRepo != "" && job.IsBisection()
}

// Text holds text blobs (crash logs, reports, reproducers, etc).
type Text struct {
	Namespace string
	Text      []byte `datastore:",noindex"` // gzip-compressed text
}

// Tags used to reference Text entities (see textLink).
const (
	textCrashLog     = "CrashLog"
	textCrashReport  = "CrashReport"
	textReproSyz     = "ReproSyz"
	textReproC       = "ReproC"
	textMachineInfo  = "MachineInfo"
	textKernelConfig = "KernelConfig"
	textPatch        = "Patch"
	textLog          = "Log"
	textError        = "Error"
	textReproLog     = "ReproLog"
	textFsckLog      = "FsckLog"
)

const (
	BugStatusOpen = iota
)

const (
	BugStatusFixed = 1000 + iota
	BugStatusInvalid
	BugStatusDup
)

const (
	ReproLevelNone = dashapi.ReproLevelNone
	ReproLevelSyz  = dashapi.ReproLevelSyz
	ReproLevelC    = dashapi.ReproLevelC
)

type BuildType int

const (
	BuildNormal BuildType = iota
	BuildFailed
	BuildJob
)

type BisectStatus int

const (
	BisectNot BisectStatus = iota
	BisectPending
	BisectError
	BisectYes          // have 1 commit
	BisectUnreliable   // have 1 commit, but suspect it's wrong
	BisectInconclusive // multiple commits due to skips
	BisectHorizont     // happens on the oldest commit we can test (or HEAD for fix bisection)
	bisectStatusLast   // this value can be changed (not stored in datastore)
)

// String returns the human-readable representation of the bisection status.
// Note: BisectHorizont is intentionally rendered as "inconclusive" too.
func (status BisectStatus) String() string {
	switch status {
	case BisectError:
		return "error"
	case BisectYes:
		return "done"
	case BisectUnreliable:
		return "unreliable"
	case BisectInconclusive:
		return "inconclusive"
	case BisectHorizont:
		return "inconclusive"
	default:
		return ""
	}
}

// ReproTask is a manually requested reproduction attempt.
type ReproTask struct {
	Namespace    string
	Manager      string
	Log          int64 // Reference to CrashLog text entity.
	AttemptsLeft int64
	LastAttempt  time.Time
}

// mgrKey constructs the datastore key of a Manager entity.
func mgrKey(c context.Context, ns, name string) *db.Key {
	return db.NewKey(c, "Manager", fmt.Sprintf("%v-%v", ns, name), 0, nil)
}

func (mgr *Manager) key(c context.Context) *db.Key {
	return mgrKey(c, mgr.Namespace, mgr.Name)
}

// loadManager fetches the Manager entity; if it does not exist yet,
// a fresh zero-valued object with the given identity is returned.
func loadManager(c context.Context, ns, name string) (*Manager, error) {
	mgr := new(Manager)
	if err := db.Get(c, mgrKey(c, ns, name), mgr); err != nil {
		if err != db.ErrNoSuchEntity {
			return nil, fmt.Errorf("failed to get manager %v/%v: %w", ns, name, err)
		}
		mgr = &Manager{
			Namespace: ns,
			Name:      name,
		}
	}
	return mgr, nil
}

// updateManager does transactional compare-and-swap on the manager and its current stats.
func updateManager(c context.Context, ns, name string, fn func(mgr *Manager, stats *ManagerStats) error) error {
	date := timeDate(timeNow(c))
	tx := func(c context.Context) error {
		mgr, err := loadManager(c, ns, name)
		if err != nil {
			return err
		}
		mgrKey := mgr.key(c)
		stats := new(ManagerStats)
		// ManagerStats is keyed by date and has the manager as parent.
		statsKey := db.NewKey(c, "ManagerStats", "", int64(date), mgrKey)
		if err := db.Get(c, statsKey, stats); err != nil {
			if err != db.ErrNoSuchEntity {
				return fmt.Errorf("failed to get stats %v/%v/%v: %w", ns, name, date, err)
			}
			stats = &ManagerStats{
				Date: date,
			}
		}

		if err := fn(mgr, stats); err != nil {
			return err
		}

		if _, err := db.Put(c, mgrKey, mgr); err != nil {
			return fmt.Errorf("failed to put manager: %w", err)
		}
		if _, err := db.Put(c, statsKey, stats); err != nil {
			return fmt.Errorf("failed to put manager stats: %w", err)
		}
		return nil
	}
	return runInTransaction(c, tx, nil)
}

// loadAllManagers returns all non-decommissioned managers (and their keys)
// in the given namespace, or in all namespaces if ns is empty.
func loadAllManagers(c context.Context, ns string) ([]*Manager, []*db.Key, error) {
	var managers []*Manager
	query := db.NewQuery("Manager")
	if ns != "" {
		query = query.Filter("Namespace=", ns)
	}
	keys, err := query.GetAll(c, &managers)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to query managers: %w", err)
	}
	var result []*Manager
	var resultKeys []*db.Key
	for i, mgr := range managers {
		if getNsConfig(c, mgr.Namespace).Managers[mgr.Name].Decommissioned {
			continue
		}
		result = append(result, mgr)
		resultKeys = append(resultKeys, keys[i])
	}
	return result, resultKeys, nil
}

// buildKey constructs the datastore key of a Build entity.
func buildKey(c context.Context, ns, id string) *db.Key {
	if ns == "" {
		panic("requesting build key outside of namespace")
	}
	h := hash.String([]byte(fmt.Sprintf("%v-%v", ns, id)))
	return db.NewKey(c, "Build", h, 0, nil)
}

func loadBuild(c context.Context, ns, id string) (*Build, error) {
	build := new(Build)
	if err := db.Get(c, buildKey(c, ns, id), build); err != nil {
		if err == db.ErrNoSuchEntity {
			return nil, fmt.Errorf("unknown build %v/%v", ns, id)
		}
		return nil, fmt.Errorf("failed to get build %v/%v: %w", ns, id, err)
	}
	return build, nil
}

// lastManagerBuild returns the manager's current build.
func lastManagerBuild(c context.Context, ns, manager string) (*Build, error) {
	mgr, err := loadManager(c, ns, manager)
	if err != nil {
		return nil, err
	}
	if mgr.CurrentBuild == "" {
		return nil, fmt.Errorf("failed to fetch manager build: no builds")
	}
	return loadBuild(c, ns, mgr.CurrentBuild)
}

// loadBuilds returns up to 1000 most recent builds of the given type for the manager.
func loadBuilds(c context.Context, ns, manager string, typ BuildType) ([]*Build, error) {
	const limit = 1000
	var builds []*Build
	_, err := db.NewQuery("Build").
		Filter("Namespace=", ns).
		Filter("Manager=", manager).
		Filter("Type=", typ).
		Order("-Time").
		Limit(limit).
		GetAll(c, &builds)
	if err != nil {
		return nil, err
	}
	return builds, nil
}

// displayTitle returns the title shown to users: "title" for Seq 0,
// "title (N)" with a 1-based N for subsequent bugs with the same title.
func (bug *Bug) displayTitle() string {
	if bug.Seq == 0 {
		return bug.Title
	}
	return fmt.Sprintf("%v (%v)", bug.Title, bug.Seq+1)
}

var displayTitleRe = regexp.MustCompile(`^(.*) \(([0-9]+)\)$`)

// splitDisplayTitle is the inverse of displayTitle: it extracts the raw title
// and the 0-based Seq from a display title.
func splitDisplayTitle(display string) (string, int64, error) {
	match := displayTitleRe.FindStringSubmatchIndex(display)
	if match == nil {
		return display, 0, nil
	}
	title := display[match[2]:match[3]]
	seqStr := display[match[4]:match[5]]
	seq, err := strconv.ParseInt(seqStr, 10, 64)
	if err != nil {
		return "", 0, fmt.Errorf("failed to parse bug title: %w", err)
	}
	if seq <= 0 || seq > 1e6 {
		return "", 0, fmt.Errorf("failed to parse bug title: seq=%v", seq)
	}
	return title, seq - 1, nil
}

// canonicalBug follows the DupOf chain until it reaches a non-dup bug.
func canonicalBug(c context.Context, bug *Bug) (*Bug, error) {
	for {
		if bug.Status != BugStatusDup {
			return bug, nil
		}
		canon := new(Bug)
		bugKey := db.NewKey(c, "Bug", bug.DupOf, 0, nil)
		if err := db.Get(c, bugKey, canon); err != nil {
			return nil, fmt.Errorf("failed to get dup bug %q for %q: %w",
				bug.DupOf, bug.keyHash(c), err)
		}
		bug = canon
	}
}

func (bug *Bug) key(c context.Context) *db.Key {
	return db.NewKey(c, "Bug", bug.keyHash(c), 0, nil)
}

func (bug *Bug) keyHash(c context.Context) string {
	return bugKeyHash(c, bug.Namespace, bug.Title, bug.Seq)
}

// bugKeyHash produces the stable hash that keys Bug entities.
func bugKeyHash(c context.Context, ns, title string, seq int64) string {
	return hash.String([]byte(fmt.Sprintf("%v-%v-%v-%v", getNsConfig(c, ns).Key, ns, title, seq)))
}

// loadSimilarBugs returns other bugs in the same similarity domain whose
// AltTitles overlap with this bug's, deduplicated and excluding the bug itself.
func loadSimilarBugs(c context.Context, bug *Bug) ([]*Bug, error) {
	domain := getNsConfig(c, bug.Namespace).SimilarityDomain
	dedup := make(map[string]bool)
	dedup[bug.keyHash(c)] = true

	ret := []*Bug{}
	for _, title := range bug.AltTitles {
		var similar []*Bug
		_, err := db.NewQuery("Bug").
			Filter("AltTitles=", title).
			GetAll(c, &similar)
		if err != nil {
			return nil, err
		}
		for _, bug := range similar {
			if getNsConfig(c, bug.Namespace).SimilarityDomain != domain ||
				dedup[bug.keyHash(c)] {
				continue
			}
			dedup[bug.keyHash(c)] = true
			ret = append(ret, bug)
		}
	}
	return ret, nil
}

// Since these IDs appear in Reported-by tags in commit, we slightly limit their size.
const reportingHashLen = 20

func bugReportingHash(bugHash, reporting string) string {
	return hash.String([]byte(fmt.Sprintf("%v-%v", bugHash, reporting)))[:reportingHashLen]
}

func looksLikeReportingHash(id string) bool {
	// This is only used as best-effort check.
	// Now we produce 20-chars ids, but we used to use full sha1 hash.
	return len(id) == reportingHashLen || len(id) == 2*len(hash.Sig{})
}

// updateCommits records the new set of fixing commit titles and resets
// all the dependent state (commit info, patched managers).
func (bug *Bug) updateCommits(commits []string, now time.Time) {
	bug.Commits = commits
	bug.CommitInfo = nil
	bug.NeedCommitInfo = true
	bug.FixTime = now
	bug.PatchedOn = nil
}

// getCommitInfo returns the i-th commit info, or a zero Commit if it's absent
// (CommitInfo is a parallel array to Commits and may be shorter).
func (bug *Bug) getCommitInfo(i int) Commit {
	if i < len(bug.CommitInfo) {
		return bug.CommitInfo[i]
	}
	return Commit{}
}

// increaseCrashStats bumps NumCrashes and the current day's DailyStats entry,
// trimming the history to maxBugHistoryDays.
func (bug *Bug) increaseCrashStats(now time.Time) {
	bug.NumCrashes++
	date := timeDate(now)
	if len(bug.DailyStats) == 0 || bug.DailyStats[len(bug.DailyStats)-1].Date < date {
		bug.DailyStats = append(bug.DailyStats, BugDailyStats{date, 1})
	} else {
		// It is theoretically possible that this method might get into a situation, when
		// the latest saved date is later than now. But we assume that this can only happen
		// in a small window around the start of the day and it is better to attribute a
		// crash to the next day than to get a mismatch between NumCrashes and the sum of
		// CrashCount.
		bug.DailyStats[len(bug.DailyStats)-1].CrashCount++
	}

	if len(bug.DailyStats) > maxBugHistoryDays {
		bug.DailyStats = bug.DailyStats[len(bug.DailyStats)-maxBugHistoryDays:]
	}
}

// dailyStatsTail returns the suffix of DailyStats starting at from's date.
func (bug *Bug) dailyStatsTail(from time.Time) []BugDailyStats {
	startDate := timeDate(from)
	startPos := len(bug.DailyStats)
	for ; startPos > 0; startPos-- {
		if bug.DailyStats[startPos-1].Date < startDate {
			break
		}
	}
	return bug.DailyStats[startPos:]
}

// dashapiStatus converts the stored bug status to its dashapi counterpart.
func (bug *Bug) dashapiStatus() (dashapi.BugStatus, error) {
	var status dashapi.BugStatus
	switch bug.Status {
	case BugStatusOpen:
		status = dashapi.BugStatusOpen
	case BugStatusFixed:
		status = dashapi.BugStatusFixed
	case BugStatusInvalid:
		status = dashapi.BugStatusInvalid
	case BugStatusDup:
		status = dashapi.BugStatusDup
	default:
		return status, fmt.Errorf("unknown bugs status %v", bug.Status)
	}
	return status, nil
}

// If an entity of type EmergencyStop exists, syzbot's operation is paused until
// a support engineer deletes it from the DB.
1018 type EmergencyStop struct { 1019 Time time.Time 1020 User string 1021 } 1022 1023 func addCrashReference(c context.Context, crashID int64, bugKey *db.Key, ref CrashReference) error { 1024 crash := new(Crash) 1025 crashKey := db.NewKey(c, "Crash", "", crashID, bugKey) 1026 if err := db.Get(c, crashKey, crash); err != nil { 1027 return fmt.Errorf("failed to get reported crash %v: %w", crashID, err) 1028 } 1029 crash.AddReference(ref) 1030 if _, err := db.Put(c, crashKey, crash); err != nil { 1031 return fmt.Errorf("failed to put reported crash %v: %w", crashID, err) 1032 } 1033 return nil 1034 } 1035 1036 func removeCrashReference(c context.Context, crashID int64, bugKey *db.Key, 1037 t CrashReferenceType, key string) error { 1038 crash := new(Crash) 1039 crashKey := db.NewKey(c, "Crash", "", crashID, bugKey) 1040 if err := db.Get(c, crashKey, crash); err != nil { 1041 return fmt.Errorf("failed to get reported crash %v: %w", crashID, err) 1042 } 1043 crash.ClearReference(t, key) 1044 if _, err := db.Put(c, crashKey, crash); err != nil { 1045 return fmt.Errorf("failed to put reported crash %v: %w", crashID, err) 1046 } 1047 return nil 1048 } 1049 1050 func kernelRepoInfo(c context.Context, build *Build) KernelRepo { 1051 return kernelRepoInfoRaw(c, build.Namespace, build.KernelRepo, build.KernelBranch) 1052 } 1053 1054 func kernelRepoInfoRaw(c context.Context, ns, url, branch string) KernelRepo { 1055 var info KernelRepo 1056 for _, repo := range getNsConfig(c, ns).Repos { 1057 if repo.URL == url && repo.Branch == branch { 1058 info = repo 1059 break 1060 } 1061 } 1062 if info.Alias == "" { 1063 info.Alias = url 1064 if branch != "" { 1065 info.Alias += " " + branch 1066 } 1067 } 1068 return info 1069 } 1070 1071 func textLink(tag string, id int64) string { 1072 if id == 0 { 1073 return "" 1074 } 1075 return fmt.Sprintf("/text?tag=%v&x=%v", tag, strconv.FormatUint(uint64(id), 16)) 1076 } 1077 1078 // timeDate returns t's date as a single int YYYYMMDD. 
1079 func timeDate(t time.Time) int { 1080 year, month, day := t.Date() 1081 return year*10000 + int(month)*100 + day 1082 } 1083 1084 func stringInList(list []string, str string) bool { 1085 for _, s := range list { 1086 if s == str { 1087 return true 1088 } 1089 } 1090 return false 1091 } 1092 1093 func stringListsIntersect(a, b []string) bool { 1094 m := map[string]bool{} 1095 for _, strA := range a { 1096 m[strA] = true 1097 } 1098 for _, strB := range b { 1099 if m[strB] { 1100 return true 1101 } 1102 } 1103 return false 1104 } 1105 1106 func mergeString(list []string, str string) []string { 1107 if !stringInList(list, str) { 1108 list = append(list, str) 1109 } 1110 return list 1111 } 1112 1113 func mergeStringList(list, add []string) []string { 1114 for _, str := range add { 1115 list = mergeString(list, str) 1116 } 1117 return list 1118 } 1119 1120 // dateTime converts date in YYYYMMDD format back to Time. 1121 func dateTime(date int) time.Time { 1122 return time.Date(date/10000, time.Month(date/100%100), date%100, 0, 0, 0, 0, time.UTC) 1123 } 1124 1125 // dependencyLoader encapsulates the repetitive logic of mass loading referenced entities. 
1126 type dependencyLoader[T any] struct { 1127 keys []*db.Key 1128 callbacks []func(*T) 1129 } 1130 1131 func (dl *dependencyLoader[T]) add(key *db.Key, upd func(*T)) { 1132 dl.keys = append(dl.keys, key) 1133 dl.callbacks = append(dl.callbacks, upd) 1134 } 1135 1136 func (dl *dependencyLoader[T]) load(c context.Context) error { 1137 type info struct { 1138 key *db.Key 1139 cbs []func(*T) 1140 } 1141 unique := map[string]*info{} 1142 for i, key := range dl.keys { 1143 str := key.String() 1144 val := unique[str] 1145 if val == nil { 1146 val = &info{key: key} 1147 unique[str] = val 1148 } 1149 val.cbs = append(val.cbs, dl.callbacks[i]) 1150 } 1151 if len(unique) == 0 { 1152 return nil 1153 } 1154 1155 var keys []*db.Key 1156 var infos []*info 1157 for _, info := range unique { 1158 keys = append(keys, info.key) 1159 infos = append(infos, info) 1160 } 1161 objects := make([]*T, len(keys)) 1162 if badKey, err := getAllMulti(c, keys, objects); err != nil { 1163 return fmt.Errorf("%v: %w", badKey, err) 1164 } 1165 for i := range keys { 1166 info := infos[i] 1167 for _, cb := range info.cbs { 1168 cb(objects[i]) 1169 } 1170 } 1171 return nil 1172 } 1173 1174 type txFunc func(tc context.Context) error 1175 1176 // runInTransaction is a wrapper around db.RunInTransaction, 1177 // with the common number of attempts. 1178 func runInTransaction(c context.Context, tx txFunc, opts *db.TransactionOptions) error { 1179 if opts == nil { 1180 opts = &db.TransactionOptions{} 1181 } 1182 if opts.Attempts == 0 { 1183 opts.Attempts = 10 1184 } 1185 return db.RunInTransaction(c, tx, opts) 1186 }