// Copyright 2017 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package main

import (
	"bytes"
	"compress/gzip"
	"context"
	"crypto/subtle"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"net/mail"
	"net/url"
	"reflect"
	"regexp"
	"sort"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/google/syzkaller/dashboard/dashapi"
	"github.com/google/syzkaller/pkg/asset"
	"github.com/google/syzkaller/pkg/auth"
	"github.com/google/syzkaller/pkg/coveragedb"
	"github.com/google/syzkaller/pkg/debugtracer"
	"github.com/google/syzkaller/pkg/email"
	"github.com/google/syzkaller/pkg/gcs"
	"github.com/google/syzkaller/pkg/hash"
	"github.com/google/syzkaller/pkg/subsystem"
	"github.com/google/syzkaller/sys/targets"
	"github.com/google/uuid"
	"google.golang.org/appengine/v2"
	db "google.golang.org/appengine/v2/datastore"
	"google.golang.org/appengine/v2/log"
	aemail "google.golang.org/appengine/v2/mail"
	"google.golang.org/appengine/v2/user"
)

// initAPIHandlers registers the single /api endpoint; all methods below are
// multiplexed through it by the "method" POST form value (see handleAPI).
func initAPIHandlers() {
	http.Handle("/api", handleJSON(handleAPI))
}

// apiHandlers maps the "method" form value to its implementation.
// Handlers wrapped in nsHandler require the authenticated client to be bound
// to a namespace; gcsPayloadHandler-wrapped handlers receive their payload
// streamed from a GCS object instead of the request body.
var apiHandlers = map[string]APIHandler{
	"log_error":             apiLogError,
	"job_poll":              apiJobPoll,
	"job_reset":             apiJobReset,
	"job_done":              apiJobDone,
	"reporting_poll_bugs":   apiReportingPollBugs,
	"reporting_poll_notifs": apiReportingPollNotifications,
	"reporting_poll_closed": apiReportingPollClosed,
	"reporting_update":      apiReportingUpdate,
	"new_test_job":          apiNewTestJob,
	"needed_assets":         apiNeededAssetsList,
	"load_full_bug":         apiLoadFullBug,
	"save_discussion":       apiSaveDiscussion,
	"create_upload_url":     apiCreateUploadURL,
	"send_email":            apiSendEmail,
	"save_coverage":         gcsPayloadHandler(apiSaveCoverage),
	"upload_build":          nsHandler(apiUploadBuild),
	"builder_poll":          nsHandler(apiBuilderPoll),
	"report_build_error":    nsHandler(apiReportBuildError),
	"report_crash":          nsHandler(apiReportCrash),
	"report_failed_repro":   nsHandler(apiReportFailedRepro),
	"need_repro":            nsHandler(apiNeedRepro),
	"manager_stats":         nsHandler(apiManagerStats),
	"commit_poll":           nsHandler(apiCommitPoll),
	"upload_commits":        nsHandler(apiUploadCommits),
	"bug_list":              nsHandler(apiBugList),
	"load_bug":              nsHandler(apiLoadBug),
	"update_report":         nsHandler(apiUpdateReport),
	"add_build_assets":      nsHandler(apiAddBuildAssets),
	"log_to_repro":          nsHandler(apiLogToReproduce),
}

// JSONHandler serves a raw HTTP request and returns a reply to be JSON-encoded.
type JSONHandler func(c context.Context, r *http.Request) (interface{}, error)

// APIHandler handles one decoded API method; payload may be nil if the request
// carried no payload.
type APIHandler func(c context.Context, payload io.Reader) (interface{}, error)

// APINamespaceHandler is an APIHandler that additionally receives the
// namespace the calling client is bound to (see nsHandler).
type APINamespaceHandler func(c context.Context, ns string, payload io.Reader) (interface{}, error)

const (
	maxReproPerBug   = 10
	reproRetryPeriod = 24 * time.Hour // try 1 repro per day until we have at least syz repro
	// Attempt a new repro every ~ 3 months, even if we have already found it for the bug. This should:
	// 1) Improve old repros over time (as we update descriptions / change syntax / repro algorithms).
	// 2) Constrain the impact of bugs in syzkaller's backward compatibility. Fewer old repros, fewer problems.
	reproStalePeriod = 100 * 24 * time.Hour
)

// Overridable for testing.
var timeNow = func(c context.Context) time.Time {
	return time.Now()
}

// timeSince returns the duration elapsed since t according to timeNow
// (and is therefore overridable in tests together with it).
func timeSince(c context.Context, t time.Time) time.Duration {
	return timeNow(c).Sub(t)
}

// maxCrashes returns the per-bug crash retention limit.
// It is a var holding a func so that tests can substitute it.
var maxCrashes = func() int {
	const maxCrashesPerBug = 40
	return maxCrashesPerBug
}

// handleJSON adapts a JSONHandler to http.Handler: it creates the appengine
// context, runs the handler, and writes the reply as (gzip-compressed) JSON.
// Handler errors are converted to an HTTP error status via logErrorPrepareStatus.
func handleJSON(fn JSONHandler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		c := appengine.NewContext(r)
		reply, err := fn(c, r)
		if err != nil {
			status := logErrorPrepareStatus(c, err)
			http.Error(w, err.Error(), status)
			return
		}

		wJS := newGzipResponseWriterCloser(w)
		defer wJS.Close()
		if err := json.NewEncoder(wJS).Encode(reply); err != nil {
			log.Errorf(c, "failed to encode reply: %v", err)
			return
		}
		// Header is set before writeResult emits the encoded body;
		// wJS presumably buffers the output until then — see newGzipResponseWriterCloser.
		w.Header().Set("Content-Type", "application/json")
		if err := wJS.writeResult(r); err != nil {
			log.Errorf(c, "wJS.writeResult: %s", err.Error())
		}
	})
}

// handleAPI is the single entry point for all /api calls: it authenticates the
// client (form key and/or OAuth subject), optionally ungzips the "payload"
// form value, and dispatches to the handler named by the "method" form value.
func handleAPI(c context.Context, r *http.Request) (interface{}, error) {
	client := r.PostFormValue("client")
	method := r.PostFormValue("method")
	log.Infof(c, "api %q from %q", method, client)
	if client == "" {
		// Don't log as error if somebody just invokes /api.
		return nil, fmt.Errorf("client is empty: %w", ErrClientBadRequest)
	}
	auth := auth.MakeEndpoint(auth.GoogleTokenInfoEndpoint)
	subj, err := auth.DetermineAuthSubj(timeNow(c), r.Header["Authorization"])
	if err != nil {
		return nil, fmt.Errorf("failed to auth.DetermineAuthSubj(): %w", err)
	}
	password := r.PostFormValue("key")
	ns, err := checkClient(getConfig(c), client, password, subj)
	if err != nil {
		return nil, fmt.Errorf("checkClient('%s') error: %w", client, err)
	}
	var payloadReader io.Reader
	if str := r.PostFormValue("payload"); str != "" {
		gr, err := gzip.NewReader(strings.NewReader(str))
		if err != nil {
			return nil, fmt.Errorf("failed to ungzip payload: %w", err)
		}
		payloadReader = gr
		// Ignore Close() error because we may not read all data.
		defer gr.Close()
	}
	handler, exists := apiHandlers[method]
	if !exists {
		return nil, fmt.Errorf("unknown api method %q", method)
	}
	reply, err := handler(contextWithNamespace(c, ns), payloadReader)
	if err != nil {
		err = fmt.Errorf("method '%s' ns '%s' err: %w", method, ns, err)
	}
	return reply, err
}

// The address of this variable is used as the context key (a unique pointer),
// the string value itself only serves as documentation.
var contextKeyNamespace = "context namespace available for any APIHandler"

// contextWithNamespace stores the client's namespace in the context for
// retrieval by nsHandler-wrapped handlers.
func contextWithNamespace(c context.Context, ns string) context.Context {
	return context.WithValue(c, &contextKeyNamespace, ns)
}

// contextNamespace extracts the namespace stored by contextWithNamespace.
// The type assertion panics if the value was never set, so this must only be
// called on contexts produced by handleAPI.
func contextNamespace(c context.Context) string {
	return c.Value(&contextKeyNamespace).(string)
}

// gcsPayloadHandler json.Decode the gcsURL from payload and stream pointed content.
// This function streams ungzipped content in order to be aligned with other wrappers/handlers.
181 func gcsPayloadHandler(handler APIHandler) APIHandler { 182 return func(c context.Context, payload io.Reader) (interface{}, error) { 183 var gcsURL string 184 if err := json.NewDecoder(payload).Decode(&gcsURL); err != nil { 185 return nil, fmt.Errorf("json.NewDecoder(payload).Decode(&gcsURL): %w", err) 186 } 187 gcsURL = strings.TrimPrefix(gcsURL, "gs://") 188 clientGCS, err := gcs.NewClient(c) 189 if err != nil { 190 return nil, fmt.Errorf("gcs.NewClient: %w", err) 191 } 192 defer clientGCS.Close() 193 gcsPayloadReader, err := clientGCS.FileReader(gcsURL) 194 if err != nil { 195 return nil, fmt.Errorf("clientGCS.FileReader: %w", err) 196 } 197 gz, err := gzip.NewReader(gcsPayloadReader) 198 if err != nil { 199 return nil, fmt.Errorf("gzip.NewReader: %w", err) 200 } 201 // Close() generates error in case of the corrupted data. 202 // In order to check the data checksum all the data should be read. 203 // We don't guarantee all the data will be read - let's ignore. 204 defer gz.Close() 205 return handler(c, gz) 206 } 207 } 208 209 func nsHandler(handler APINamespaceHandler) APIHandler { 210 return func(c context.Context, payload io.Reader) (interface{}, error) { 211 ns := contextNamespace(c) 212 if ns == "" { 213 return nil, fmt.Errorf("must be called within a namespace") 214 } 215 return handler(c, ns, payload) 216 } 217 } 218 219 func apiLogError(c context.Context, payload io.Reader) (interface{}, error) { 220 req := new(dashapi.LogEntry) 221 if err := json.NewDecoder(payload).Decode(req); err != nil { 222 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 223 } 224 log.Errorf(c, "%v: %v", req.Name, req.Text) 225 return nil, nil 226 } 227 228 func apiBuilderPoll(c context.Context, ns string, payload io.Reader) (interface{}, error) { 229 req := new(dashapi.BuilderPollReq) 230 if err := json.NewDecoder(payload).Decode(req); err != nil { 231 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 232 } 233 bugs, _, err := loadAllBugs(c, 
func(query *db.Query) *db.Query { 234 return query.Filter("Namespace=", ns). 235 Filter("Status<", BugStatusFixed) 236 }) 237 if err != nil { 238 return nil, err 239 } 240 m := make(map[string]bool) 241 loop: 242 for _, bug := range bugs { 243 // TODO(dvyukov): include this condition into the query if possible. 244 if len(bug.Commits) == 0 { 245 continue 246 } 247 for _, mgr := range bug.PatchedOn { 248 if mgr == req.Manager { 249 continue loop 250 } 251 } 252 for _, com := range bug.Commits { 253 m[com] = true 254 } 255 } 256 commits := make([]string, 0, len(m)) 257 for com := range m { 258 commits = append(commits, com) 259 } 260 sort.Strings(commits) 261 resp := &dashapi.BuilderPollResp{ 262 PendingCommits: commits, 263 ReportEmail: reportEmail(c, ns), 264 } 265 return resp, nil 266 } 267 268 func reportEmail(c context.Context, ns string) string { 269 for _, reporting := range getNsConfig(c, ns).Reporting { 270 if _, ok := reporting.Config.(*EmailConfig); ok { 271 return ownEmail(c) 272 } 273 } 274 return "" 275 } 276 277 func apiCommitPoll(c context.Context, ns string, payload io.Reader) (interface{}, error) { 278 resp := &dashapi.CommitPollResp{ 279 ReportEmail: reportEmail(c, ns), 280 } 281 for _, repo := range getNsConfig(c, ns).Repos { 282 if repo.NoPoll { 283 continue 284 } 285 resp.Repos = append(resp.Repos, dashapi.Repo{ 286 URL: repo.URL, 287 Branch: repo.Branch, 288 }) 289 } 290 var bugs []*Bug 291 _, err := db.NewQuery("Bug"). 292 Filter("Namespace=", ns). 293 Filter("NeedCommitInfo=", true). 294 Project("Commits"). 295 Limit(100). 
296 GetAll(c, &bugs) 297 if err != nil { 298 return nil, fmt.Errorf("failed to query bugs: %w", err) 299 } 300 commits := make(map[string]bool) 301 for _, bug := range bugs { 302 for _, com := range bug.Commits { 303 commits[com] = true 304 } 305 } 306 for com := range commits { 307 resp.Commits = append(resp.Commits, com) 308 } 309 if getNsConfig(c, ns).RetestMissingBackports { 310 const takeBackportTitles = 5 311 backportCommits, err := pollBackportCommits(c, ns, takeBackportTitles) 312 if err != nil { 313 return nil, err 314 } 315 resp.Commits = append(resp.Commits, backportCommits...) 316 } 317 return resp, nil 318 } 319 320 func pollBackportCommits(c context.Context, ns string, count int) ([]string, error) { 321 // Let's assume that there won't be too many pending backports. 322 list, err := relevantBackportJobs(c) 323 if err != nil { 324 return nil, fmt.Errorf("failed to query backport: %w", err) 325 } 326 var backportTitles []string 327 for _, info := range list { 328 if info.bug.Namespace != ns { 329 continue 330 } 331 backportTitles = append(backportTitles, info.job.Commits[0].Title) 332 } 333 randomizer := rand.New(rand.NewSource(timeNow(c).UnixNano())) 334 randomizer.Shuffle(len(backportTitles), func(i, j int) { 335 backportTitles[i], backportTitles[j] = backportTitles[j], backportTitles[i] 336 }) 337 if len(backportTitles) > count { 338 backportTitles = backportTitles[:count] 339 } 340 return backportTitles, nil 341 } 342 343 func apiUploadCommits(c context.Context, ns string, payload io.Reader) (interface{}, error) { 344 req := new(dashapi.CommitPollResultReq) 345 if err := json.NewDecoder(payload).Decode(req); err != nil { 346 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 347 } 348 // This adds fixing commits to bugs. 349 err := addCommitsToBugs(c, ns, "", nil, req.Commits) 350 if err != nil { 351 return nil, err 352 } 353 // Now add commit info to commits. 
354 for _, com := range req.Commits { 355 if com.Hash == "" { 356 continue 357 } 358 if err := addCommitInfo(c, ns, com); err != nil { 359 return nil, err 360 } 361 } 362 if getNsConfig(c, ns).RetestMissingBackports { 363 err = updateBackportCommits(c, ns, req.Commits) 364 if err != nil { 365 return nil, fmt.Errorf("failed to update backport commits: %w", err) 366 } 367 } 368 return nil, nil 369 } 370 371 func addCommitInfo(c context.Context, ns string, com dashapi.Commit) error { 372 var bugs []*Bug 373 keys, err := db.NewQuery("Bug"). 374 Filter("Namespace=", ns). 375 Filter("Commits=", com.Title). 376 GetAll(c, &bugs) 377 if err != nil { 378 return fmt.Errorf("failed to query bugs: %w", err) 379 } 380 for i, bug := range bugs { 381 if err := addCommitInfoToBug(c, bug, keys[i], com); err != nil { 382 return err 383 } 384 } 385 return nil 386 } 387 388 func addCommitInfoToBug(c context.Context, bug *Bug, bugKey *db.Key, com dashapi.Commit) error { 389 if needUpdate, err := addCommitInfoToBugImpl(c, bug, com); err != nil { 390 return err 391 } else if !needUpdate { 392 return nil 393 } 394 tx := func(c context.Context) error { 395 bug := new(Bug) 396 if err := db.Get(c, bugKey, bug); err != nil { 397 return fmt.Errorf("failed to get bug %v: %w", bugKey.StringID(), err) 398 } 399 if needUpdate, err := addCommitInfoToBugImpl(c, bug, com); err != nil { 400 return err 401 } else if !needUpdate { 402 return nil 403 } 404 if _, err := db.Put(c, bugKey, bug); err != nil { 405 return fmt.Errorf("failed to put bug: %w", err) 406 } 407 return nil 408 } 409 return runInTransaction(c, tx, nil) 410 } 411 412 func addCommitInfoToBugImpl(c context.Context, bug *Bug, com dashapi.Commit) (bool, error) { 413 ci := -1 414 for i, title := range bug.Commits { 415 if title == com.Title { 416 ci = i 417 break 418 } 419 } 420 if ci < 0 { 421 return false, nil 422 } 423 for len(bug.CommitInfo) < len(bug.Commits) { 424 bug.CommitInfo = append(bug.CommitInfo, Commit{}) 425 } 426 hash0 := 
bug.CommitInfo[ci].Hash 427 date0 := bug.CommitInfo[ci].Date 428 author0 := bug.CommitInfo[ci].Author 429 needCommitInfo0 := bug.NeedCommitInfo 430 431 bug.CommitInfo[ci].Hash = com.Hash 432 bug.CommitInfo[ci].Date = com.Date 433 bug.CommitInfo[ci].Author = com.Author 434 bug.NeedCommitInfo = false 435 for i := range bug.CommitInfo { 436 if bug.CommitInfo[i].Hash == "" { 437 bug.NeedCommitInfo = true 438 break 439 } 440 } 441 changed := hash0 != bug.CommitInfo[ci].Hash || 442 date0 != bug.CommitInfo[ci].Date || 443 author0 != bug.CommitInfo[ci].Author || 444 needCommitInfo0 != bug.NeedCommitInfo 445 return changed, nil 446 } 447 448 func apiJobPoll(c context.Context, payload io.Reader) (interface{}, error) { 449 if stop, err := emergentlyStopped(c); err != nil || stop { 450 // The bot's operation was aborted. Don't accept new crash reports. 451 return &dashapi.JobPollResp{}, err 452 } 453 req := new(dashapi.JobPollReq) 454 if err := json.NewDecoder(payload).Decode(req); err != nil { 455 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 456 } 457 if len(req.Managers) == 0 { 458 return nil, fmt.Errorf("no managers") 459 } 460 return pollPendingJobs(c, req.Managers) 461 } 462 463 // nolint: dupl 464 func apiJobDone(c context.Context, payload io.Reader) (interface{}, error) { 465 req := new(dashapi.JobDoneReq) 466 if err := json.NewDecoder(payload).Decode(req); err != nil { 467 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 468 } 469 err := doneJob(c, req) 470 return nil, err 471 } 472 473 // nolint: dupl 474 func apiJobReset(c context.Context, payload io.Reader) (interface{}, error) { 475 req := new(dashapi.JobResetReq) 476 if err := json.NewDecoder(payload).Decode(req); err != nil { 477 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 478 } 479 err := resetJobs(c, req) 480 return nil, err 481 } 482 483 func apiUploadBuild(c context.Context, ns string, payload io.Reader) (interface{}, error) { 484 req := new(dashapi.Build) 
485 if err := json.NewDecoder(payload).Decode(req); err != nil { 486 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 487 } 488 now := timeNow(c) 489 _, isNewBuild, err := uploadBuild(c, now, ns, req, BuildNormal) 490 if err != nil { 491 return nil, err 492 } 493 if isNewBuild { 494 err := updateManager(c, ns, req.Manager, func(mgr *Manager, stats *ManagerStats) error { 495 prevKernel, prevSyzkaller := "", "" 496 if mgr.CurrentBuild != "" { 497 prevBuild, err := loadBuild(c, ns, mgr.CurrentBuild) 498 if err != nil { 499 return err 500 } 501 prevKernel = prevBuild.KernelCommit 502 prevSyzkaller = prevBuild.SyzkallerCommit 503 } 504 log.Infof(c, "new build on %v: kernel %v->%v syzkaller %v->%v", 505 req.Manager, prevKernel, req.KernelCommit, prevSyzkaller, req.SyzkallerCommit) 506 mgr.CurrentBuild = req.ID 507 if req.KernelCommit != prevKernel { 508 mgr.FailedBuildBug = "" 509 } 510 if req.SyzkallerCommit != prevSyzkaller { 511 mgr.FailedSyzBuildBug = "" 512 } 513 return nil 514 }) 515 if err != nil { 516 return nil, err 517 } 518 } 519 if len(req.Commits) != 0 || len(req.FixCommits) != 0 { 520 for i := range req.FixCommits { 521 // Reset hashes just to make sure, 522 // the build does not necessary come from the master repo, so we must not remember hashes. 523 req.FixCommits[i].Hash = "" 524 } 525 if err := addCommitsToBugs(c, ns, req.Manager, req.Commits, req.FixCommits); err != nil { 526 // We've already uploaded the build successfully and manager can use it. 527 // Moreover, addCommitsToBugs scans all bugs and can take long time. 528 // So just log the error. 
529 log.Errorf(c, "failed to add commits to bugs: %v", err) 530 } 531 } 532 return nil, nil 533 } 534 535 func uploadBuild(c context.Context, now time.Time, ns string, req *dashapi.Build, typ BuildType) ( 536 *Build, bool, error) { 537 newAssets := []Asset{} 538 for i, toAdd := range req.Assets { 539 newAsset, err := parseIncomingAsset(c, toAdd, ns) 540 if err != nil { 541 return nil, false, fmt.Errorf("failed to parse asset #%d: %w", i, err) 542 } 543 newAssets = append(newAssets, newAsset) 544 } 545 if build, err := loadBuild(c, ns, req.ID); err == nil { 546 return build, false, nil 547 } 548 checkStrLen := func(str, name string, maxLen int) error { 549 if str == "" { 550 return fmt.Errorf("%v is empty", name) 551 } 552 if len(str) > maxLen { 553 return fmt.Errorf("%v is too long (%v)", name, len(str)) 554 } 555 return nil 556 } 557 if err := checkStrLen(req.Manager, "Build.Manager", MaxStringLen); err != nil { 558 return nil, false, err 559 } 560 if err := checkStrLen(req.ID, "Build.ID", MaxStringLen); err != nil { 561 return nil, false, err 562 } 563 if err := checkStrLen(req.KernelRepo, "Build.KernelRepo", MaxStringLen); err != nil { 564 return nil, false, err 565 } 566 if len(req.KernelBranch) > MaxStringLen { 567 return nil, false, fmt.Errorf("Build.KernelBranch is too long (%v)", len(req.KernelBranch)) 568 } 569 if err := checkStrLen(req.SyzkallerCommit, "Build.SyzkallerCommit", MaxStringLen); err != nil { 570 return nil, false, err 571 } 572 if len(req.CompilerID) > MaxStringLen { 573 return nil, false, fmt.Errorf("Build.CompilerID is too long (%v)", len(req.CompilerID)) 574 } 575 if len(req.KernelCommit) > MaxStringLen { 576 return nil, false, fmt.Errorf("Build.KernelCommit is too long (%v)", len(req.KernelCommit)) 577 } 578 configID, err := putText(c, ns, textKernelConfig, req.KernelConfig) 579 if err != nil { 580 return nil, false, err 581 } 582 build := &Build{ 583 Namespace: ns, 584 Manager: req.Manager, 585 ID: req.ID, 586 Type: typ, 587 Time: now, 
588 OS: req.OS, 589 Arch: req.Arch, 590 VMArch: req.VMArch, 591 SyzkallerCommit: req.SyzkallerCommit, 592 SyzkallerCommitDate: req.SyzkallerCommitDate, 593 CompilerID: req.CompilerID, 594 KernelRepo: req.KernelRepo, 595 KernelBranch: req.KernelBranch, 596 KernelCommit: req.KernelCommit, 597 KernelCommitTitle: req.KernelCommitTitle, 598 KernelCommitDate: req.KernelCommitDate, 599 KernelConfig: configID, 600 Assets: newAssets, 601 } 602 if _, err := db.Put(c, buildKey(c, ns, req.ID), build); err != nil { 603 return nil, false, err 604 } 605 return build, true, nil 606 } 607 608 func addCommitsToBugs(c context.Context, ns, manager string, titles []string, fixCommits []dashapi.Commit) error { 609 presentCommits := make(map[string]bool) 610 bugFixedBy := make(map[string][]string) 611 for _, com := range titles { 612 presentCommits[com] = true 613 } 614 for _, com := range fixCommits { 615 presentCommits[com.Title] = true 616 for _, bugID := range com.BugIDs { 617 bugFixedBy[bugID] = append(bugFixedBy[bugID], com.Title) 618 } 619 } 620 managers, err := managerList(c, ns) 621 if err != nil { 622 return err 623 } 624 // Fetching all bugs in a namespace can be slow, and there is no way to filter only Open/Dup statuses. 625 // So we run a separate query for each status, this both avoids fetching unnecessary data 626 // and splits a long query into two (two smaller queries have lower chances of trigerring 627 // timeouts than one huge). 628 for _, status := range []int{BugStatusOpen, BugStatusDup} { 629 err := addCommitsToBugsInStatus(c, status, ns, manager, managers, presentCommits, bugFixedBy) 630 if err != nil { 631 return err 632 } 633 } 634 return nil 635 } 636 637 func addCommitsToBugsInStatus(c context.Context, status int, ns, manager string, managers []string, 638 presentCommits map[string]bool, bugFixedBy map[string][]string) error { 639 bugs, _, err := loadAllBugs(c, func(query *db.Query) *db.Query { 640 return query.Filter("Namespace=", ns). 
641 Filter("Status=", status) 642 }) 643 if err != nil { 644 return err 645 } 646 for _, bug := range bugs { 647 var fixCommits []string 648 for i := range bug.Reporting { 649 fixCommits = append(fixCommits, bugFixedBy[bug.Reporting[i].ID]...) 650 } 651 sort.Strings(fixCommits) 652 if err := addCommitsToBug(c, bug, manager, managers, fixCommits, presentCommits); err != nil { 653 return err 654 } 655 if bug.Status == BugStatusDup { 656 canon, err := canonicalBug(c, bug) 657 if err != nil { 658 return err 659 } 660 if canon.Status == BugStatusOpen && len(bug.Commits) == 0 { 661 if err := addCommitsToBug(c, canon, manager, managers, 662 fixCommits, presentCommits); err != nil { 663 return err 664 } 665 } 666 } 667 } 668 return nil 669 } 670 671 func addCommitsToBug(c context.Context, bug *Bug, manager string, managers, fixCommits []string, 672 presentCommits map[string]bool) error { 673 if !bugNeedsCommitUpdate(c, bug, manager, fixCommits, presentCommits, true) { 674 return nil 675 } 676 now := timeNow(c) 677 bugKey := bug.key(c) 678 tx := func(c context.Context) error { 679 bug := new(Bug) 680 if err := db.Get(c, bugKey, bug); err != nil { 681 return fmt.Errorf("failed to get bug %v: %w", bugKey.StringID(), err) 682 } 683 if !bugNeedsCommitUpdate(c, bug, manager, fixCommits, presentCommits, false) { 684 return nil 685 } 686 if len(fixCommits) != 0 && !reflect.DeepEqual(bug.Commits, fixCommits) { 687 bug.updateCommits(fixCommits, now) 688 } 689 if manager != "" { 690 bug.PatchedOn = append(bug.PatchedOn, manager) 691 if bug.Status == BugStatusOpen { 692 fixed := true 693 for _, mgr := range managers { 694 if !stringInList(bug.PatchedOn, mgr) { 695 fixed = false 696 break 697 } 698 } 699 if fixed { 700 bug.Status = BugStatusFixed 701 bug.Closed = now 702 } 703 } 704 } 705 if _, err := db.Put(c, bugKey, bug); err != nil { 706 return fmt.Errorf("failed to put bug: %w", err) 707 } 708 return nil 709 } 710 return runInTransaction(c, tx, nil) 711 } 712 713 func 
bugNeedsCommitUpdate(c context.Context, bug *Bug, manager string, fixCommits []string, 714 presentCommits map[string]bool, dolog bool) bool { 715 if len(fixCommits) != 0 && !reflect.DeepEqual(bug.Commits, fixCommits) { 716 if dolog { 717 log.Infof(c, "bug %q is fixed with %q", bug.Title, fixCommits) 718 } 719 return true 720 } 721 if len(bug.Commits) == 0 || manager == "" || stringInList(bug.PatchedOn, manager) { 722 return false 723 } 724 for _, com := range bug.Commits { 725 if !presentCommits[com] { 726 return false 727 } 728 } 729 return true 730 } 731 732 // Note: if you do not need the latest data, prefer CachedManagersList(). 733 func managerList(c context.Context, ns string) ([]string, error) { 734 var builds []*Build 735 _, err := db.NewQuery("Build"). 736 Filter("Namespace=", ns). 737 Project("Manager"). 738 Distinct(). 739 GetAll(c, &builds) 740 if err != nil { 741 return nil, fmt.Errorf("failed to query builds: %w", err) 742 } 743 configManagers := getNsConfig(c, ns).Managers 744 var managers []string 745 for _, build := range builds { 746 if configManagers[build.Manager].Decommissioned { 747 continue 748 } 749 managers = append(managers, build.Manager) 750 } 751 return managers, nil 752 } 753 754 func apiReportBuildError(c context.Context, ns string, payload io.Reader) (interface{}, error) { 755 req := new(dashapi.BuildErrorReq) 756 if err := json.NewDecoder(payload).Decode(req); err != nil { 757 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 758 } 759 now := timeNow(c) 760 build, _, err := uploadBuild(c, now, ns, &req.Build, BuildFailed) 761 if err != nil { 762 return nil, fmt.Errorf("failed to store build: %w", err) 763 } 764 req.Crash.BuildID = req.Build.ID 765 bug, err := reportCrash(c, build, &req.Crash) 766 if err != nil { 767 return nil, fmt.Errorf("failed to store crash: %w", err) 768 } 769 if err := updateManager(c, ns, req.Build.Manager, func(mgr *Manager, stats *ManagerStats) error { 770 log.Infof(c, "failed build on %v: 
kernel=%v", req.Build.Manager, req.Build.KernelCommit) 771 if req.Build.KernelCommit != "" { 772 mgr.FailedBuildBug = bug.keyHash(c) 773 } else { 774 mgr.FailedSyzBuildBug = bug.keyHash(c) 775 } 776 return nil 777 }); err != nil { 778 return nil, fmt.Errorf("failed to update manager: %w", err) 779 } 780 return nil, nil 781 } 782 783 const ( 784 corruptedReportTitle = "corrupted report" 785 suppressedReportTitle = "suppressed report" 786 ) 787 788 func apiReportCrash(c context.Context, ns string, payload io.Reader) (interface{}, error) { 789 if stop, err := emergentlyStopped(c); err != nil || stop { 790 // The bot's operation was aborted. Don't accept new crash reports. 791 return &dashapi.ReportCrashResp{}, err 792 } 793 req := new(dashapi.Crash) 794 if err := json.NewDecoder(payload).Decode(req); err != nil { 795 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 796 } 797 build, err := loadBuild(c, ns, req.BuildID) 798 if err != nil { 799 return nil, err 800 } 801 if !getNsConfig(c, ns).TransformCrash(build, req) { 802 return new(dashapi.ReportCrashResp), nil 803 } 804 var bug2 *Bug 805 if req.OriginalTitle != "" { 806 bug2, err = findExistingBugForCrash(c, ns, []string{req.OriginalTitle}) 807 if err != nil { 808 return nil, fmt.Errorf("original bug query failed: %w", err) 809 } 810 } 811 bug, err := reportCrash(c, build, req) 812 if err != nil { 813 return nil, err 814 } 815 if bug2 != nil && bug2.Title != bug.Title && len(req.ReproLog) > 0 { 816 // During bug reproduction, we have diverted to another bug. 817 // Let's remember this. 
818 err = saveFailedReproLog(c, bug2, build, req.ReproLog) 819 if err != nil { 820 return nil, fmt.Errorf("failed to save failed repro log: %w", err) 821 } 822 } 823 resp := &dashapi.ReportCrashResp{ 824 NeedRepro: needRepro(c, bug), 825 } 826 return resp, nil 827 } 828 829 // nolint: gocyclo 830 func reportCrash(c context.Context, build *Build, req *dashapi.Crash) (*Bug, error) { 831 ns := build.Namespace 832 assets, err := parseCrashAssets(c, req, ns) 833 if err != nil { 834 return nil, err 835 } 836 req.Title = canonicalizeCrashTitle(req.Title, req.Corrupted, req.Suppressed) 837 if req.Corrupted || req.Suppressed { 838 req.AltTitles = []string{req.Title} 839 } else { 840 for i, t := range req.AltTitles { 841 req.AltTitles[i] = normalizeCrashTitle(t) 842 } 843 req.AltTitles = mergeStringList([]string{req.Title}, req.AltTitles) // dedup 844 } 845 req.Maintainers = email.MergeEmailLists(req.Maintainers) 846 847 bug, err := findBugForCrash(c, ns, req.AltTitles) 848 if err != nil { 849 return nil, fmt.Errorf("failed to find bug for the crash: %w", err) 850 } 851 if bug == nil { 852 bug, err = createBugForCrash(c, ns, req) 853 if err != nil { 854 return nil, fmt.Errorf("failed to create a bug: %w", err) 855 } 856 } 857 858 bugKey := bug.key(c) 859 now := timeNow(c) 860 reproLevel := ReproLevelNone 861 if len(req.ReproC) != 0 { 862 reproLevel = ReproLevelC 863 } else if len(req.ReproSyz) != 0 { 864 reproLevel = ReproLevelSyz 865 } 866 save := reproLevel != ReproLevelNone || 867 bug.NumCrashes < int64(maxCrashes()) || 868 now.Sub(bug.LastSavedCrash) > time.Hour || 869 bug.NumCrashes%20 == 0 || 870 !stringInList(bug.MergedTitles, req.Title) 871 if save { 872 if err := saveCrash(c, ns, req, bug, bugKey, build, assets); err != nil { 873 return nil, fmt.Errorf("failed to save the crash: %w", err) 874 } 875 } else { 876 log.Infof(c, "not saving crash for %q", bug.Title) 877 } 878 879 subsystemService := getNsConfig(c, ns).Subsystems.Service 880 881 newSubsystems := 
[]*subsystem.Subsystem{} 882 // Recalculate subsystems on the first saved crash and on the first saved repro, 883 // unless a user has already manually specified them. 884 calculateSubsystems := subsystemService != nil && 885 save && 886 !bug.hasUserSubsystems() && 887 (bug.NumCrashes == 0 || 888 bug.ReproLevel == ReproLevelNone && reproLevel != ReproLevelNone) 889 if calculateSubsystems { 890 newSubsystems, err = inferSubsystems(c, bug, bugKey, &debugtracer.NullTracer{}) 891 if err != nil { 892 log.Errorf(c, "%q: failed to extract subsystems: %s", bug.Title, err) 893 return nil, err 894 } 895 } 896 897 tx := func(c context.Context) error { 898 bug = new(Bug) 899 if err := db.Get(c, bugKey, bug); err != nil { 900 return fmt.Errorf("failed to get bug: %w", err) 901 } 902 bug.LastTime = now 903 if save { 904 bug.LastSavedCrash = now 905 } 906 if reproLevel != ReproLevelNone { 907 bug.NumRepro++ 908 bug.LastReproTime = now 909 } 910 bug.ReproLevel = max(bug.ReproLevel, reproLevel) 911 bug.HeadReproLevel = max(bug.HeadReproLevel, reproLevel) 912 if len(req.Report) != 0 { 913 bug.HasReport = true 914 } 915 if calculateSubsystems { 916 bug.SetAutoSubsystems(c, newSubsystems, now, subsystemService.Revision) 917 } 918 bug.increaseCrashStats(now) 919 bug.HappenedOn = mergeString(bug.HappenedOn, build.Manager) 920 // Migration of older entities (for new bugs Title is always in MergedTitles). 921 bug.MergedTitles = mergeString(bug.MergedTitles, bug.Title) 922 bug.MergedTitles = mergeString(bug.MergedTitles, req.Title) 923 bug.AltTitles = mergeStringList(bug.AltTitles, req.AltTitles) 924 if _, err = db.Put(c, bugKey, bug); err != nil { 925 return fmt.Errorf("failed to put bug: %w", err) 926 } 927 return nil 928 } 929 if err := runInTransaction(c, tx, &db.TransactionOptions{ 930 XG: true, 931 // Very valuable transaction. 
		Attempts: 30,
	}); err != nil {
		return nil, fmt.Errorf("bug updating failed: %w", err)
	}
	if save {
		purgeOldCrashes(c, bug, bugKey)
	}
	return bug, nil
}

// parseCrashAssets converts the incoming dashapi asset descriptions to Asset entities.
// It aborts on the first asset that fails to parse.
func parseCrashAssets(c context.Context, req *dashapi.Crash, ns string) ([]Asset, error) {
	assets := []Asset{}
	for i, toAdd := range req.Assets {
		newAsset, err := parseIncomingAsset(c, toAdd, ns)
		if err != nil {
			return nil, fmt.Errorf("failed to parse asset #%d: %w", i, err)
		}
		assets = append(assets, newAsset)
	}
	return assets, nil
}

// UpdateReportingPriority recomputes crash.ReportLen, which is used as the crash
// selection priority: crashes with a C repro rank above syz-repro-only ones, which
// rank above repro-less ones; ties are broken by the repo's reporting priority,
// title match with the bug, manager priority and arch.
func (crash *Crash) UpdateReportingPriority(c context.Context, build *Build, bug *Bug) {
	prio := int64(kernelRepoInfo(c, build).ReportingPriority) * 1e6
	if crash.ReproC > 0 && !crash.ReproIsRevoked {
		prio += 4e12
	} else if crash.ReproSyz > 0 && !crash.ReproIsRevoked {
		prio += 2e12
	}
	if crash.Title == bug.Title {
		prio += 1e8 // prefer reporting crash that matches bug title
	}
	managerPrio := 0
	if _, mgrConfig := activeManager(c, crash.Manager, bug.Namespace); mgrConfig != nil {
		managerPrio = mgrConfig.Priority
	}
	prio += int64((managerPrio - MinManagerPriority) * 1e5)
	if build.Arch == targets.AMD64 {
		prio += 1e3
	}
	crash.ReportLen = prio
}

// saveCrash stores a new Crash entity (and its associated text blobs) under the given bug.
func saveCrash(c context.Context, ns string, req *dashapi.Crash, bug *Bug, bugKey *db.Key,
	build *Build, assets []Asset) error {
	crash := &Crash{
		Title:   req.Title,
		Manager: build.Manager,
		BuildID: req.BuildID,
		Time:    timeNow(c),
		Maintainers: email.MergeEmailLists(req.Maintainers,
			GetEmails(req.Recipients, dashapi.To),
			GetEmails(req.Recipients, dashapi.Cc)),
		ReproOpts: req.ReproOpts,
		Flags:     int64(req.Flags),
		Assets:    assets,
		ReportElements: CrashReportElements{
			GuiltyFiles: req.GuiltyFiles,
		},
	}
	var err error
	// Large texts are stored as separate compressed Text entities; the Crash entity
	// keeps only their IDs (0 means "no text").
	if crash.Log, err = putText(c, ns, textCrashLog, req.Log); err != nil {
		return err
	}
	if crash.Report, err = putText(c, ns, textCrashReport, req.Report); err != nil {
		return err
	}
	if crash.ReproSyz, err = putText(c, ns, textReproSyz, req.ReproSyz); err != nil {
		return err
	}
	if crash.ReproC, err = putText(c, ns, textReproC, req.ReproC); err != nil {
		return err
	}
	if crash.MachineInfo, err = putText(c, ns, textMachineInfo, req.MachineInfo); err != nil {
		return err
	}
	if crash.ReproLog, err = putText(c, ns, textReproLog, req.ReproLog); err != nil {
		return err
	}
	crash.UpdateReportingPriority(c, build, bug)
	crashKey := db.NewIncompleteKey(c, "Crash", bugKey)
	if _, err = db.Put(c, crashKey, crash); err != nil {
		return fmt.Errorf("failed to put crash: %w", err)
	}
	return nil
}

// purgeOldCrashes bounds datastore growth by deleting the least valuable unreported
// crashes of a bug. It is invoked on crash saving and is a cheap no-op most of the
// time (only runs once the bug has well over maxCrashes and then only every
// purgeEvery saved crashes).
func purgeOldCrashes(c context.Context, bug *Bug, bugKey *db.Key) {
	const purgeEvery = 10
	if bug.NumCrashes <= int64(2*maxCrashes()) || (bug.NumCrashes-1)%purgeEvery != 0 {
		return
	}
	var crashes []*Crash
	keys, err := db.NewQuery("Crash").
		Ancestor(bugKey).
		Filter("Reported=", time.Time{}).
		GetAll(c, &crashes)
	if err != nil {
		log.Errorf(c, "failed to fetch purge crashes: %v", err)
		return
	}
	keyMap := make(map[*Crash]*db.Key)
	for i, crash := range crashes {
		keyMap[crash] = keys[i]
	}
	// Newest first.
	sort.Slice(crashes, func(i, j int) bool {
		return crashes[i].Time.After(crashes[j].Time)
	})
	var toDelete []*db.Key
	latestOnManager := make(map[string]bool)
	uniqueTitle := make(map[string]bool)
	deleted, reproCount, noreproCount := 0, 0, 0
	for _, crash := range crashes {
		if !crash.Reported.IsZero() {
			// The query above filters these out; seeing one here indicates an index anomaly.
			log.Errorf(c, "purging reported crash?")
			continue
		}
		// Preserve latest crash on each manager.
		if !latestOnManager[crash.Manager] {
			latestOnManager[crash.Manager] = true
			continue
		}
		// Preserve at least one crash with each title.
		if !uniqueTitle[crash.Title] {
			uniqueTitle[crash.Title] = true
			continue
		}
		// Preserve maxCrashes latest crashes with repro and without repro.
		count := &noreproCount
		if crash.ReproSyz != 0 || crash.ReproC != 0 {
			count = &reproCount
		}
		if *count < maxCrashes() {
			*count++
			continue
		}
		// Not worth keeping: delete the crash together with its text blobs.
		toDelete = append(toDelete, keyMap[crash])
		if crash.Log != 0 {
			toDelete = append(toDelete, db.NewKey(c, textCrashLog, "", crash.Log, nil))
		}
		if crash.Report != 0 {
			toDelete = append(toDelete, db.NewKey(c, textCrashReport, "", crash.Report, nil))
		}
		if crash.ReproSyz != 0 {
			toDelete = append(toDelete, db.NewKey(c, textReproSyz, "", crash.ReproSyz, nil))
		}
		if crash.ReproC != 0 {
			toDelete = append(toDelete, db.NewKey(c, textReproC, "", crash.ReproC, nil))
		}
		deleted++
		// Cap the amount of work done in a single invocation.
		if deleted == 2*purgeEvery {
			break
		}
	}
	if len(toDelete) == 0 {
		return
	}
	if err := db.DeleteMulti(c, toDelete); err != nil {
		log.Errorf(c, "failed to delete old crashes: %v", err)
		return
	}
	log.Infof(c, "deleted %v crashes for bug %q", deleted, bug.Title)
}

// apiReportFailedRepro is called when a reproduction attempt for a bug did not
// succeed; it bumps the bug's repro attempt stats and optionally saves the repro log.
func apiReportFailedRepro(c context.Context, ns string, payload io.Reader) (interface{}, error) {
	req := new(dashapi.CrashID)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	req.Title = canonicalizeCrashTitle(req.Title, req.Corrupted, req.Suppressed)

	bug, err := findExistingBugForCrash(c, ns, []string{req.Title})
	if err != nil {
		return nil, err
	}
	if bug == nil {
		return nil, fmt.Errorf("%v: can't find bug for crash %q", ns, req.Title)
	}
	build, err := loadBuild(c, ns,
req.BuildID) 1112 if err != nil { 1113 return nil, err 1114 } 1115 return nil, saveFailedReproLog(c, bug, build, req.ReproLog) 1116 } 1117 1118 func saveFailedReproLog(c context.Context, bug *Bug, build *Build, log []byte) error { 1119 now := timeNow(c) 1120 bugKey := bug.key(c) 1121 tx := func(c context.Context) error { 1122 bug := new(Bug) 1123 if err := db.Get(c, bugKey, bug); err != nil { 1124 return fmt.Errorf("failed to get bug: %w", err) 1125 } 1126 bug.NumRepro++ 1127 bug.LastReproTime = now 1128 if len(log) > 0 { 1129 err := saveReproAttempt(c, bug, build, log) 1130 if err != nil { 1131 return fmt.Errorf("failed to save repro log: %w", err) 1132 } 1133 } 1134 if _, err := db.Put(c, bugKey, bug); err != nil { 1135 return fmt.Errorf("failed to put bug: %w", err) 1136 } 1137 return nil 1138 } 1139 return runInTransaction(c, tx, &db.TransactionOptions{ 1140 XG: true, 1141 Attempts: 30, 1142 }) 1143 } 1144 1145 const maxReproLogs = 5 1146 1147 func saveReproAttempt(c context.Context, bug *Bug, build *Build, log []byte) error { 1148 var deleteKeys []*db.Key 1149 for len(bug.ReproAttempts)+1 > maxReproLogs { 1150 deleteKeys = append(deleteKeys, 1151 db.NewKey(c, textReproLog, "", bug.ReproAttempts[0].Log, nil)) 1152 bug.ReproAttempts = bug.ReproAttempts[1:] 1153 } 1154 entry := BugReproAttempt{ 1155 Time: timeNow(c), 1156 Manager: build.Manager, 1157 } 1158 var err error 1159 if entry.Log, err = putText(c, bug.Namespace, textReproLog, log); err != nil { 1160 return err 1161 } 1162 if len(deleteKeys) > 0 { 1163 return db.DeleteMulti(c, deleteKeys) 1164 } 1165 bug.ReproAttempts = append(bug.ReproAttempts, entry) 1166 return nil 1167 } 1168 1169 func apiNeedRepro(c context.Context, ns string, payload io.Reader) (interface{}, error) { 1170 req := new(dashapi.CrashID) 1171 if err := json.NewDecoder(payload).Decode(req); err != nil { 1172 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 1173 } 1174 if req.Corrupted { 1175 resp := &dashapi.NeedReproResp{ 
			NeedRepro: false,
		}
		return resp, nil
	}
	req.Title = canonicalizeCrashTitle(req.Title, req.Corrupted, req.Suppressed)

	bug, err := findExistingBugForCrash(c, ns, []string{req.Title})
	if err != nil {
		return nil, err
	}
	if bug == nil {
		if req.MayBeMissing {
			// Manager does not send leak reports w/o repro to dashboard, we want to reproduce them.
			resp := &dashapi.NeedReproResp{
				NeedRepro: true,
			}
			return resp, nil
		}
		return nil, fmt.Errorf("%v: can't find bug for crash %q", ns, req.Title)
	}
	resp := &dashapi.NeedReproResp{
		NeedRepro: needRepro(c, bug),
	}
	return resp, nil
}

// canonicalizeCrashTitle maps corrupted/suppressed reports onto their dedicated
// aggregate bucket titles and normalizes all other titles.
func canonicalizeCrashTitle(title string, corrupted, suppressed bool) string {
	if corrupted {
		// The report is corrupted and the title is most likely invalid.
		// Such reports are usually unactionable and are discarded.
		// Collect them into a single bin.
		return corruptedReportTitle
	}
	if suppressed {
		// Collect all of them into a single bucket so that it's possible to control and assess them,
		// e.g. if there are some spikes in suppressed reports.
		return suppressedReportTitle
	}
	return normalizeCrashTitle(title)
}

// normalizeCrashTitle caps the title at the maximum text length and strips
// surrounding whitespace.
func normalizeCrashTitle(title string) string {
	return strings.TrimSpace(limitLength(title, maxTextLen))
}

// apiManagerStats records a manager heartbeat: liveness info plus fuzzing statistics
// (some are high-watermarks, some are cumulative counters).
func apiManagerStats(c context.Context, ns string, payload io.Reader) (interface{}, error) {
	req := new(dashapi.ManagerStatsReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	now := timeNow(c)
	err := updateManager(c, ns, req.Name, func(mgr *Manager, stats *ManagerStats) error {
		mgr.Link = req.Addr
		mgr.LastAlive = now
		mgr.CurrentUpTime = req.UpTime
		// Max* / Triaged* fields are high-watermarks; Total* fields accumulate.
		stats.MaxCorpus = max(stats.MaxCorpus, int64(req.Corpus))
		stats.MaxPCs = max(stats.MaxPCs, int64(req.PCs))
		stats.MaxCover = max(stats.MaxCover, int64(req.Cover))
		stats.CrashTypes = max(stats.CrashTypes, int64(req.CrashTypes))
		stats.TotalFuzzingTime += req.FuzzingTime
		stats.TotalCrashes += int64(req.Crashes)
		stats.SuppressedCrashes += int64(req.SuppressedCrashes)
		stats.TotalExecs += int64(req.Execs)
		stats.TriagedCoverage = max(stats.TriagedCoverage, int64(req.TriagedCoverage))
		stats.TriagedPCs = max(stats.TriagedPCs, int64(req.TriagedPCs))
		return nil
	})
	return nil, err
}

// apiUpdateReport updates the stored report elements (e.g. guilty files) of a
// particular crash of a bug.
func apiUpdateReport(c context.Context, ns string, payload io.Reader) (interface{}, error) {
	req := new(dashapi.UpdateReportReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	bug := new(Bug)
	bugKey := db.NewKey(c, "Bug", req.BugID, 0, nil)
	if err := db.Get(c, bugKey, bug); err != nil {
		return nil, fmt.Errorf("failed to get bug: %w", err)
	}
	// Don't leak bugs across namespaces.
	if bug.Namespace != ns {
		return nil, fmt.Errorf("no such bug")
	}
	tx := func(c context.Context) error {
		crash := new(Crash)
		crashKey :=
			db.NewKey(c, "Crash", "", req.CrashID, bugKey)
		if err := db.Get(c, crashKey, crash); err != nil {
			return fmt.Errorf("failed to query the crash: %w", err)
		}
		if req.GuiltyFiles != nil {
			crash.ReportElements.GuiltyFiles = *req.GuiltyFiles
		}
		if _, err := db.Put(c, crashKey, crash); err != nil {
			return fmt.Errorf("failed to put reported crash: %w", err)
		}
		return nil
	}
	return nil, runInTransaction(c, tx, nil)
}

// apiBugList returns the key hashes of all bugs in the namespace.
func apiBugList(c context.Context, ns string, payload io.Reader) (interface{}, error) {
	keys, err := db.NewQuery("Bug").
		Filter("Namespace=", ns).
		KeysOnly().
		GetAll(c, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to query bugs: %w", err)
	}
	resp := &dashapi.BugListResp{}
	for _, key := range keys {
		resp.List = append(resp.List, key.StringID())
	}
	return resp, nil
}

// apiLoadBug builds a bug report for the bug identified by its key hash.
func apiLoadBug(c context.Context, ns string, payload io.Reader) (interface{}, error) {
	req := new(dashapi.LoadBugReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	bug := new(Bug)
	bugKey := db.NewKey(c, "Bug", req.ID, 0, nil)
	if err := db.Get(c, bugKey, bug); err != nil {
		return nil, fmt.Errorf("failed to get bug: %w", err)
	}
	// Don't leak bugs across namespaces.
	if bug.Namespace != ns {
		return nil, fmt.Errorf("no such bug")
	}
	return loadBugReport(c, bug)
}

// apiLoadFullBug returns the full bug info for the bug identified by a reporting ID.
func apiLoadFullBug(c context.Context, payload io.Reader) (interface{}, error) {
	req := new(dashapi.LoadFullBugReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	bug, bugKey, err := findBugByReportingID(c, req.BugID)
	if err != nil {
		return nil, fmt.Errorf("failed to find the bug: %w", err)
	}
	bugReporting, _ := bugReportingByID(bug, req.BugID)
	if
bugReporting == nil { 1318 return nil, fmt.Errorf("failed to find the bug reporting: %w", err) 1319 } 1320 return loadFullBugInfo(c, bug, bugKey, bugReporting) 1321 } 1322 1323 func loadBugReport(c context.Context, bug *Bug) (*dashapi.BugReport, error) { 1324 crash, crashKey, err := findCrashForBug(c, bug) 1325 if err != nil { 1326 return nil, err 1327 } 1328 // Create report for the last reporting so that it's stable and ExtID does not change over time. 1329 bugReporting := &bug.Reporting[len(bug.Reporting)-1] 1330 reporting := getNsConfig(c, bug.Namespace).ReportingByName(bugReporting.Name) 1331 if reporting == nil { 1332 return nil, fmt.Errorf("reporting %v is missing in config", bugReporting.Name) 1333 } 1334 return createBugReport(c, bug, crash, crashKey, bugReporting, reporting) 1335 } 1336 1337 func apiAddBuildAssets(c context.Context, ns string, payload io.Reader) (interface{}, error) { 1338 req := new(dashapi.AddBuildAssetsReq) 1339 if err := json.NewDecoder(payload).Decode(req); err != nil { 1340 return nil, fmt.Errorf("failed to unmarshal request: %w", err) 1341 } 1342 assets := []Asset{} 1343 for i, toAdd := range req.Assets { 1344 asset, err := parseIncomingAsset(c, toAdd, ns) 1345 if err != nil { 1346 return nil, fmt.Errorf("failed to parse asset #%d: %w", i, err) 1347 } 1348 assets = append(assets, asset) 1349 } 1350 _, err := appendBuildAssets(c, ns, req.BuildID, assets) 1351 if err != nil { 1352 return nil, err 1353 } 1354 return nil, nil 1355 } 1356 1357 func parseIncomingAsset(c context.Context, newAsset dashapi.NewAsset, ns string) (Asset, error) { 1358 typeInfo := asset.GetTypeDescription(newAsset.Type) 1359 if typeInfo == nil { 1360 return Asset{}, fmt.Errorf("unknown asset type") 1361 } 1362 _, err := url.ParseRequestURI(newAsset.DownloadURL) 1363 if err != nil { 1364 return Asset{}, fmt.Errorf("invalid URL: %w", err) 1365 } 1366 fsckLog := int64(0) 1367 if len(newAsset.FsckLog) > 0 { 1368 fsckLog, err = putText(c, ns, textFsckLog, 
			newAsset.FsckLog)
		if err != nil {
			return Asset{}, err
		}
	}
	return Asset{
		Type:        newAsset.Type,
		DownloadURL: newAsset.DownloadURL,
		CreateDate:  timeNow(c),
		FsckLog:     fsckLog,
		FsIsClean:   newAsset.FsIsClean,
	}, nil
}

// apiNeededAssetsList returns the list of assets the dashboard still needs.
func apiNeededAssetsList(c context.Context, payload io.Reader) (interface{}, error) {
	return queryNeededAssets(c)
}

// findExistingBugForCrash returns the active bug that was already used to report
// the given crash title, or nil if there is none.
func findExistingBugForCrash(c context.Context, ns string, titles []string) (*Bug, error) {
	// First, try to find an existing bug that we already used to report this crash title.
	var bugs []*Bug
	_, err := db.NewQuery("Bug").
		Filter("Namespace=", ns).
		Filter("MergedTitles=", titles[0]).
		GetAll(c, &bugs)
	if err != nil {
		return nil, fmt.Errorf("failed to query bugs: %w", err)
	}
	// We can find bugs with different bug.Title and uncomparable bug.Seq's.
	// But there should be only one active bug for each crash title,
	// so if we sort by Seq, the first active bug is our target bug.
	sort.Slice(bugs, func(i, j int) bool {
		return bugs[i].Seq > bugs[j].Seq
	})
	for _, bug := range bugs {
		if active, err := isActiveBug(c, bug); err != nil {
			return nil, err
		} else if active {
			return bug, nil
		}
	}
	// This is required for incremental migration.
	// Older bugs don't have MergedTitles, so we need to check Title as well
	// (reportCrash will set MergedTitles later).
	for _, title := range titles {
		bug, err := highestSeqBug(c, ns, title)
		if err != nil {
			return nil, err
		}
		if bug != nil {
			if active, err := isActiveBug(c, bug); err != nil {
				return nil, err
			} else if active {
				return bug, nil
			}
		}
	}
	return nil, nil
}

// highestSeqBug returns the bug with the highest Seq for the given title, or nil.
func highestSeqBug(c context.Context, ns, title string) (*Bug, error) {
	var bugs []*Bug
	_, err := db.NewQuery("Bug").
		Filter("Namespace=", ns).
		Filter("Title=", title).
		Order("-Seq").
		Limit(1).
		GetAll(c, &bugs)
	if err != nil {
		return nil, fmt.Errorf("failed to query the last bug report: %w", err)
	}
	if len(bugs) == 0 {
		return nil, nil
	}
	return bugs[0], nil
}

// findBugForCrash returns the bug a crash with the given (alternative) titles should
// be merged into: an exact existing match if there is one, otherwise the best active
// candidate matched via AltTitles, or nil if the crash needs a new bug.
func findBugForCrash(c context.Context, ns string, titles []string) (*Bug, error) {
	// First, try to find an existing bug that we already used to report this crash title.
	bug, err := findExistingBugForCrash(c, ns, titles)
	if bug != nil || err != nil {
		return bug, err
	}
	// If there is no active bug for this crash title, try to find an existing candidate based on AltTitles.
	var bugs []*Bug
	for _, title := range titles {
		var bugs1 []*Bug
		_, err := db.NewQuery("Bug").
			Filter("Namespace=", ns).
			Filter("AltTitles=", title).
			GetAll(c, &bugs1)
		if err != nil {
			return nil, fmt.Errorf("failed to query bugs: %w", err)
		}
		bugs = append(bugs, bugs1...)
	}
	// Sort to get determinism and skip inactive bugs.
	sort.Slice(bugs, func(i, j int) bool {
		if bugs[i].Title != bugs[j].Title {
			return bugs[i].Title < bugs[j].Title
		}
		return bugs[i].Seq > bugs[j].Seq
	})
	var best *Bug
	bestPrio := 0
	for i, bug := range bugs {
		if i != 0 && bugs[i-1].Title == bug.Title {
			continue // skip inactive bugs
		}
		if active, err := isActiveBug(c, bug); err != nil {
			return nil, err
		} else if !active {
			continue
		}
		// Generally we should have few candidates (one in most cases).
		// However, it's possible if e.g. we first get a data race between A<->B,
		// then a race between C<->D and now we handle a race between B<->D,
		// it can be merged into any of the previous ones.
		// The priority here is very basic. The only known case we want to handle is bug title renaming
		// where we have an active bug with title A, but then A is renamed to B and A is attached as alt title.
		// In such case we want to merge the new crash into the old one. However, it's also unlikely that
		// in this case we have any other candidates.
		// Overall selection algorithm can be arbitrary changed because the selection for existing crashes
		// is fixed with bug.MergedTitles (stable for existing bugs/crashes).
		prio := 0
		if stringInList(titles[1:], bug.Title) {
			prio = 2
		} else if stringInList(bug.AltTitles[1:], titles[0]) {
			prio = 1
		}
		if best == nil || prio > bestPrio {
			best, bestPrio = bug, prio
		}
	}
	return best, nil
}

// createBugForCrash creates (or revives) the Bug entity for a new crash title.
func createBugForCrash(c context.Context, ns string, req *dashapi.Crash) (*Bug, error) {
	// Datastore limits the number of entities involved in a transaction to 25, so it's possible
	// to iterate over them all only up to some point.
	// To optimize the process, let's first obtain the maximum known seq for the title outside
	// of the transaction and then iterate a bit more in case of conflicts.
	startSeq := int64(0)
	prevBug, err := highestSeqBug(c, ns, req.Title)
	if err != nil {
		return nil, err
	} else if prevBug != nil {
		startSeq = prevBug.Seq + 1
	}

	var bug *Bug
	now := timeNow(c)
	tx := func(c context.Context) error {
		// Find the first seq for which there is either no bug yet (create one) or
		// the existing bug's canonical copy is still open (reuse it).
		for seq := startSeq; ; seq++ {
			bug = new(Bug)
			bugHash := bugKeyHash(c, ns, req.Title, seq)
			bugKey := db.NewKey(c, "Bug", bugHash, 0, nil)
			if err := db.Get(c, bugKey, bug); err != nil {
				if err != db.ErrNoSuchEntity {
					return fmt.Errorf("failed to get bug: %w", err)
				}
				// No bug with this seq yet -- create a fresh one.
				bug = &Bug{
					Namespace:      ns,
					Seq:            seq,
					Title:          req.Title,
					MergedTitles:   []string{req.Title},
					AltTitles:      req.AltTitles,
					Status:         BugStatusOpen,
					NumCrashes:     0,
					NumRepro:       0,
					ReproLevel:     ReproLevelNone,
					HasReport:      false,
					FirstTime:      now,
					LastTime:       now,
					SubsystemsTime: now,
				}
				err = bug.updateReportings(c, getNsConfig(c, ns), now)
				if err != nil {
					return err
				}
				if _, err = db.Put(c, bugKey, bug); err != nil {
					return fmt.Errorf("failed to put new bug: %w", err)
				}
				return nil
			}
			canon, err := canonicalBug(c, bug)
			if err != nil {
				return err
			}
			if canon.Status != BugStatusOpen {
				continue
			}
			return nil
		}
	}
	if err := runInTransaction(c, tx, &db.TransactionOptions{
		XG: true,
		// Very valuable transaction.
		Attempts: 30,
	}); err != nil {
		return nil, err
	}
	return bug, nil
}

// isActiveBug reports whether the bug's canonical copy is still open.
func isActiveBug(c context.Context, bug *Bug) (bool, error) {
	if bug == nil {
		return false, nil
	}
	canon, err := canonicalBug(c, bug)
	if err != nil {
		return false, err
	}
	return canon.Status == BugStatusOpen, nil
}

// needRepro reports whether we want a reproducer for the bug, taking both the bug
// itself and its canonical (dup target) copy into account.
func needRepro(c context.Context, bug *Bug) bool {
	if !needReproForBug(c, bug) {
		return false
	}
	canon, err := canonicalBug(c, bug)
	if err != nil {
		log.Errorf(c, "failed to get canonical bug: %v", err)
		return false
	}
	return needReproForBug(c, canon)
}

// syzErrorTitleRe matches internal syzkaller failures for which a C repro can never exist.
var syzErrorTitleRe = regexp.MustCompile(`^SYZFAIL:|^SYZFATAL:`)

// needReproForBug reports whether this particular bug entity still needs a repro.
func needReproForBug(c context.Context, bug *Bug) bool {
	// We already have fixing commits.
	if len(bug.Commits) > 0 {
		return false
	}
	if bug.Title == corruptedReportTitle ||
		bug.Title == suppressedReportTitle {
		return false
	}
	if !getNsConfig(c, bug.Namespace).NeedRepro(bug) {
		return false
	}
	bestReproLevel := ReproLevelC
	// For some bugs there's anyway no chance to find a C repro.
	if syzErrorTitleRe.MatchString(bug.Title) {
		bestReproLevel = ReproLevelSyz
	}
	if bug.HeadReproLevel < bestReproLevel {
		// We have not found a best-level repro yet, try until we do.
		return bug.NumRepro < maxReproPerBug || timeSince(c, bug.LastReproTime) >= reproRetryPeriod
	}
	// When the best repro is already found, still do a repro attempt once in a while.
	return timeSince(c, bug.LastReproTime) >= reproStalePeriod
}

// dedupTextFor lists text tags that are stored deduplicated (keyed by a content
// hash) because identical blobs are uploaded many times.
var dedupTextFor = map[string]bool{
	textKernelConfig: true,
	textMachineInfo:  true,
}

// putText gzip-compresses data and stores it as a Text entity in the given
// namespace, returning the entity ID (0 for empty data). Overly large inputs are
// truncated; if the compressed form still exceeds the datastore entity limit, the
// head of the data is dropped iteratively (for crash logs the tail with the panic
// message is the valuable part).
func putText(c context.Context, ns, tag string, data []byte) (int64, error) {
	if ns == "" {
		return 0, fmt.Errorf("putting text outside of namespace")
	}
	if len(data) == 0 {
		return 0, nil
	}
	const (
		// Kernel crash log is capped at ~1MB, but vm.Diagnose can add more.
		// These text files usually compress very well.
		maxTextLen       = 10 << 20   // 10 MB
		maxCompressedLen = 1000 << 10 // datastore entity limit is 1MB
	)
	if len(data) > maxTextLen {
		data = data[:maxTextLen]
	}
	b := new(bytes.Buffer)
	for {
		// Writing to a bytes.Buffer cannot fail, so errors are ignored here.
		z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
		z.Write(data)
		z.Close()
		if len(b.Bytes()) < maxCompressedLen {
			break
		}
		// For crash logs, it's better to preserve the end of the log - that is,
		// where the panic message resides.
		// Other types of data are not really assumed to be larger than 1MB compressed.
		data = data[len(data)/10:]
		b.Reset()
	}
	var key *db.Key
	if dedupTextFor[tag] {
		// Deduplicated texts get a stable key derived from (namespace, content).
		h := hash.Hash([]byte(ns), b.Bytes())
		key = db.NewKey(c, tag, "", h.Truncate64(), nil)
	} else {
		key = db.NewIncompleteKey(c, tag, nil)
	}
	text := &Text{
		Namespace: ns,
		Text:      b.Bytes(),
	}
	key, err := db.Put(c, key, text)
	if err != nil {
		return 0, err
	}
	return key.IntID(), nil
}

// getText loads and decompresses the Text entity with the given tag/ID, returning
// the data and the namespace it belongs to. A zero ID yields empty results.
func getText(c context.Context, tag string, id int64) ([]byte, string, error) {
	if id == 0 {
		return nil, "", nil
	}
	text := new(Text)
	if err := db.Get(c, db.NewKey(c, tag, "", id, nil), text); err != nil {
		return nil, "", fmt.Errorf("failed to read text %v: %w", tag, err)
	}
	d, err := gzip.NewReader(bytes.NewBuffer(text.Text))
	if err != nil {
		return nil, "", fmt.Errorf("failed to read text %v: %w", tag, err)
	}
	data, err := io.ReadAll(d)
	if err != nil {
		return nil, "", fmt.Errorf("failed to read text %v: %w", tag, err)
	}
	return data, text.Namespace, nil
}

// limitLength essentially does return s[:max],
// but it ensures that we do not split a UTF-8 rune in half.
// Otherwise appengine python scripts will break badly.
func limitLength(s string, max int) string {
	s = strings.TrimSpace(s)
	if len(s) <= max {
		return s
	}
	for {
		s = s[:max]
		// DecodeLastRuneInString returns (RuneError, 1) iff the string ends
		// mid-rune; back off one byte at a time until the cut is clean.
		r, size := utf8.DecodeLastRuneInString(s)
		if r != utf8.RuneError || size != 1 {
			return s
		}
		max--
	}
}

// GetEmails extracts the sorted list of email addresses of the given recipient type.
func GetEmails(r dashapi.Recipients, filter dashapi.RecipientType) []string {
	emails := []string{}
	for _, user := range r {
		if user.Type == filter {
			emails = append(emails, user.Address.Address)
		}
	}
	sort.Strings(emails)
	return emails
}

// Verifies that the given credentials are acceptable and returns the
// corresponding namespace.
func checkClient(conf *GlobalConfig, name0, secretPassword, oauthSubject string) (string, error) {
	// checkAuth accepts either an OAuth subject (value configured with the
	// auth.OauthMagic prefix) or the plain secret password.
	// Constant-time comparisons avoid leaking secrets via timing.
	checkAuth := func(ns, a string) (string, error) {
		if strings.HasPrefix(a, auth.OauthMagic) &&
			subtle.ConstantTimeCompare([]byte(a), []byte(oauthSubject)) == 1 {
			return ns, nil
		}
		if subtle.ConstantTimeCompare([]byte(a), []byte(secretPassword)) == 0 {
			return ns, ErrAccess
		}
		return ns, nil
	}
	// Global clients are not tied to any namespace.
	for name, authenticator := range conf.Clients {
		if name == name0 {
			return checkAuth("", authenticator)
		}
	}
	for ns, cfg := range conf.Namespaces {
		for name, authenticator := range cfg.Clients {
			if name == name0 {
				return checkAuth(ns, authenticator)
			}
		}
	}
	return "", ErrAccess
}

// handleRefreshSubsystems is a cron handler that incrementally reassigns bug
// subsystems in every namespace; per-namespace failures are logged, not fatal.
func handleRefreshSubsystems(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	const updateBugsCount = 25
	for ns := range getConfig(c).Namespaces {
		err := reassignBugSubsystems(c, ns, updateBugsCount)
		if err != nil {
			log.Errorf(c, "failed to update subsystems for %s: %v", ns, err)
		}
	}
}

// apiSaveDiscussion records an external discussion, keeping only the bug IDs
// that actually resolve to existing bugs.
func apiSaveDiscussion(c context.Context, payload io.Reader) (interface{}, error) {
	req := new(dashapi.SaveDiscussionReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	d := req.Discussion
	newBugIDs := []string{}
	for _, id := range d.BugIDs {
		_, _, err := findBugByReportingID(c, id)
		if err == nil {
			newBugIDs = append(newBugIDs, id)
		}
	}
	d.BugIDs = newBugIDs
	if len(d.BugIDs) == 0 {
		return nil, nil
	}
	return nil, mergeDiscussion(c, d)
}

// emergentlyStopped reports whether an emergency stop was recorded.
func emergentlyStopped(c context.Context) (bool, error) {
	keys, err := db.NewQuery("EmergencyStop").
		Limit(1).
		KeysOnly().
		GetAll(c, nil)
	if err != nil {
		return false, err
	}
	return len(keys) > 0, nil
}

// recordEmergencyStop records who pressed the emergency stop button and when.
func recordEmergencyStop(c context.Context) error {
	key := db.NewKey(c, "EmergencyStop", "all", 0, nil)
	_, err := db.Put(c, key, &EmergencyStop{
		Time: timeNow(c),
		User: user.Current(c).Email,
	})
	return err
}

// Share crash logs for non-reproduced bugs with syz-managers.
// In future, this can also take care of repro exchange between instances
// in the place of syz-hub.
func apiLogToReproduce(c context.Context, ns string, payload io.Reader) (interface{}, error) {
	req := new(dashapi.LogToReproReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	build, err := loadBuild(c, ns, req.BuildID)
	if err != nil {
		return nil, err
	}
	// First check if there have been any manual requests.
	log, err := takeReproTask(c, ns, build.Manager)
	if err != nil {
		return nil, err
	}
	if log != nil {
		return &dashapi.LogToReproResp{
			CrashLog: log,
			Type:     dashapi.ManualLog,
		}, nil
	}

	bugs, _, err := loadAllBugs(c, func(query *db.Query) *db.Query {
		return query.Filter("Namespace=", ns).
			Filter("HappenedOn=", build.Manager).
			Filter("Status=", BugStatusOpen)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to query bugs: %w", err)
	}
	// Shuffle so that subsequent calls consider different bugs.
	rand.New(rand.NewSource(timeNow(c).UnixNano())).Shuffle(len(bugs), func(i, j int) {
		bugs[i], bugs[j] = bugs[j], bugs[i]
	})
	// Let's limit the load on the DB.
	const bugsToConsider = 10
	checkedBugs := 0
	for _, bug := range bugs {
		if bug.ReproLevel != ReproLevelNone {
			continue
		}
		if len(bug.Commits) > 0 || len(bug.ReproAttempts) > 0 {
			// For now let's focus on all bugs where we have never ever
			// finished a bug reproduction process.
			continue
		}
		if !crashNeedsRepro(bug.Title) || !needReproForBug(c, bug) {
			continue
		}
		checkedBugs++
		if checkedBugs > bugsToConsider {
			break
		}
		resp, err := logToReproForBug(c, bug, build.Manager)
		if resp != nil || err != nil {
			return resp, err
		}
	}
	return nil, nil
}

// logToReproForBug returns a crash log of the bug obtained on the given manager,
// or nil if there is none among the latest crashes.
func logToReproForBug(c context.Context, bug *Bug, manager string) (*dashapi.LogToReproResp, error) {
	const considerCrashes = 10
	crashes, _, err := queryCrashesForBug(c, bug.key(c), considerCrashes)
	if err != nil {
		return nil, err
	}
	for _, crash := range crashes {
		if crash.Manager != manager {
			continue
		}
		crashLog, _, err := getText(c, textCrashLog, crash.Log)
		if err != nil {
			return nil, fmt.Errorf("failed to query a crash log: %w", err)
		}
		return &dashapi.LogToReproResp{
			Title:    bug.Title,
			CrashLog: crashLog,
			Type:     dashapi.RetryReproLog,
		}, nil
	}
	return nil, nil
}

// saveReproTask records a manual request to reproduce the given crash log on the
// given manager.
func saveReproTask(c context.Context, ns, manager string, repro []byte) error {
	log, err := putText(c, ns, textCrashLog, repro)
	if err != nil {
		return err
	}
	// We don't control the status of each attempt, so let's just try twice.
	const attempts = 2
	obj := &ReproTask{
		Namespace:    ns,
		Manager:      manager,
		Log:          log,
		AttemptsLeft: attempts,
	}
	key := db.NewIncompleteKey(c, "ReproTask", nil)
	_, err = db.Put(c, key, obj)
	return err
}

// takeReproTask pops (decrements) a pending manual repro task for the manager and
// returns its crash log, or nil if there are no pending tasks.
func takeReproTask(c context.Context, ns, manager string) ([]byte, error) {
	var tasks []*ReproTask
	keys, err := db.NewQuery("ReproTask").
		Filter("Namespace=", ns).
		Filter("Manager=", manager).
		Filter("AttemptsLeft>", 0).
		GetAll(c, &tasks)
	if err != nil || len(keys) == 0 {
		return nil, err
	}

	// Yes, it's possible that the entity will be modified simultaneously, and we
	// ideally need a transaction, but let's just ignore this possibility -- in the
	// worst case we'd just try to reproduce it once more.
	key, task := keys[0], tasks[0]
	task.AttemptsLeft--
	task.LastAttempt = timeNow(c)
	if _, err := db.Put(c, key, task); err != nil {
		return nil, err
	}
	log, _, err := getText(c, textCrashLog, task.Log)
	return log, err
}

// apiCreateUploadURL generates a fresh unique upload path in the configured
// upload bucket.
func apiCreateUploadURL(c context.Context, payload io.Reader) (interface{}, error) {
	bucket := getConfig(c).UploadBucket
	if bucket == "" {
		return nil, errors.New("not configured")
	}
	return fmt.Sprintf("%s/%s.upload", bucket, uuid.New().String()), nil
}

// apiSendEmail sends an email on behalf of the dashboard.
func apiSendEmail(c context.Context, payload io.Reader) (interface{}, error) {
	req := new(dashapi.SendEmailReq)
	if err := json.NewDecoder(payload).Decode(req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %w", err)
	}
	var headers mail.Header
	if req.InReplyTo != "" {
		headers = mail.Header{"In-Reply-To": []string{req.InReplyTo}}
	}
	return nil, sendEmail(c, &aemail.Message{
		Sender:  req.Sender,
		Headers: headers,
		To:      req.To,
		Cc:      req.Cc,
		Subject: req.Subject,
		Body:    req.Body,
	})
}
apiSaveCoverage reads jsonl data from payload and stores it to coveragedb. 1958 // First payload jsonl line is a coveragedb.HistoryRecord (w/o session and time). 1959 // Second+ records are coveragedb.JSONLWrapper. 1960 func apiSaveCoverage(c context.Context, payload io.Reader) (interface{}, error) { 1961 descr := new(coveragedb.HistoryRecord) 1962 jsonDec := json.NewDecoder(payload) 1963 if err := jsonDec.Decode(descr); err != nil { 1964 return 0, fmt.Errorf("json.NewDecoder(coveragedb.HistoryRecord).Decode: %w", err) 1965 } 1966 rowsCreated, err := coveragedb.SaveMergeResult(c, getCoverageDBClient(c), descr, jsonDec) 1967 if err != nil { 1968 log.Errorf(c, "error storing coverage for ns %s, date %s: %v", 1969 descr.Namespace, descr.DateTo.String(), err) 1970 } else { 1971 log.Infof(c, "updated coverage for ns %s, date %s to %d rows", 1972 descr.Namespace, descr.DateTo.String(), descr.TotalRows) 1973 } 1974 return &rowsCreated, err 1975 }