go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/bisection/server/analyses_test.go

// Copyright 2023 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"
	"fmt"
	"testing"
	"time"

	"cloud.google.com/go/bigquery"
	"go.chromium.org/luci/bisection/internal/lucianalysis"
	"go.chromium.org/luci/bisection/model"
	pb "go.chromium.org/luci/bisection/proto/v1"
	"go.chromium.org/luci/bisection/util/testutil"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"

	buildbucketpb "go.chromium.org/luci/buildbucket/proto"
	"go.chromium.org/luci/gae/impl/memory"
	"go.chromium.org/luci/gae/service/datastore"
	"go.chromium.org/luci/server/secrets"

	. "github.com/smartystreets/goconvey/convey"
	. "go.chromium.org/luci/common/testing/assertions"
)

func TestQueryAnalysis(t *testing.T) {
	t.Parallel()
	server := &AnalysesServer{}
	c := memory.Use(context.Background())
	testutil.UpdateIndices(c)
	datastore.GetTestable(c).AutoIndex(true)

	Convey("No BuildFailure Info", t, func() {
		req := &pb.QueryAnalysisRequest{}
		_, err := server.QueryAnalysis(c, req)
		So(err, ShouldNotBeNil)
		So(status.Convert(err).Code(), ShouldEqual, codes.InvalidArgument)
	})

	Convey("No bbid", t, func() {
		req := &pb.QueryAnalysisRequest{BuildFailure: &pb.BuildFailure{}}
		_, err := server.QueryAnalysis(c, req)
		So(err, ShouldNotBeNil)
		So(status.Convert(err).Code(), ShouldEqual, codes.InvalidArgument)
	})

	Convey("Unsupported step", t, func() {
		req := &pb.QueryAnalysisRequest{
			BuildFailure: &pb.BuildFailure{
				FailedStepName: "some step",
				Bbid: 123,
			},
		}
		_, err := server.QueryAnalysis(c, req)
		So(err, ShouldNotBeNil)
		So(status.Convert(err).Code(), ShouldEqual, codes.Unimplemented)
	})

	Convey("No analysis found", t, func() {
		req := &pb.QueryAnalysisRequest{
			BuildFailure: &pb.BuildFailure{
				FailedStepName: "compile",
				Bbid: 123,
			},
		}
		_, err := server.QueryAnalysis(c, req)
		So(err, ShouldNotBeNil)
		So(status.Convert(err).Code(), ShouldEqual, codes.NotFound)
	})

	Convey("Analysis found", t, func() {
		// Prepares datastore
		failedBuild := &model.LuciFailedBuild{
			Id: 123,
			LuciBuild: model.LuciBuild{
				Project: "chromium/test",
				Bucket: "ci",
				Builder: "android",
			},
			BuildFailureType: pb.BuildFailureType_COMPILE,
		}
		So(datastore.Put(c, failedBuild), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		compileFailure := &model.CompileFailure{
			Id: 123,
			Build: datastore.KeyForObj(c, failedBuild),
		}
		So(datastore.Put(c, compileFailure), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		suspect := &model.Suspect{
			GitilesCommit: buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Id: "commit5",
			},
			ReviewUrl: "http://this/is/review/url",
			ReviewTitle: "This is review title",
			ActionDetails: model.ActionDetails{
				RevertURL: "https://this/is/revert/review/url",
				IsRevertCreated: true,
				RevertCreateTime: (&timestamppb.Timestamp{Seconds: 100}).AsTime(),
				IsRevertCommitted: true,
				RevertCommitTime: (&timestamppb.Timestamp{Seconds: 200}).AsTime(),
			},
			VerificationStatus: model.SuspectVerificationStatus_ConfirmedCulprit,
			Score: 100,
			Justification: "Justification",
		}
		So(datastore.Put(c, suspect), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		compileFailureAnalysis := &model.CompileFailureAnalysis{
			CompileFailure: datastore.KeyForObj(c, compileFailure),
			FirstFailedBuildId: 119,
			InitialRegressionRange: &pb.RegressionRange{
				LastPassed: &buildbucketpb.GitilesCommit{
					Host: "host1",
					Project: "proj1",
					Ref: "ref",
					Id: "commit9",
				},
				FirstFailed: &buildbucketpb.GitilesCommit{
					Host: "host1",
					Project: "proj1",
					Ref: "ref",
					Id: "commit0",
				},
			},
		}
		So(datastore.Put(c, compileFailureAnalysis), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		heuristicAnalysis := &model.CompileHeuristicAnalysis{
			ParentAnalysis: datastore.KeyForObj(c, compileFailureAnalysis),
		}
		So(datastore.Put(c, heuristicAnalysis), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// Create nth section analysis
		nsa := &model.CompileNthSectionAnalysis{
			ParentAnalysis: datastore.KeyForObj(c, compileFailureAnalysis),
			Status: pb.AnalysisStatus_FOUND,
			RunStatus: pb.AnalysisRunStatus_ENDED,
			StartTime: (&timestamppb.Timestamp{Seconds: 100}).AsTime(),
			EndTime: (&timestamppb.Timestamp{Seconds: 102}).AsTime(),
			BlameList: testutil.CreateBlamelist(10),
		}
		So(datastore.Put(c, nsa), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// Create suspect for nthsection
		nthSectionSuspect := &model.Suspect{
			GitilesCommit: buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Id: "commit6",
			},
			ReviewUrl: "http://this/is/review/url1",
			ReviewTitle: "This is review title1",
			VerificationStatus: model.SuspectVerificationStatus_Vindicated,
			ParentAnalysis: datastore.KeyForObj(c, nsa),
		}
		So(datastore.Put(c, nthSectionSuspect), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// Add culprit verification rerun build for suspect
		suspectRerunBuild := &model.CompileRerunBuild{
			Id: 8877665544332211,
			LuciBuild: model.LuciBuild{
				BuildId: 8877665544332211,
				Project: "chromium",
				Bucket: "findit",
				Builder: "luci-bisection-single-revision",
				CreateTime: (&timestamppb.Timestamp{Seconds: 100}).AsTime(),
				StartTime: (&timestamppb.Timestamp{Seconds: 101}).AsTime(),
				EndTime: (&timestamppb.Timestamp{Seconds: 102}).AsTime(),
				Status: buildbucketpb.Status_FAILURE,
				GitilesCommit: buildbucketpb.GitilesCommit{
					Host: "host1",
					Project: "proj1",
					Ref: "ref",
					Id: "commit5",
				},
			},
		}
		So(datastore.Put(c, suspectRerunBuild), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// Add culprit verification single rerun for suspect
		suspectSingleRerun := &model.SingleRerun{
			Analysis: datastore.KeyForObj(c, compileFailureAnalysis),
			Suspect: datastore.KeyForObj(c, suspect),
			RerunBuild: datastore.KeyForObj(c, suspectRerunBuild),
			Status: pb.RerunStatus_RERUN_STATUS_FAILED,
			GitilesCommit: buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit5",
			},
			CreateTime: (&timestamppb.Timestamp{Seconds: 100}).AsTime(),
			StartTime: (&timestamppb.Timestamp{Seconds: 101}).AsTime(),
			EndTime: (&timestamppb.Timestamp{Seconds: 102}).AsTime(),
			Type: model.RerunBuildType_CulpritVerification,
			Priority: 100,
		}
		So(datastore.Put(c, suspectSingleRerun), ShouldBeNil)

		// Add culprit verification rerun build for parent of suspect
		parentRerunBuild := &model.CompileRerunBuild{
			Id: 7766554433221100,
			LuciBuild: model.LuciBuild{
				BuildId: 7766554433221100,
				Project: "chromium",
				Bucket: "findit",
				Builder: "luci-bisection-single-revision",
				CreateTime: (&timestamppb.Timestamp{Seconds: 200}).AsTime(),
				StartTime: (&timestamppb.Timestamp{Seconds: 201}).AsTime(),
				EndTime: (&timestamppb.Timestamp{Seconds: 202}).AsTime(),
				Status: buildbucketpb.Status_SUCCESS,
				GitilesCommit: buildbucketpb.GitilesCommit{
					Host: "host1",
					Project: "proj1",
					Ref: "ref",
					Id: "commit6",
				},
			},
		}
		So(datastore.Put(c, parentRerunBuild), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// Add culprit verification single rerun for parent of suspect
		parentSingleRerun := &model.SingleRerun{
			Analysis: datastore.KeyForObj(c, compileFailureAnalysis),
			Suspect: datastore.KeyForObj(c, suspect),
			RerunBuild: datastore.KeyForObj(c, parentRerunBuild),
			Status: pb.RerunStatus_RERUN_STATUS_PASSED,
			GitilesCommit: buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit6",
			},
			CreateTime: (&timestamppb.Timestamp{Seconds: 200}).AsTime(),
			StartTime: (&timestamppb.Timestamp{Seconds: 201}).AsTime(),
			EndTime: (&timestamppb.Timestamp{Seconds: 202}).AsTime(),
			Type: model.RerunBuildType_CulpritVerification,
			Priority: 100,
		}
		So(datastore.Put(c, parentSingleRerun), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// Update suspect's culprit verification results
		suspect.ParentAnalysis = datastore.KeyForObj(c, heuristicAnalysis)
		suspect.VerificationStatus = model.SuspectVerificationStatus_ConfirmedCulprit
		suspect.SuspectRerunBuild = datastore.KeyForObj(c, suspectRerunBuild)
		suspect.ParentRerunBuild = datastore.KeyForObj(c, parentRerunBuild)
		So(datastore.Put(c, suspect), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		compileFailureAnalysis.VerifiedCulprits = []*datastore.Key{
			datastore.KeyForObj(c, suspect),
		}
		So(datastore.Put(c, compileFailureAnalysis), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		// nth section rerun
		rrBuild := &model.CompileRerunBuild{
			Id: 800999000,
		}
		So(datastore.Put(c, rrBuild), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		nthSectionRerun := &model.SingleRerun{
			Analysis: datastore.KeyForObj(c, compileFailureAnalysis),
			NthSectionAnalysis: datastore.KeyForObj(c, nsa),
			Status: pb.RerunStatus_RERUN_STATUS_PASSED,
			GitilesCommit: buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit8",
			},
			RerunBuild: datastore.KeyForObj(c, rrBuild),
			CreateTime: (&timestamppb.Timestamp{Seconds: 300}).AsTime(),
			StartTime: (&timestamppb.Timestamp{Seconds: 301}).AsTime(),
			EndTime: (&timestamppb.Timestamp{Seconds: 302}).AsTime(),
			Type: model.RerunBuildType_NthSection,
			Priority: 100,
		}

		So(datastore.Put(c, nthSectionRerun), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		req := &pb.QueryAnalysisRequest{
			BuildFailure: &pb.BuildFailure{
				FailedStepName: "compile",
				Bbid: 123,
			},
		}

		res, err := server.QueryAnalysis(c, req)
		So(err, ShouldBeNil)
		So(len(res.Analyses), ShouldEqual, 1)

		analysis := res.Analyses[0]
		So(analysis.Builder, ShouldResemble, &buildbucketpb.BuilderID{
			Project: "chromium/test",
			Bucket: "ci",
			Builder: "android",
		})
		So(analysis.BuildFailureType, ShouldEqual, pb.BuildFailureType_COMPILE)
		So(len(analysis.Culprits), ShouldEqual, 1)
		So(proto.Equal(analysis.Culprits[0], &pb.Culprit{
			Commit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Id: "commit5",
			},
			ReviewUrl: "http://this/is/review/url",
			ReviewTitle: "This is review title",
			CulpritAction: []*pb.CulpritAction{
				{
					ActionType: pb.CulpritActionType_CULPRIT_AUTO_REVERTED,
					RevertClUrl: "https://this/is/revert/review/url",
					ActionTime: &timestamppb.Timestamp{Seconds: 200},
				},
			},
			VerificationDetails: &pb.SuspectVerificationDetails{
				Status: string(model.SuspectVerificationStatus_ConfirmedCulprit),
				SuspectRerun: &pb.SingleRerun{
					Bbid: 8877665544332211,
					StartTime: &timestamppb.Timestamp{Seconds: 101},
					EndTime: &timestamppb.Timestamp{Seconds: 102},
					RerunResult: &pb.RerunResult{
						RerunStatus: pb.RerunStatus_RERUN_STATUS_FAILED,
					},
					Commit: &buildbucketpb.GitilesCommit{
						Host: "host1",
						Project: "proj1",
						Ref: "ref",
						Id: "commit5",
					},
				},
				ParentRerun: &pb.SingleRerun{
					Bbid: 7766554433221100,
					StartTime: &timestamppb.Timestamp{Seconds: 201},
					EndTime: &timestamppb.Timestamp{Seconds: 202},
					RerunResult: &pb.RerunResult{
						RerunStatus: pb.RerunStatus_RERUN_STATUS_PASSED,
					},
					Commit: &buildbucketpb.GitilesCommit{
						Host: "host1",
						Project: "proj1",
						Ref: "ref",
						Id: "commit6",
					},
				},
			},
		}), ShouldBeTrue)

		So(len(analysis.HeuristicResult.Suspects), ShouldEqual, 1)
		So(proto.Equal(analysis.HeuristicResult.Suspects[0], &pb.HeuristicSuspect{
			GitilesCommit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Id: "commit5",
			},
			ReviewUrl: "http://this/is/review/url",
			ReviewTitle: "This is review title",
			Score: 100,
			Justification: "Justification",
			ConfidenceLevel: pb.SuspectConfidenceLevel_HIGH,
			VerificationDetails: &pb.SuspectVerificationDetails{
				Status: string(model.SuspectVerificationStatus_ConfirmedCulprit),
				SuspectRerun: &pb.SingleRerun{
					Bbid: 8877665544332211,
					StartTime: &timestamppb.Timestamp{Seconds: 101},
					EndTime: &timestamppb.Timestamp{Seconds: 102},
					RerunResult: &pb.RerunResult{
						RerunStatus: pb.RerunStatus_RERUN_STATUS_FAILED,
					},
					Commit: &buildbucketpb.GitilesCommit{
						Host: "host1",
						Project: "proj1",
						Ref: "ref",
						Id: "commit5",
					},
				},
				ParentRerun: &pb.SingleRerun{
					Bbid: 7766554433221100,
					StartTime: &timestamppb.Timestamp{Seconds: 201},
					EndTime: &timestamppb.Timestamp{Seconds: 202},
					RerunResult: &pb.RerunResult{
						RerunStatus: pb.RerunStatus_RERUN_STATUS_PASSED,
					},
					Commit: &buildbucketpb.GitilesCommit{
						Host: "host1",
						Project: "proj1",
						Ref: "ref",
						Id: "commit6",
					},
				},
			},
		}), ShouldBeTrue)

		nthSectionResult := analysis.NthSectionResult
		So(nthSectionResult, ShouldNotBeNil)
		So(proto.Equal(nthSectionResult.StartTime, &timestamppb.Timestamp{Seconds: 100}), ShouldBeTrue)
		So(proto.Equal(nthSectionResult.EndTime, &timestamppb.Timestamp{Seconds: 102}), ShouldBeTrue)
		So(nthSectionResult.Status, ShouldEqual, pb.AnalysisStatus_FOUND)
		So(nthSectionResult.Suspect, ShouldResembleProto, &pb.NthSectionSuspect{
			GitilesCommit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Id: "commit6",
			},
			ReviewUrl: "http://this/is/review/url1",
			ReviewTitle: "This is review title1",
			VerificationDetails: &pb.SuspectVerificationDetails{
				Status: string(model.SuspectVerificationStatus_Vindicated),
			},
			Commit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Id: "commit6",
			},
		})

		So(nthSectionResult.RemainingNthSectionRange, ShouldResembleProto, &pb.RegressionRange{
			LastPassed: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit5",
			},
			FirstFailed: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit5",
			},
		})

		So(len(nthSectionResult.Reruns), ShouldEqual, 3)

		So(nthSectionResult.Reruns[0], ShouldResembleProto, &pb.SingleRerun{
			Bbid: 8877665544332211,
			StartTime: &timestamppb.Timestamp{Seconds: 101},
			EndTime: &timestamppb.Timestamp{Seconds: 102},
			RerunResult: &pb.RerunResult{
				RerunStatus: pb.RerunStatus_RERUN_STATUS_FAILED,
			},
			Commit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit5",
			},
			Index: "5",
			Type: "Culprit Verification",
		})

		So(nthSectionResult.Reruns[1], ShouldResembleProto, &pb.SingleRerun{
			Bbid: 7766554433221100,
			StartTime: &timestamppb.Timestamp{Seconds: 201},
			EndTime: &timestamppb.Timestamp{Seconds: 202},
			RerunResult: &pb.RerunResult{
				RerunStatus: pb.RerunStatus_RERUN_STATUS_PASSED,
			},
			Commit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit6",
			},
			Index: "6",
			Type: "Culprit Verification",
		})

		So(nthSectionResult.Reruns[2], ShouldResembleProto, &pb.SingleRerun{
			Bbid: 800999000,
			StartTime: &timestamppb.Timestamp{Seconds: 301},
			EndTime: &timestamppb.Timestamp{Seconds: 302},
			RerunResult: &pb.RerunResult{
				RerunStatus: pb.RerunStatus_RERUN_STATUS_PASSED,
			},
			Commit: &buildbucketpb.GitilesCommit{
				Host: "host1",
				Project: "proj1",
				Ref: "ref",
				Id: "commit8",
			},
			Index: "8",
			Type: "NthSection",
		})

		So(nthSectionResult.BlameList, ShouldResembleProto, nsa.BlameList)
	})

	Convey("Analysis found for a similar failure", t, func() {
		// Prepares datastore
		basedFailedBuild := &model.LuciFailedBuild{
			Id: 122,
		}
		So(datastore.Put(c, basedFailedBuild), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		basedCompileFailure := &model.CompileFailure{
			Id: 122,
			Build: datastore.KeyForObj(c, basedFailedBuild),
		}
		So(datastore.Put(c, basedCompileFailure), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		failedBuild := &model.LuciFailedBuild{
			Id: 123,
		}
		So(datastore.Put(c, failedBuild), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		compileFailure := &model.CompileFailure{
			Id: 123,
			Build: datastore.KeyForObj(c, failedBuild),
			MergedFailureKey: datastore.KeyForObj(c, basedCompileFailure),
		}
		So(datastore.Put(c, compileFailure), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		compileFailureAnalysis := &model.CompileFailureAnalysis{
			CompileFailure: datastore.KeyForObj(c, basedCompileFailure),
		}
		So(datastore.Put(c, compileFailureAnalysis), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		req := &pb.QueryAnalysisRequest{
			BuildFailure: &pb.BuildFailure{
				FailedStepName: "compile",
				Bbid: 123,
			},
		}

		res, err := server.QueryAnalysis(c, req)
		So(err, ShouldBeNil)
		So(len(res.Analyses), ShouldEqual, 1)
	})

}

func TestListAnalyses(t *testing.T) {
	t.Parallel()
	server := &AnalysesServer{}

	Convey("List existing analyses", t, func() {
		// Set up context and AEAD so that page tokens can be generated
		c := memory.Use(context.Background())
		c = secrets.GeneratePrimaryTinkAEADForTest(c)

		// Prepares datastore
		failureAnalysis1 := &model.CompileFailureAnalysis{
			Id: 1,
			CreateTime: (&timestamppb.Timestamp{Seconds: 100}).AsTime(),
		}
		failureAnalysis2 := &model.CompileFailureAnalysis{
			Id: 2,
			CreateTime: (&timestamppb.Timestamp{Seconds: 102}).AsTime(),
		}
		failureAnalysis3 := &model.CompileFailureAnalysis{
			Id: 3,
			CreateTime: (&timestamppb.Timestamp{Seconds: 101}).AsTime(),
		}
		failureAnalysis4 := &model.CompileFailureAnalysis{
			Id: 4,
			CreateTime: (&timestamppb.Timestamp{Seconds: 103}).AsTime(),
		}
		So(datastore.Put(c, failureAnalysis1), ShouldBeNil)
		So(datastore.Put(c, failureAnalysis2), ShouldBeNil)
		So(datastore.Put(c, failureAnalysis3), ShouldBeNil)
		So(datastore.Put(c, failureAnalysis4), ShouldBeNil)
		datastore.GetTestable(c).CatchupIndexes()

		Convey("Invalid page size", func() {
			req := &pb.ListAnalysesRequest{
				PageSize: -5,
			}
			_, err := server.ListAnalyses(c, req)
			So(err, ShouldNotBeNil)
			So(status.Convert(err).Code(), ShouldEqual, codes.InvalidArgument)
		})

		Convey("Specifying page size is optional", func() {
			req := &pb.ListAnalysesRequest{}
			res, err := server.ListAnalyses(c, req)
			So(err, ShouldBeNil)
			So(len(res.Analyses), ShouldEqual, 4)

			Convey("Next page token is empty if there are no more analyses", func() {
				So(res.NextPageToken, ShouldEqual, "")
			})
		})

		Convey("Response is limited by the page size", func() {
			req := &pb.ListAnalysesRequest{
				PageSize: 3,
			}
			res, err := server.ListAnalyses(c, req)
			So(err, ShouldBeNil)
			So(len(res.Analyses), ShouldEqual, req.PageSize)
			So(res.NextPageToken, ShouldNotEqual, "")

			Convey("Returned analyses are sorted correctly", func() {
				So(res.Analyses[0].AnalysisId, ShouldEqual, 4)
				So(res.Analyses[1].AnalysisId, ShouldEqual, 2)
				So(res.Analyses[2].AnalysisId, ShouldEqual, 3)

				Convey("Page token will get the next page of analyses", func() {
					req = &pb.ListAnalysesRequest{
						PageSize: 3,
						PageToken: res.NextPageToken,
					}
					res, err = server.ListAnalyses(c, req)
					So(err, ShouldBeNil)
					So(len(res.Analyses), ShouldEqual, 1)
					So(res.Analyses[0].AnalysisId, ShouldEqual, 1)
				})
			})
		})
	})
}

func TestListTestAnalyses(t *testing.T) {
	t.Parallel()
	server := &AnalysesServer{}

	Convey("List existing analyses", t, func() {
		// Set up context and AEAD so that page tokens can be generated
		ctx := memory.Use(context.Background())
		ctx = secrets.GeneratePrimaryTinkAEADForTest(ctx)
		testutil.UpdateIndices(ctx)

		// Prepares datastore
		for i := 1; i < 5; i++ {
			tfa := testutil.CreateTestFailureAnalysis(ctx, &testutil.TestFailureAnalysisCreationOption{
				ID: int64(i),
				CreateTime: time.Unix(int64(100+i), 0).UTC(),
				TestFailureKey: datastore.MakeKey(ctx, "TestFailure", i),
			})
			testutil.CreateTestFailure(ctx, &testutil.TestFailureCreationOption{
				ID: int64(i),
				Analysis: tfa,
				IsPrimary: true,
			})
		}

		Convey("Empty project", func() {
			req := &pb.ListTestAnalysesRequest{
				PageSize: 3,
			}
			_, err := server.ListTestAnalyses(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Convert(err).Code(), ShouldEqual, codes.InvalidArgument)
		})

		Convey("Invalid page size", func() {
			req := &pb.ListTestAnalysesRequest{
				Project: "chromium",
				PageSize: -5,
			}
			_, err := server.ListTestAnalyses(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Convert(err).Code(), ShouldEqual, codes.InvalidArgument)
		})

		Convey("Specifying page size is optional", func() {
			req := &pb.ListTestAnalysesRequest{
				Project: "chromium",
			}
			res, err := server.ListTestAnalyses(ctx, req)
			So(err, ShouldBeNil)
			So(len(res.Analyses), ShouldEqual, 4)
			So(res.NextPageToken, ShouldEqual, "")
		})

		Convey("Response is limited by the page size", func() {
			req := &pb.ListTestAnalysesRequest{
				Project: "chromium",
				PageSize: 3,
			}
			res, err := server.ListTestAnalyses(ctx, req)
			So(err, ShouldBeNil)
			So(len(res.Analyses), ShouldEqual, req.PageSize)
			So(res.NextPageToken, ShouldNotEqual, "")
			So(res.Analyses[0].AnalysisId, ShouldEqual, 4)
			So(res.Analyses[1].AnalysisId, ShouldEqual, 3)
			So(res.Analyses[2].AnalysisId, ShouldEqual, 2)

			// Next page.
			req = &pb.ListTestAnalysesRequest{
				Project: "chromium",
				PageSize: 3,
				PageToken: res.NextPageToken,
			}
			res, err = server.ListTestAnalyses(ctx, req)
			So(err, ShouldBeNil)
			So(len(res.Analyses), ShouldEqual, 1)
			So(res.Analyses[0].AnalysisId, ShouldEqual, 1)
		})
	})
}

func TestGetTestAnalyses(t *testing.T) {
	t.Parallel()
	server := &AnalysesServer{}

	Convey("List existing analyses", t, func() {
		// Set up context and AEAD so that page tokens can be generated
		ctx := memory.Use(context.Background())
		ctx = secrets.GeneratePrimaryTinkAEADForTest(ctx)
		testutil.UpdateIndices(ctx)

		tfa := testutil.CreateTestFailureAnalysis(ctx, &testutil.TestFailureAnalysisCreationOption{
			ID: int64(100),
			CreateTime: time.Unix(int64(100), 0).UTC(),
			TestFailureKey: datastore.MakeKey(ctx, "TestFailure", 100),
		})
		testutil.CreateTestFailure(ctx, &testutil.TestFailureCreationOption{
			ID: int64(100),
			Analysis: tfa,
			IsPrimary: true,
			TestID: "testID",
			StartHour: time.Unix(int64(100), 0).UTC(),
		})

		Convey("Not found", func() {
			req := &pb.GetTestAnalysisRequest{
				AnalysisId: 101,
			}
			_, err := server.GetTestAnalysis(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Convert(err).Code(), ShouldEqual, codes.NotFound)
		})

		Convey("Found", func() {
			req := &pb.GetTestAnalysisRequest{
				AnalysisId: 100,
			}
			res, err := server.GetTestAnalysis(ctx, req)
			So(err, ShouldBeNil)
			So(res, ShouldResembleProto, &pb.TestAnalysis{
				AnalysisId: 100,
				Builder: &buildbucketpb.BuilderID{
					Project: "chromium",
					Bucket: "bucket",
					Builder: "builder",
				},
				SampleBbid: 8000,
				CreatedTime: timestamppb.New(time.Unix(100, 0).UTC()),
				StartCommit: &buildbucketpb.GitilesCommit{},
				EndCommit: &buildbucketpb.GitilesCommit{},
				TestFailures: []*pb.TestFailure{
					{
						TestId: "testID",
						IsPrimary: true,
						Variant: &pb.Variant{},
						StartHour: timestamppb.New(time.Unix(100, 0).UTC()),
					},
				},
			})
		})
	})
}

func TestBatchGetTestAnalyses(t *testing.T) {
	t.Parallel()
	analysisClient := &fakeLUCIAnalysisClient{}
	server := &AnalysesServer{AnalysisClient: analysisClient}
	Convey("invalid request", t, func() {
		ctx := memory.Use(context.Background())
		testutil.UpdateIndices(ctx)
		Convey("missing project", func() {
			req := &pb.BatchGetTestAnalysesRequest{
				TestFailures: []*pb.BatchGetTestAnalysesRequest_TestFailureIdentifier{},
			}
			_, err := server.BatchGetTestAnalyses(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Code(err), ShouldEqual, codes.InvalidArgument)
			So(err.Error(), ShouldContainSubstring, "project: unspecified")
		})

		Convey("missing test failures", func() {
			req := &pb.BatchGetTestAnalysesRequest{
				Project: "chromium",
				TestFailures: []*pb.BatchGetTestAnalysesRequest_TestFailureIdentifier{},
			}
			_, err := server.BatchGetTestAnalyses(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Code(err), ShouldEqual, codes.InvalidArgument)
			So(err.Error(), ShouldContainSubstring, "test_failures: unspecified")
		})

		Convey("missing test id", func() {
			req := &pb.BatchGetTestAnalysesRequest{
				Project: "chromium",
				TestFailures: []*pb.BatchGetTestAnalysesRequest_TestFailureIdentifier{{}},
			}
			_, err := server.BatchGetTestAnalyses(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Code(err), ShouldEqual, codes.InvalidArgument)
			So(err.Error(), ShouldContainSubstring, "test_variants[0]: test_id: unspecified")
		})

		Convey("invalid variant hash", func() {
			req := &pb.BatchGetTestAnalysesRequest{
				Project: "chromium",
				TestFailures: []*pb.BatchGetTestAnalysesRequest_TestFailureIdentifier{{
					TestId: "testid",
					VariantHash: "randomstring",
					RefHash: "randomstring",
				}},
			}
			_, err := server.BatchGetTestAnalyses(ctx, req)
			So(err, ShouldNotBeNil)
			So(status.Code(err), ShouldEqual, codes.InvalidArgument)
			So(err.Error(), ShouldContainSubstring, "test_variants[0].variant_hash")
		})
	})

	Convey("valid request", t, func() {
		ctx := memory.Use(context.Background())
		testutil.UpdateIndices(ctx)

		testFailureInRequest := []*pb.BatchGetTestAnalysesRequest_TestFailureIdentifier{}
		testVerdictKeys := []lucianalysis.TestVerdictKey{}
		for i := 1; i < 6; i++ {
			tfa := testutil.CreateTestFailureAnalysis(ctx, &testutil.TestFailureAnalysisCreationOption{
				ID: int64(100 + i),
				CreateTime: time.Unix(int64(100), 0).UTC(),
				TestFailureKey: datastore.MakeKey(ctx, "TestFailure", 100+i),
			})
			// Create the most recent test failure.
			testutil.CreateTestFailure(ctx, &testutil.TestFailureCreationOption{
				ID: int64(100 + i),
				Analysis: tfa,
				TestID: "testid" + fmt.Sprint(i),
				VariantHash: "aaaaaaaaaaaaaaa" + fmt.Sprint(i),
				RefHash: "bbbbbbbbbbbbbbb" + fmt.Sprint(i),
				StartPosition: 101,
				EndPosition: 110,
				IsPrimary: true,
				StartHour: time.Unix(int64(99), 0).UTC(),
				IsDiverged: i == 5, // TestFailure 105 is diverged.
			})
			// Create another less recent test failure.
			testutil.CreateTestFailure(ctx, &testutil.TestFailureCreationOption{
				ID: int64(1000 + i),
				TestID: "testid" + fmt.Sprint(i),
				VariantHash: "aaaaaaaaaaaaaaa" + fmt.Sprint(i),
				RefHash: "bbbbbbbbbbbbbbb" + fmt.Sprint(i),
				StartPosition: 90,
				EndPosition: 99,
				IsPrimary: true,
			})
			testFailureInRequest = append(testFailureInRequest, &pb.BatchGetTestAnalysesRequest_TestFailureIdentifier{
				TestId: "testid" + fmt.Sprint(i),
				VariantHash: "aaaaaaaaaaaaaaa" + fmt.Sprint(i),
				RefHash: "bbbbbbbbbbbbbbb" + fmt.Sprint(i),
			})
			testVerdictKeys = append(testVerdictKeys, lucianalysis.TestVerdictKey{
				TestID: "testid" + fmt.Sprint(i),
				VariantHash: "aaaaaaaaaaaaaaa" + fmt.Sprint(i),
				RefHash: "bbbbbbbbbbbbbbb" + fmt.Sprint(i),
			})
		}

		Convey("request with multiple test failures", func() {
			req := &pb.BatchGetTestAnalysesRequest{
				Project: "chromium",
				TestFailures: testFailureInRequest,
			}
			segments := [][]*lucianalysis.Segment{
				nil, // No changepoint data -> return nil.
				{{
					StartPosition: bigquery.NullInt64{Int64: 1, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 100, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 1, Valid: true},
				}}, // One segment -> return nil.
				{{
					StartPosition: bigquery.NullInt64{Int64: 111, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 200, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 1, Valid: true},
				}, {
					StartPosition: bigquery.NullInt64{Int64: 1, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 105, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 0, Valid: true},
				}}, // Two segments, failure not ongoing, regression range (105,111] -> return nil.
				{{
					StartPosition: bigquery.NullInt64{Int64: 109, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 200, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 1, Valid: true},
				}, {
					StartPosition: bigquery.NullInt64{Int64: 1, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 101, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 0, Valid: true},
				}}, // Two segments, failure is ongoing, regression range (101,109] -> return test analysis 104.
				{{
					StartPosition: bigquery.NullInt64{Int64: 109, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 200, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 1, Valid: true},
				}, {
					StartPosition: bigquery.NullInt64{Int64: 1, Valid: true},
					EndPosition: bigquery.NullInt64{Int64: 101, Valid: true},
					CountTotalResults: bigquery.NullInt64{Int64: 1, Valid: true},
					CountUnexpectedResults: bigquery.NullInt64{Int64: 0, Valid: true},
				}}, // Two segments, failure is ongoing, regression range (101,109] -> do not return a test analysis because the test failure is diverged.
			}
			analysisClient.ChangepointAnalysisForTestVariantResponse = makeChangepointAnalysisForTestVariantResponse(testVerdictKeys, segments)

			resp, err := server.BatchGetTestAnalyses(ctx, req)
			So(err, ShouldBeNil)
			So(resp, ShouldResembleProto, &pb.BatchGetTestAnalysesResponse{
				TestAnalyses: []*pb.TestAnalysis{nil, nil, nil, {
					AnalysisId: 104,
					Builder: &buildbucketpb.BuilderID{Project: "chromium", Bucket: "bucket", Builder: "builder"},
					CreatedTime: timestamppb.New(time.Unix(int64(100), 0).UTC()),
					EndCommit: &buildbucketpb.GitilesCommit{Position: 110},
					SampleBbid: 8000,
					StartCommit: &buildbucketpb.GitilesCommit{Position: 101},
					TestFailures: []*pb.TestFailure{
						{
							TestId: "testid4",
							VariantHash: "aaaaaaaaaaaaaaa4",
							RefHash: "bbbbbbbbbbbbbbb4",
							Variant: &pb.Variant{},
							IsDiverged: false,
							IsPrimary: true,
							StartHour: timestamppb.New(time.Unix(int64(99), 0).UTC()),
						},
					},
				}, nil},
			})
		})
	})
}

// fakeLUCIAnalysisClient is a fake LUCI Analysis client that returns a canned
// changepoint analysis response.
type fakeLUCIAnalysisClient struct {
	ChangepointAnalysisForTestVariantResponse map[lucianalysis.TestVerdictKey]*lucianalysis.ChangepointResult
}

func (cl *fakeLUCIAnalysisClient) ChangepointAnalysisForTestVariant(ctx context.Context, project string, keys []lucianalysis.TestVerdictKey) (map[lucianalysis.TestVerdictKey]*lucianalysis.ChangepointResult, error) {
	return cl.ChangepointAnalysisForTestVariantResponse, nil
}

// makeChangepointAnalysisForTestVariantResponse builds a fake changepoint result
// for each test verdict key, pairing it with the segments at the same index.
func makeChangepointAnalysisForTestVariantResponse(keys []lucianalysis.TestVerdictKey, segments [][]*lucianalysis.Segment) map[lucianalysis.TestVerdictKey]*lucianalysis.ChangepointResult {
	results := map[lucianalysis.TestVerdictKey]*lucianalysis.ChangepointResult{}
	for i, key := range keys {
		results[key] = &lucianalysis.ChangepointResult{
			TestID: key.TestID,
			VariantHash: key.VariantHash,
			RefHash: key.RefHash,
			Segments: segments[i],
		}
	}
	return results
}