go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/analysis/rpc/test_variant_branches.go

// Copyright 2023 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rpc

import (
	"context"
	"encoding/hex"
	"fmt"
	"strconv"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"

	"go.chromium.org/luci/common/errors"
	gitilespb "go.chromium.org/luci/common/proto/gitiles"
	"go.chromium.org/luci/grpc/appstatus"
	rdbpbutil "go.chromium.org/luci/resultdb/pbutil"
	"go.chromium.org/luci/server/span"

	"go.chromium.org/luci/analysis/internal/changepoints"
	"go.chromium.org/luci/analysis/internal/changepoints/inputbuffer"
	cpb "go.chromium.org/luci/analysis/internal/changepoints/proto"
	"go.chromium.org/luci/analysis/internal/changepoints/testvariantbranch"
	"go.chromium.org/luci/analysis/internal/gitiles"
	"go.chromium.org/luci/analysis/internal/pagination"
	"go.chromium.org/luci/analysis/internal/perms"
	"go.chromium.org/luci/analysis/internal/testverdicts"
	"go.chromium.org/luci/analysis/pbutil"
	pb "go.chromium.org/luci/analysis/proto/v1"
)

// TestVerdictClient provides read access to test verdicts in BigQuery.
type TestVerdictClient interface {
	ReadTestVerdictsPerSourcePosition(ctx context.Context, options testverdicts.ReadTestVerdictsPerSourcePositionOptions) ([]*testverdicts.CommitWithVerdicts, error)
}

// NewTestVariantBranchesServer returns a new pb.TestVariantBranchesServer.
func NewTestVariantBranchesServer(tvc TestVerdictClient) pb.TestVariantBranchesServer {
	return &pb.DecoratedTestVariantBranches{
		Prelude:  checkAllowedPrelude,
		Service:  &testVariantBranchesServer{testVerdictClient: tvc},
		Postlude: gRPCifyAndLogPostlude,
	}
}

// testVariantBranchesServer implements pb.TestVariantBranchesServer.
type testVariantBranchesServer struct {
	testVerdictClient TestVerdictClient
}

// GetRaw fetches the raw test variant branch analysis from Spanner.
func (*testVariantBranchesServer) GetRaw(ctx context.Context, req *pb.GetRawTestVariantBranchRequest) (*pb.TestVariantBranchRaw, error) {
	// Currently, we only allow LUCI Analysis admins to use this API.
	// In the future, if this endpoint is used for the UI, we should
	// have a proper ACL check.
	if err := checkAllowed(ctx, luciAnalysisAdminGroup); err != nil {
		return nil, err
	}
	tvbk, err := validateGetRawTestVariantBranchRequest(req)
	if err != nil {
		return nil, invalidArgumentError(err)
	}

	txn, cancel := span.ReadOnlyTransaction(ctx)
	defer cancel()
	tvbs, err := testvariantbranch.Read(txn, []testvariantbranch.Key{tvbk})
	if err != nil {
		return nil, errors.Annotate(err, "read test variant branch").Err()
	}
	// Should not happen.
	if len(tvbs) != 1 {
		return nil, fmt.Errorf("expected to find only 1 test variant branch. Got %d", len(tvbs))
	}
	// Not found.
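	// testvariantbranch.Read returns one entry per requested key, with a nil
	// entry for keys that have no corresponding row, so a nil here means the
	// analysis does not exist.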
	if tvbs[0] == nil {
		return nil, appstatus.Error(codes.NotFound, "analysis not found")
	}
	// Convert to proto.
	analysis, err := toTestVariantBranchRawProto(tvbs[0])
	if err != nil {
		return nil, errors.Annotate(err, "build proto").Err()
	}
	return analysis, nil
}

// BatchGet returns the current state of segments for test variant branches.
func (*testVariantBranchesServer) BatchGet(ctx context.Context, req *pb.BatchGetTestVariantBranchRequest) (*pb.BatchGetTestVariantBranchResponse, error) {
	// Currently, we only allow Googlers to use this API.
	// TODO: implement a proper ACL check with realms.
	if err := checkAllowed(ctx, googlerOnlyGroup); err != nil {
		return nil, err
	}
	if err := validateBatchGetTestVariantBranchRequest(req); err != nil {
		return nil, invalidArgumentError(err)
	}
	keys := make([]testvariantbranch.Key, 0, len(req.Names))
	for _, name := range req.Names {
		project, testID, variantHash, refHash, err := parseTestVariantBranchName(name)
		if err != nil {
			return nil, err
		}
		refHashBytes, err := hex.DecodeString(refHash)
		if err != nil {
			// This line is unreachable as the ref hash should have been validated already.
			return nil, errors.Reason("ref hash must be an encoded hexadecimal string").Err()
		}
		keys = append(keys, testvariantbranch.Key{
			Project:     project,
			TestID:      testID,
			VariantHash: variantHash,
			RefHash:     testvariantbranch.RefHash(refHashBytes),
		})
	}

	txn, cancel := span.ReadOnlyTransaction(ctx)
	defer cancel()
	tvbs, err := testvariantbranch.Read(txn, keys)
	if err != nil {
		return nil, errors.Annotate(err, "read test variant branch").Err()
	}
	tvbpbs := make([]*pb.TestVariantBranch, 0, len(req.Names))
	var analysis changepoints.Analyzer
	for _, tvb := range tvbs {
		if tvb == nil {
			tvbpbs = append(tvbpbs, nil)
			continue
		}
		refHash := hex.EncodeToString(tvb.RefHash)
		tvbpbs = append(tvbpbs, &pb.TestVariantBranch{
			Name:        testVariantBranchName(tvb.Project, tvb.TestID, tvb.VariantHash, refHash),
			Project:     tvb.Project,
			TestId:      tvb.TestID,
			VariantHash: tvb.VariantHash,
			RefHash:     refHash,
			Variant:     tvb.Variant,
			Ref:         tvb.SourceRef,
			Segments:    toSegmentsProto(tvb, analysis),
		})
	}
	return &pb.BatchGetTestVariantBranchResponse{
		TestVariantBranches: tvbpbs,
	}, nil
}

func validateGetRawTestVariantBranchRequest(req *pb.GetRawTestVariantBranchRequest) (testvariantbranch.Key, error) {
	project, testID, variantHash, refHash, err := parseTestVariantBranchName(req.Name)
	if err != nil {
		return testvariantbranch.Key{}, errors.Annotate(err, "name").Err()
	}

	// Check ref hash.
	refHashBytes, err := hex.DecodeString(refHash)
	if err != nil {
		return testvariantbranch.Key{}, errors.Reason("ref component must be an encoded hexadecimal string").Err()
	}
	return testvariantbranch.Key{
		Project:     project,
		TestID:      testID,
		VariantHash: variantHash,
		RefHash:     testvariantbranch.RefHash(refHashBytes),
	}, nil
}

func toTestVariantBranchRawProto(tvb *testvariantbranch.Entry) (*pb.TestVariantBranchRaw, error) {
	var finalizedSegments *anypb.Any
	var finalizingSegment *anypb.Any
	var statistics *anypb.Any

	// Hide the internal Spanner proto from our clients, as they
	// must not depend on it. If this API is used for anything
	// other than debug purposes in future, we will need to define
	// a wire proto and use it instead.
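	// Each populated message below is wrapped in a google.protobuf.Any;
	// fields that are unset in storage stay nil rather than becoming an
	// empty Any.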
	if tvb.FinalizedSegments.GetSegments() != nil {
		var err error
		finalizedSegments, err = anypb.New(tvb.FinalizedSegments)
		if err != nil {
			return nil, err
		}
	}
	if tvb.FinalizingSegment != nil {
		var err error
		finalizingSegment, err = anypb.New(tvb.FinalizingSegment)
		if err != nil {
			return nil, err
		}
	}
	if tvb.Statistics != nil {
		var err error
		statistics, err = anypb.New(tvb.Statistics)
		if err != nil {
			return nil, err
		}
	}

	refHash := hex.EncodeToString(tvb.RefHash)
	result := &pb.TestVariantBranchRaw{
		Name:              testVariantBranchName(tvb.Project, tvb.TestID, tvb.VariantHash, refHash),
		Project:           tvb.Project,
		TestId:            tvb.TestID,
		VariantHash:       tvb.VariantHash,
		RefHash:           refHash,
		Variant:           tvb.Variant,
		Ref:               tvb.SourceRef,
		FinalizedSegments: finalizedSegments,
		FinalizingSegment: finalizingSegment,
		Statistics:        statistics,
		HotBuffer:         toInputBufferProto(tvb.InputBuffer.HotBuffer),
		ColdBuffer:        toInputBufferProto(tvb.InputBuffer.ColdBuffer),
	}
	return result, nil
}

func toInputBufferProto(history inputbuffer.History) *pb.InputBuffer {
	result := &pb.InputBuffer{
		Length:   int64(len(history.Verdicts)),
		Verdicts: []*pb.PositionVerdict{},
	}
	for _, verdict := range history.Verdicts {
		pv := &pb.PositionVerdict{
			CommitPosition: int64(verdict.CommitPosition),
			Hour:           timestamppb.New(verdict.Hour),
			Runs:           []*pb.PositionVerdict_Run{},
		}
		if verdict.IsSimpleExpectedPass {
			pv.Runs = []*pb.PositionVerdict_Run{
				{
					ExpectedPassCount: 1,
				},
			}
		} else {
			pv.IsExonerated = verdict.Details.IsExonerated
			for _, r := range verdict.Details.Runs {
				pv.Runs = append(pv.Runs, &pb.PositionVerdict_Run{
					ExpectedPassCount:    int64(r.Expected.PassCount),
					ExpectedFailCount:    int64(r.Expected.FailCount),
					ExpectedCrashCount:   int64(r.Expected.CrashCount),
					ExpectedAbortCount:   int64(r.Expected.AbortCount),
					UnexpectedPassCount:  int64(r.Unexpected.PassCount),
					UnexpectedFailCount:  int64(r.Unexpected.FailCount),
					UnexpectedCrashCount: int64(r.Unexpected.CrashCount),
					UnexpectedAbortCount: int64(r.Unexpected.AbortCount),
					IsDuplicate:          r.IsDuplicate,
				})
			}
		}
		result.Verdicts = append(result.Verdicts, pv)
	}
	return result
}

// toSegmentsProto returns the proto segments.
// The segments returned are sorted, with the most recent segment first.
func toSegmentsProto(tvb *testvariantbranch.Entry, analysis changepoints.Analyzer) []*pb.Segment {
	// Run analysis to get segments from the input buffer.
	inputSegments := analysis.Run(tvb)
	results := []*pb.Segment{}

	// The index where the active segments start.
	// If there is a finalizing segment, we first need to combine it with
	// the first segment from the input buffer.
	activeStartIndex := 0
	if tvb.FinalizingSegment != nil {
		activeStartIndex = 1
	}

	// Add the active segments.
	for i := len(inputSegments) - 1; i >= activeStartIndex; i-- {
		inputSegment := inputSegments[i]
		bqSegment := inputSegmentToBQSegment(inputSegment)
		results = append(results, bqSegment)
	}

	// Add the finalizing segment.
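	// When a finalizing segment exists, inputSegments[0] is its unfinalized
	// continuation in the input buffer, so the two are combined into a single
	// logical segment.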
	if tvb.FinalizingSegment != nil {
		bqSegment := combineSegment(tvb.FinalizingSegment, inputSegments[0])
		results = append(results, bqSegment)
	}

	// Add the finalized segments.
	if tvb.FinalizedSegments != nil {
		// More recent segments are at the back.
		for i := len(tvb.FinalizedSegments.Segments) - 1; i >= 0; i-- {
			segment := tvb.FinalizedSegments.Segments[i]
			bqSegment := segmentToBQSegment(segment)
			results = append(results, bqSegment)
		}
	}

	return results
}

// combineSegment constructs a finalizing segment from its finalized part in
// the output buffer and its unfinalized part in the input buffer.
func combineSegment(finalizingSegment *cpb.Segment, inputSegment *inputbuffer.Segment) *pb.Segment {
	return &pb.Segment{
		HasStartChangepoint:          finalizingSegment.HasStartChangepoint,
		StartPosition:                finalizingSegment.StartPosition,
		StartHour:                    timestamppb.New(finalizingSegment.StartHour.AsTime()),
		StartPositionLowerBound_99Th: finalizingSegment.StartPositionLowerBound_99Th,
		StartPositionUpperBound_99Th: finalizingSegment.StartPositionUpperBound_99Th,
		EndPosition:                  inputSegment.EndPosition,
		EndHour:                      timestamppb.New(inputSegment.EndHour.AsTime()),
		Counts:                       countsToBQCounts(testvariantbranch.AddCounts(finalizingSegment.FinalizedCounts, inputSegment.Counts)),
	}
}

func inputSegmentToBQSegment(segment *inputbuffer.Segment) *pb.Segment {
	return &pb.Segment{
		HasStartChangepoint:          segment.HasStartChangepoint,
		StartPosition:                segment.StartPosition,
		StartPositionLowerBound_99Th: segment.StartPositionLowerBound99Th,
		StartPositionUpperBound_99Th: segment.StartPositionUpperBound99Th,
		StartHour:                    timestamppb.New(segment.StartHour.AsTime()),
		EndPosition:                  segment.EndPosition,
		EndHour:                      timestamppb.New(segment.EndHour.AsTime()),
		Counts:                       countsToBQCounts(segment.Counts),
	}
}

func segmentToBQSegment(segment *cpb.Segment) *pb.Segment {
	return &pb.Segment{
		HasStartChangepoint:          segment.HasStartChangepoint,
		StartPosition:                segment.StartPosition,
		StartPositionLowerBound_99Th: segment.StartPositionLowerBound_99Th,
		StartPositionUpperBound_99Th: segment.StartPositionUpperBound_99Th,
		StartHour:                    timestamppb.New(segment.StartHour.AsTime()),
		EndPosition:                  segment.EndPosition,
		EndHour:                      timestamppb.New(segment.EndHour.AsTime()),
		Counts:                       countsToBQCounts(segment.FinalizedCounts),
	}
}

func countsToBQCounts(counts *cpb.Counts) *pb.Segment_Counts {
	return &pb.Segment_Counts{
		TotalVerdicts:      int32(counts.TotalVerdicts),
		UnexpectedVerdicts: int32(counts.UnexpectedVerdicts),
		FlakyVerdicts:      int32(counts.FlakyVerdicts),
	}
}

func validateBatchGetTestVariantBranchRequest(req *pb.BatchGetTestVariantBranchRequest) error {
	// MaxTestVariantBranch is the maximum number of test variant branches
	// that can be queried in one request.
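	// Requests that exceed this limit are rejected with INVALID_ARGUMENT
	// rather than being truncated.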
	const MaxTestVariantBranch = 100

	if len(req.Names) == 0 {
		return errors.Reason("names: unspecified").Err()
	}
	if len(req.Names) > MaxTestVariantBranch {
		return errors.Reason("names: no more than %v may be queried at a time", MaxTestVariantBranch).Err()
	}
	for _, name := range req.Names {
		if _, _, _, _, err := parseTestVariantBranchName(name); err != nil {
			return errors.Annotate(err, "name %s", name).Err()
		}
	}
	return nil
}

// QuerySourcePositions returns commits and the test verdicts at these commits,
// starting from a given source position.
func (s *testVariantBranchesServer) QuerySourcePositions(ctx context.Context, req *pb.QuerySourcePositionsRequest) (*pb.QuerySourcePositionsResponse, error) {
	allowedRealms, err := perms.QueryRealmsNonEmpty(ctx, req.Project, nil, perms.ListTestResultsAndExonerations...)
	if err != nil {
		return nil, err
	}
	if err := validateQuerySourcePositionsRequest(req); err != nil {
		return nil, invalidArgumentError(err)
	}
	pageSize := int64(pageSizeLimiter.Adjust(req.PageSize))
	startPosition := req.StartSourcePosition
	if req.PageToken != "" {
		startPosition, err = parseQuerySourcePositionsPageToken(req.PageToken)
		if err != nil {
			return nil, err
		}
	}
	options := testverdicts.ReadTestVerdictsPerSourcePositionOptions{
		Project:       req.Project,
		TestID:        req.TestId,
		VariantHash:   req.VariantHash,
		RefHash:       req.RefHash,
		AllowedRealms: allowedRealms,
		// This query aggregates over source positions so that there is at most
		// one row per source position. With this setting of PositionMustGreater
		// and NumCommits, the largest source position in the query result must
		// be at or after startPosition, unless no such commit exists.
		PositionMustGreater: startPosition - pageSize,
		NumCommits:          pageSize,
	}
	commitsWithVerdicts, err := s.testVerdictClient.ReadTestVerdictsPerSourcePosition(ctx, options)
	if err != nil {
		return nil, errors.Annotate(err, "read test verdicts from BigQuery").Err()
	}
	// Gitiles log requires us to supply a commit hash to fetch a commit log.
	// Ideally, if we knew the commit hash of the requested startPosition, we
	// could just call gitiles with that and page (backwards) through the commit
	// history. However, as test verdicts are sparse, we may not have the commit
	// hash for the given startPosition. Therefore, we need to find the commit
	// closest to, but at or after, startPosition (call it closestAfterCommit),
	// so that we can call gitiles with that commit hash. The commit at
	// startPosition will then be at offset
	// (closestAfterCommitPosition - startPosition) in the gitiles response.
	closestAfterCommitHash, offset, exist := closestAfterCommit(startPosition, commitsWithVerdicts)
	if !exist {
		return nil, appstatus.Errorf(codes.NotFound, "no commit at or after the requested start position")
	}

	// We want to get (pageSize + offset) commits starting from closestAfterCommit.
	// One gitiles log call returns at most 10,000 commits. It is unlikely that
	// (pageSize + offset) will exceed that, so we simply return NotFound if it does.
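	// For example, with startPosition 100, pageSize 10 and the closest commit
	// carrying verdicts at position 103, offset is 3 and we need 13 commits
	// from gitiles to cover positions 103 down to 91 (the returned page being
	// positions 100 down to 91).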
	requiredNumCommits := pageSize + offset
	if requiredNumCommits > 10000 {
		return nil, appstatus.Errorf(codes.NotFound, "cannot find source positions because test verdicts are too sparse")
	}
	ref := commitsWithVerdicts[0].Ref
	gitilesClient, err := gitiles.NewClient(ctx, ref.Gitiles.Host.String())
	if err != nil {
		return nil, errors.Annotate(err, "create gitiles client").Err()
	}
	logReq := &gitilespb.LogRequest{
		Project:    ref.Gitiles.Project.String(),
		Committish: closestAfterCommitHash,
		PageSize:   int32(requiredNumCommits),
		TreeDiff:   true,
	}
	logRes, err := gitilesClient.Log(ctx, logReq)
	if err != nil {
		return nil, errors.Annotate(err, "gitiles log").Err()
	}
	// The response from gitiles contains commits starting from closestAfterCommit.
	// The commit at the requested start position is at index offset.
	logs := logRes.Log[offset:]
	res := []*pb.SourcePosition{}
	for i, commit := range logs {
		pos := startPosition - int64(i)
		commitWithVerdicts := commitWithVerdictsAtSourcePosition(commitsWithVerdicts, pos)
		tvspb := []*pb.TestVerdict{}
		if commitWithVerdicts != nil {
			tvspb = toVerdictsProto(commitWithVerdicts.TestVerdicts)
		}
		cwv := &pb.SourcePosition{
			Commit:   commit,
			Position: pos,
			Verdicts: tvspb,
		}
		res = append(res, cwv)
	}
	// The page token contains the source position the next page should start from.
	nextPageToken := pagination.Token(fmt.Sprintf("%d", startPosition-pageSize))
	return &pb.QuerySourcePositionsResponse{
		SourcePositions: res,
		NextPageToken:   nextPageToken,
	}, nil
}

func toVerdictsProto(bqVerdicts []*testverdicts.TestVerdict) []*pb.TestVerdict {
	res := make([]*pb.TestVerdict, 0, len(bqVerdicts))
	for _, tv := range bqVerdicts {
		if !tv.HasAccess {
			// The caller doesn't have access to this test verdict.
			continue
		}
		tvpb := &pb.TestVerdict{
			TestId:        tv.TestID,
			VariantHash:   tv.VariantHash,
			InvocationId:  tv.InvocationID,
			Status:        pb.TestVerdictStatus(pb.TestVerdictStatus_value[tv.Status]),
			PartitionTime: timestamppb.New(tv.PartitionTime),
			Changelists:   []*pb.Changelist{},
		}
		if tv.PassedAvgDurationUsec.Valid {
			// PassedAvgDurationUsec is measured in microseconds.
			tvpb.PassedAvgDuration = durationpb.New(time.Duration(tv.PassedAvgDurationUsec.Float64) * time.Microsecond)
		}
		for _, cl := range tv.Changelists {
			tvpb.Changelists = append(tvpb.Changelists, &pb.Changelist{
				Host:      cl.Host.String(),
				Change:    cl.Change.Int64,
				Patchset:  int32(cl.Patchset.Int64),
				OwnerKind: pb.ChangelistOwnerKind(pb.ChangelistOwnerKind_value[cl.OwnerKind.String()]),
			})
		}
		res = append(res, tvpb)
	}
	return res
}

// commitWithVerdictsAtSourcePosition finds the testverdicts.CommitWithVerdicts
// at a given source position. It returns nil if not found.
func commitWithVerdictsAtSourcePosition(commits []*testverdicts.CommitWithVerdicts, sourcePosition int64) *testverdicts.CommitWithVerdicts {
	for _, commit := range commits {
		if sourcePosition == commit.Position {
			return commit
		}
	}
	return nil
}

// closestAfterCommit returns the commit hash of the commit in rows that is
// closest to, and at or after, the commit at pos, together with its offset
// from pos. It assumes rows are sorted by source position in ascending order.
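// For example, if rows hold verdicts at positions 95, 103 and 110 and pos is
// 100, the hash of the commit at position 103 is returned with offset 3.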
func closestAfterCommit(pos int64, rows []*testverdicts.CommitWithVerdicts) (commitHash string, offset int64, exist bool) {
	for _, r := range rows {
		if r.Position >= pos {
			return r.CommitHash, r.Position - pos, true
		}
	}
	return "", 0, false
}

func parseQuerySourcePositionsPageToken(pageToken string) (int64, error) {
	tokens, err := pagination.ParseToken(pageToken)
	if err != nil {
		return 0, invalidArgumentError(err)
	}
	if len(tokens) != 1 {
		return 0, invalidArgumentError(errors.Reason("expected exactly one component in the page token").Err())
	}
	pos, err := strconv.Atoi(tokens[0])
	if err != nil {
		return 0, invalidArgumentError(err)
	}
	return int64(pos), nil
}

func validateQuerySourcePositionsRequest(req *pb.QuerySourcePositionsRequest) error {
	if err := pbutil.ValidateProject(req.Project); err != nil {
		return errors.Annotate(err, "project").Err()
	}
	if err := rdbpbutil.ValidateTestID(req.TestId); err != nil {
		return errors.Annotate(err, "test_id").Err()
	}
	if err := ValidateVariantHash(req.VariantHash); err != nil {
		return errors.Annotate(err, "variant_hash").Err()
	}
	if err := ValidateRefHash(req.RefHash); err != nil {
		return errors.Annotate(err, "ref_hash").Err()
	}
	if err := pagination.ValidatePageSize(req.GetPageSize()); err != nil {
		return errors.Annotate(err, "page_size").Err()
	}
	if req.StartSourcePosition <= 0 {
		return errors.Reason("start_source_position: must be a positive number").Err()
	}
	return nil
}
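// Note on paging: the next-page token produced by QuerySourcePositions is the
// decimal source position at which the next page starts (startPosition minus
// pageSize), encoded with pagination.Token and decoded again by
// parseQuerySourcePositionsPageToken above.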