kythe.io@v0.0.68-0.20240422202219-7225dbc01741/kythe/go/serving/xrefs/xrefs.go

/*
 * Copyright 2015 The Kythe Authors. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package xrefs provides a high-performance table-based implementation of the
// xrefs.Service.
//
// Table format:
//
//	decor:<ticket>       -> srvpb.FileDecorations
//	docs:<ticket>        -> srvpb.Document
//	xrefs:<ticket>       -> srvpb.PagedCrossReferences
//	xrefPages:<page_key> -> srvpb.PagedCrossReferences_Page
package xrefs // import "kythe.io/kythe/go/serving/xrefs"

import (
	"context"
	"encoding/base64"
	"flag"
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"

	"kythe.io/kythe/go/services/xrefs"
	"kythe.io/kythe/go/storage/table"
	"kythe.io/kythe/go/util/flagutil"
	"kythe.io/kythe/go/util/kytheuri"
	"kythe.io/kythe/go/util/log"
	"kythe.io/kythe/go/util/schema/edges"
	"kythe.io/kythe/go/util/schema/facts"
	"kythe.io/kythe/go/util/schema/tickets"
	"kythe.io/kythe/go/util/span"

	"bitbucket.org/creachadair/stringset"
	"github.com/golang/snappy"
	"golang.org/x/net/trace"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"

	cpb "kythe.io/kythe/proto/common_go_proto"
	ipb "kythe.io/kythe/proto/internal_go_proto"
	srvpb "kythe.io/kythe/proto/serving_go_proto"
	xpb "kythe.io/kythe/proto/xref_go_proto"
)

var (
	mergeCrossReferences = flag.Bool("merge_cross_references", true, "Whether to merge nodes when responding to a CrossReferencesRequest")

	experimentalCrossReferenceIndirectionKinds flagutil.StringMultimap

	// TODO(schroederc): remove once relevant clients specify their required quality
	defaultTotalsQuality = flag.String("experimental_default_totals_quality", "APPROXIMATE_TOTALS", "Default TotalsQuality when unspecified in CrossReferencesRequest")

	pageReadAhead = flag.Uint("page_read_ahead", 0, "How many xref pages to read ahead concurrently (0 disables readahead)")

	responseLeewayTime = flag.Duration("xrefs_response_leeway_time", 50*time.Millisecond, "If possible, leave this much time at the end of a CrossReferencesRequest to return any results already read")
)

func init() {
	flag.Var(&experimentalCrossReferenceIndirectionKinds, "experimental_cross_reference_indirection_kinds",
		`Comma-separated set of key-value pairs (node_kind=edge_kind) to indirect through in CrossReferences. For example, "talias=/kythe/edge/aliases" indicates that the targets of a 'talias' node's '/kythe/edge/aliases' related nodes will have their cross-references merged into the root 'talias' node's.
A "*=edge_kind" entry indicates to indirect through the specified edge kind for any node kind.`)
}

type staticLookupTables interface {
	fileDecorations(ctx context.Context, ticket string) (*srvpb.FileDecorations, error)
	crossReferences(ctx context.Context, ticket string) (*srvpb.PagedCrossReferences, error)
	crossReferencesPage(ctx context.Context, key string) (*srvpb.PagedCrossReferences_Page, error)
	documentation(ctx context.Context, ticket string) (*srvpb.Document, error)

	Close(context.Context) error
}

// SplitTable implements the xrefs Service interface using separate static
// lookup tables for each API component.
type SplitTable struct {
	// Decorations is a table of srvpb.FileDecorations keyed by their source
	// location tickets.
	Decorations table.Proto

	// CrossReferences is a table of srvpb.PagedCrossReferences keyed by their
	// source node tickets.
	CrossReferences table.Proto

	// CrossReferencePages is a table of srvpb.PagedCrossReferences_Pages keyed by
	// their page keys.
	CrossReferencePages table.Proto

	// Documentation is a table of srvpb.Documents keyed by their node ticket.
	Documentation table.Proto

	// RewriteEdgeLabel is an optional callback to rewrite edge labels.
	// It will be called once per request; the function it returns will then be
	// called once per edge.
	RewriteEdgeLabel func(context.Context) func(string) string
}

// Close closes each underlying table.Proto.
func (s *SplitTable) Close(ctx context.Context) (err error) {
	for _, t := range []table.Proto{s.Decorations, s.CrossReferences, s.CrossReferencePages, s.Documentation} {
		if te := t.Close(ctx); te != nil {
			err = te
		}
	}
	return
}

func (s *SplitTable) rewriteFileDecorations(ctx context.Context, fd *srvpb.FileDecorations, err error) (*srvpb.FileDecorations, error) {
	if fd == nil || err != nil || s.RewriteEdgeLabel == nil || fd.Decoration == nil {
		return fd, err
	}
	f := s.RewriteEdgeLabel(ctx)
	if f == nil {
		return fd, err
	}
	for _, d := range fd.Decoration {
		d.Kind = f(d.Kind)
	}
	return fd, err
}

func rewriteCrossReferencesGroup(g *srvpb.PagedCrossReferences_Group, f func(string) string) {
	if f != nil && g != nil {
		g.Kind = f(g.Kind)
	}
}

func (s *SplitTable) rewriteCrossReferences(ctx context.Context, cr *srvpb.PagedCrossReferences, err error) (*srvpb.PagedCrossReferences, error) {
	if cr == nil || err != nil || s.RewriteEdgeLabel == nil || cr.Group == nil {
		return cr, err
	}
	f := s.RewriteEdgeLabel(ctx)
	for _, g := range cr.Group {
		rewriteCrossReferencesGroup(g, f)
	}
	return cr, err
}

func (s *SplitTable) rewriteCrossReferencesPage(ctx context.Context, cr *srvpb.PagedCrossReferences_Page, err error) (*srvpb.PagedCrossReferences_Page, error) {
	if cr == nil || err != nil || s.RewriteEdgeLabel == nil || cr.Group == nil {
		return cr, err
	}
	f := s.RewriteEdgeLabel(ctx)
	rewriteCrossReferencesGroup(cr.Group, f)
	return cr, err
}

func (s *SplitTable) fileDecorations(ctx context.Context, ticket string) (*srvpb.FileDecorations, error) {
	tracePrintf(ctx, "Reading FileDecorations: %s", ticket)
	var fd srvpb.FileDecorations
	return s.rewriteFileDecorations(ctx, &fd, s.Decorations.Lookup(ctx, []byte(ticket), &fd))
}
func (s *SplitTable) crossReferences(ctx context.Context, ticket string) (*srvpb.PagedCrossReferences, error) {
	tracePrintf(ctx, "Reading PagedCrossReferences: %s", ticket)
	var cr srvpb.PagedCrossReferences
	return s.rewriteCrossReferences(ctx, &cr, s.CrossReferences.Lookup(ctx, []byte(ticket), &cr))
}
func (s *SplitTable) crossReferencesPage(ctx context.Context, key string) (*srvpb.PagedCrossReferences_Page, error) {
	tracePrintf(ctx, "Reading PagedCrossReferences.Page: %s", key)
	var p srvpb.PagedCrossReferences_Page
	return s.rewriteCrossReferencesPage(ctx, &p, s.CrossReferencePages.Lookup(ctx, []byte(key), &p))
}
func (s *SplitTable) documentation(ctx context.Context, ticket string) (*srvpb.Document, error) {
	tracePrintf(ctx, "Reading Document: %s", ticket)
	var d srvpb.Document
	return &d, s.Documentation.Lookup(ctx, []byte(ticket), &d)
}

// Key prefixes for the combinedTable implementation.
const (
	crossRefTablePrefix      = "xrefs:"
	crossRefPageTablePrefix  = "xrefPages:"
	decorTablePrefix         = "decor:"
	documentationTablePrefix = "docs:"
)

type combinedTable struct{ table.Proto }

func (c *combinedTable) fileDecorations(ctx context.Context, ticket string) (*srvpb.FileDecorations, error) {
	var fd srvpb.FileDecorations
	return &fd, c.Lookup(ctx, DecorationsKey(ticket), &fd)
}
func (c *combinedTable) crossReferences(ctx context.Context, ticket string) (*srvpb.PagedCrossReferences, error) {
	var cr srvpb.PagedCrossReferences
	return &cr, c.Lookup(ctx, CrossReferencesKey(ticket), &cr)
}
func (c *combinedTable) crossReferencesPage(ctx context.Context, key string) (*srvpb.PagedCrossReferences_Page, error) {
	var p srvpb.PagedCrossReferences_Page
	return &p, c.Lookup(ctx, CrossReferencesPageKey(key), &p)
}
func (c *combinedTable) documentation(ctx context.Context, ticket string) (*srvpb.Document, error) {
	var d srvpb.Document
	return &d, c.Lookup(ctx, DocumentationKey(ticket), &d)
}

// NewSplitTable returns a table based on the given serving tables for each API
// component.
func NewSplitTable(c *SplitTable) *Table { return &Table{staticLookupTables: c} }

// NewCombinedTable returns a table for the given combined xrefs lookup table.
// The table's keys are expected to be constructed using only the *Key functions.
func NewCombinedTable(t table.Proto) *Table { return &Table{staticLookupTables: &combinedTable{t}} }

// DecorationsKey returns the decorations CombinedTable key for the given source
// location ticket.
func DecorationsKey(ticket string) []byte {
	return []byte(decorTablePrefix + ticket)
}

// CrossReferencesKey returns the cross-references CombinedTable key for the
// given node ticket.
func CrossReferencesKey(ticket string) []byte {
	return []byte(crossRefTablePrefix + ticket)
}

// CrossReferencesPageKey returns the cross-references page CombinedTable key
// for the given key.
func CrossReferencesPageKey(key string) []byte {
	return []byte(crossRefPageTablePrefix + key)
}

// DocumentationKey returns the documentation CombinedTable key for the given
// ticket.
func DocumentationKey(ticket string) []byte {
	return []byte(documentationTablePrefix + ticket)
}

// Table implements the xrefs Service interface using static lookup tables.
type Table struct {
	staticLookupTables

	// MakePatcher returns a patching client that targets a Workspace.
	MakePatcher func(context.Context, *xpb.Workspace) (MultiFilePatcher, error)

	// ResolvePath is used to resolve CorpusPaths for filtering. If unset,
	// DefaultResolvePath will be used.
	ResolvePath PathResolver
}

// A PathResolver resolves a CorpusPath into a single filepath.
type PathResolver func(*cpb.CorpusPath) string

// DefaultResolvePath returns the default resolved path for the CorpusPath by
// joining its corpus, root, and path into a single filepath.
func DefaultResolvePath(cp *cpb.CorpusPath) string {
	return filepath.Join(cp.GetCorpus(), cp.GetRoot(), cp.GetPath())
}

// A MultiFilePatcher provides an interface to patch sets of xref anchors to an
// underlying baseline, usually a Workspace.
//
// After creation, the client is required to call AddFile for each possible file
// referenced by any anchors that will be patched. After the files are added, a
// set of anchors may be passed to PatchAnchors.
type MultiFilePatcher interface {
	// AddFile adds a file to the current set of files to patch against.
	AddFile(context.Context, *srvpb.FileInfo) error

	// PatchAnchors updates the set of anchors given to match their referenced
	// files' state as known by the MultiFilePatcher, usually based on a
	// Workspace. If an anchor no longer exists, it will be elided from the
	// returned set. Otherwise, the ordering of the anchors will be retained.
	PatchAnchors(context.Context, []*xpb.Anchor) ([]*xpb.Anchor, error)

	// PatchRelatedAnchors updates the set of related anchors given to match their
	// referenced files' state as known by the MultiFilePatcher, usually based on
	// a Workspace. If an anchor no longer exists, it will be elided from the
	// returned set. Otherwise, the ordering of the anchors will be retained.
	PatchRelatedAnchors(context.Context, []*xpb.CrossReferencesReply_RelatedAnchor) ([]*xpb.CrossReferencesReply_RelatedAnchor, error)

	// Close releases any resources used by the patcher. Further calls to the
	// patcher will become invalid.
	Close() error
}

const (
	defaultPageSize = 2048
	maxPageSize     = 10000
)

type nodeConverter struct {
	factPatterns []*regexp.Regexp
}

func (c *nodeConverter) ToInfo(n *srvpb.Node) *cpb.NodeInfo {
	ni := &cpb.NodeInfo{Facts: make(map[string][]byte, len(n.Fact))}
	for _, f := range n.Fact {
		if xrefs.MatchesAny(f.Name, c.factPatterns) {
			ni.Facts[f.Name] = f.Value
		}
	}
	if len(ni.Facts) == 0 {
		return nil
	}
	return ni
}

func corpusPathTicket(cp *cpb.CorpusPath) string { return kytheuri.FromCorpusPath(cp).String() }

// Decorations implements part of the xrefs Service interface.
317 func (t *Table) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) { 318 if req.GetLocation() == nil || req.GetLocation().Ticket == "" { 319 return nil, status.Error(codes.InvalidArgument, "missing location") 320 } 321 322 ticket, err := kytheuri.Fix(req.GetLocation().Ticket) 323 if err != nil { 324 return nil, status.Errorf(codes.InvalidArgument, "invalid ticket %q: %v", req.GetLocation().Ticket, err) 325 } 326 327 var multiPatcher MultiFilePatcher 328 if t.MakePatcher != nil && req.GetWorkspace() != nil && req.GetPatchAgainstWorkspace() { 329 multiPatcher, err = t.MakePatcher(ctx, req.GetWorkspace()) 330 if isNonContextError(err) { 331 log.ErrorContextf(ctx, "creating patcher: %v", err) 332 } 333 334 if multiPatcher != nil { 335 defer func() { 336 if err := multiPatcher.Close(); isNonContextError(err) { 337 // No need to fail the request; just log the error. 338 log.ErrorContextf(ctx, "closing patcher: %v", err) 339 } 340 }() 341 } 342 } 343 344 decor, err := t.fileDecorations(ctx, ticket) 345 if err == table.ErrNoSuchKey { 346 return nil, xrefs.ErrDecorationsNotFound 347 } else if err != nil { 348 return nil, canonicalError(err, "file decorations", ticket) 349 } 350 351 if decor.File == nil { 352 if len(decor.Diagnostic) == 0 { 353 log.ErrorContextf(ctx, "FileDecorations.file is missing without related diagnostics: %q", req.Location.Ticket) 354 return nil, xrefs.ErrDecorationsNotFound 355 } 356 357 // FileDecorations may be saved without a File if the file does not exist in 358 // the index but related diagnostics do exist. If diagnostics were 359 // requested, we may return them successfully, but otherwise, an error 360 // indicating a missing file is returned. 361 if req.Diagnostics { 362 return &xpb.DecorationsReply{ 363 Location: req.Location, 364 Diagnostic: decor.Diagnostic, 365 }, nil 366 } 367 return nil, xrefs.ErrDecorationsNotFound 368 } 369 370 text := decor.File.Text 371 if len(req.DirtyBuffer) > 0 { 372 text = req.DirtyBuffer 373 } 374 norm := span.NewNormalizer(text) 375 376 loc, err := norm.Location(req.GetLocation()) 377 if err != nil { 378 return nil, err 379 } 380 381 fileInfos := makeFileInfoMap(decor.FileInfo) 382 383 reply := &xpb.DecorationsReply{ 384 Location: loc, 385 Revision: fileInfos[loc.GetTicket()].GetRevision(), 386 } 387 388 for _, g := range decor.GeneratedBy { 389 uri, err := kytheuri.Parse(g) 390 if err != nil { 391 return nil, fmt.Errorf("unable to parse generated_by ticket %q: %w", g, err) 392 } 393 reply.GeneratedByFile = append(reply.GeneratedByFile, &xpb.File{ 394 CorpusPath: &cpb.CorpusPath{ 395 Corpus: uri.Corpus, 396 Root: uri.Root, 397 Path: uri.Path, 398 }, 399 Revision: fileInfos[g].GetRevision(), 400 }) 401 } 402 403 if req.SourceText && text != nil { 404 reply.Encoding = decor.File.Encoding 405 if loc.Kind == xpb.Location_FILE { 406 reply.SourceText = text 407 } else { 408 reply.SourceText = text[loc.Span.Start.ByteOffset:loc.Span.End.ByteOffset] 409 } 410 } 411 412 var patcher *span.Patcher 413 if len(req.DirtyBuffer) > 0 { 414 if multiPatcher != nil { 415 return nil, status.Errorf(codes.Unimplemented, "cannot patch decorations against Workspace with a dirty_buffer") 416 } 417 patcher, err = span.NewPatcher(decor.File.Text, req.DirtyBuffer) 418 if err != nil { 419 return nil, status.Errorf(codes.Internal, "error patching decorations for %s: %v", req.Location.Ticket, err) 420 } 421 } 422 423 // The span with which to constrain the set of returned anchor references. 
424 var startBoundary, endBoundary int32 425 spanKind := req.SpanKind 426 if loc.Kind == xpb.Location_FILE { 427 startBoundary = 0 428 endBoundary = int32(len(text)) 429 spanKind = xpb.DecorationsRequest_WITHIN_SPAN 430 } else { 431 startBoundary = loc.Span.Start.ByteOffset 432 endBoundary = loc.Span.End.ByteOffset 433 } 434 435 if req.References { 436 patterns := xrefs.ConvertFilters(req.Filter) 437 buildConfigs := stringset.New(req.BuildConfig...) 438 439 ac := &anchorConverter{fileInfos: fileInfos} 440 nc := &nodeConverter{patterns} 441 442 reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration)) 443 reply.Nodes = make(map[string]*cpb.NodeInfo, len(decor.Target)) 444 445 // Reference.TargetTicket -> NodeInfo (superset of reply.Nodes) 446 nodes := make(map[string]*cpb.NodeInfo, len(decor.Target)) 447 if len(patterns) > 0 { 448 for _, n := range decor.Target { 449 if info := nc.ToInfo(n); info != nil { 450 nodes[n.Ticket] = info 451 } 452 } 453 } 454 tracePrintf(ctx, "Potential target nodes: %d", len(nodes)) 455 456 // All known definition locations (Anchor.Ticket -> Anchor) 457 defs := make(map[string]*xpb.Anchor, len(decor.TargetDefinitions)) 458 for _, def := range decor.TargetDefinitions { 459 a := ac.Convert(def).Anchor 460 defs[def.Ticket] = a 461 if multiPatcher != nil { 462 fileInfo := def.GetFileInfo() 463 if fileInfo == nil { 464 fileInfo = fileInfos[a.GetParent()] 465 } 466 if fileInfo != nil { 467 if err := multiPatcher.AddFile(ctx, fileInfo); isNonContextError(err) { 468 // Attempt to continue with the request, just log the error. 469 log.ErrorContextf(ctx, "adding file: %v", err) 470 } 471 } 472 } 473 } 474 if req.TargetDefinitions { 475 reply.DefinitionLocations = make(map[string]*xpb.Anchor, len(decor.TargetDefinitions)) 476 } 477 tracePrintf(ctx, "Potential target defs: %d", len(defs)) 478 479 bindings := stringset.New() 480 481 for _, d := range decor.Decoration { 482 // Filter decorations by requested build configs. 483 if len(buildConfigs) != 0 && !buildConfigs.Contains(d.Anchor.BuildConfiguration) { 484 continue 485 } 486 487 start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset) 488 // Filter non-existent anchor. Anchors can no longer exist if we were 489 // given a dirty buffer and the anchor was inside a changed region. 
490 if !exists || !span.InBounds(spanKind, start, end, startBoundary, endBoundary) { 491 continue 492 } 493 494 d.Anchor.StartOffset = start 495 d.Anchor.EndOffset = end 496 497 r := decorationToReference(norm, d) 498 if req.TargetDefinitions { 499 if def, ok := defs[d.TargetDefinition]; ok { 500 reply.DefinitionLocations[d.TargetDefinition] = def 501 } 502 } else { 503 r.TargetDefinition = "" 504 } 505 506 if !req.SemanticScopes { 507 r.SemanticScope = "" 508 } 509 510 // Populate any target revision, if known 511 r.TargetRevision = fileInfos[r.TargetTicket].GetRevision() 512 513 if req.ExtendsOverrides && (r.Kind == edges.Defines || r.Kind == edges.DefinesBinding) { 514 bindings.Add(r.TargetTicket) 515 } 516 517 reply.Reference = append(reply.Reference, r) 518 519 if n := nodes[r.TargetTicket]; n != nil { 520 reply.Nodes[r.TargetTicket] = n 521 } 522 } 523 tracePrintf(ctx, "References: %d", len(reply.Reference)) 524 525 if len(decor.TargetOverride) > 0 { 526 // Read overrides from serving data 527 reply.ExtendsOverrides = make(map[string]*xpb.DecorationsReply_Overrides, len(bindings)) 528 529 for _, o := range decor.TargetOverride { 530 if bindings.Contains(o.Overriding) { 531 def := defs[o.OverriddenDefinition] 532 if def != nil && len(buildConfigs) != 0 && !buildConfigs.Contains(def.BuildConfig) { 533 // Skip override with undesirable build configuration. 534 continue 535 } 536 537 os, ok := reply.ExtendsOverrides[o.Overriding] 538 if !ok { 539 os = &xpb.DecorationsReply_Overrides{} 540 reply.ExtendsOverrides[o.Overriding] = os 541 } 542 543 ov := &xpb.DecorationsReply_Override{ 544 Target: o.Overridden, 545 Kind: xpb.DecorationsReply_Override_Kind(o.Kind), 546 MarkedSource: o.MarkedSource, 547 } 548 os.Override = append(os.Override, ov) 549 550 if n := nodes[o.Overridden]; n != nil { 551 reply.Nodes[o.Overridden] = n 552 } 553 if req.TargetDefinitions && def != nil { 554 ov.TargetDefinition = o.OverriddenDefinition 555 reply.DefinitionLocations[o.OverriddenDefinition] = def 556 } 557 } 558 } 559 tracePrintf(ctx, "ExtendsOverrides: %d", len(reply.ExtendsOverrides)) 560 } 561 tracePrintf(ctx, "DefinitionLocations: %d", len(reply.DefinitionLocations)) 562 } 563 564 if req.Diagnostics { 565 for _, diag := range decor.Diagnostic { 566 if diag.Span == nil { 567 reply.Diagnostic = append(reply.Diagnostic, diag) 568 } else { 569 start, end, exists := patcher.Patch(span.ByteOffsets(diag.Span)) 570 // Filter non-existent (or out-of-bounds) diagnostic. Diagnostics can 571 // no longer exist if we were given a dirty buffer and the diagnostic 572 // was inside a changed region. 
573 if !exists || !span.InBounds(spanKind, start, end, startBoundary, endBoundary) { 574 continue 575 } 576 577 diag.Span = norm.SpanOffsets(start, end) 578 reply.Diagnostic = append(reply.Diagnostic, diag) 579 } 580 } 581 tracePrintf(ctx, "Diagnostics: %d", len(reply.Diagnostic)) 582 } 583 584 if req.Snippets == xpb.SnippetsKind_NONE { 585 for _, anchor := range reply.DefinitionLocations { 586 clearSnippet(anchor) 587 } 588 } 589 590 if multiPatcher != nil { 591 defs, err := patchDefLocations(ctx, multiPatcher, reply.GetDefinitionLocations()) 592 if err != nil { 593 log.ErrorContextf(ctx, "patching definition locations: %v", err) 594 } else { 595 reply.DefinitionLocations = defs 596 } 597 } 598 599 return reply, nil 600 } 601 602 func patchDefLocations(ctx context.Context, patcher MultiFilePatcher, defLocs map[string]*xpb.Anchor) (map[string]*xpb.Anchor, error) { 603 if len(defLocs) == 0 { 604 return nil, nil 605 } 606 defs := make([]*xpb.Anchor, 0, len(defLocs)) 607 for _, def := range defLocs { 608 defs = append(defs, def) 609 } 610 defs, err := patcher.PatchAnchors(ctx, defs) 611 if err != nil { 612 return defLocs, err 613 } 614 res := make(map[string]*xpb.Anchor, len(defs)) 615 for _, def := range defs { 616 res[def.GetTicket()] = def 617 } 618 tracePrintf(ctx, "Patched DefinitionLocations: %d", len(defs)) 619 return res, nil 620 } 621 622 func makeFileInfoMap(infos []*srvpb.FileInfo) map[string]*srvpb.FileInfo { 623 fileInfos := make(map[string]*srvpb.FileInfo, len(infos)) 624 for _, info := range infos { 625 fileInfos[corpusPathTicket(info.CorpusPath)] = info 626 } 627 return fileInfos 628 } 629 630 func decorationToReference(norm *span.Normalizer, d *srvpb.FileDecorations_Decoration) *xpb.DecorationsReply_Reference { 631 span := norm.SpanOffsets(d.Anchor.StartOffset, d.Anchor.EndOffset) 632 return &xpb.DecorationsReply_Reference{ 633 TargetTicket: d.Target, 634 Kind: d.Kind, 635 Span: span, 636 TargetDefinition: d.TargetDefinition, 637 BuildConfig: d.Anchor.BuildConfiguration, 638 SemanticScope: d.SemanticScope, 639 } 640 } 641 642 type xrefCategory int 643 644 const ( 645 xrefCategoryNone xrefCategory = iota 646 xrefCategoryDef 647 xrefCategoryDecl 648 xrefCategoryRef 649 xrefCategoryCall 650 xrefCategoryRelated 651 xrefCategoryIndirection 652 ) 653 654 func (c xrefCategory) AddCount(reply *xpb.CrossReferencesReply, idx *srvpb.PagedCrossReferences_PageIndex, pageSet *pageSet) { 655 switch c { 656 case xrefCategoryDef: 657 if pageSet.Contains(idx) { 658 reply.Total.Definitions += int64(idx.Count) 659 } else { 660 reply.Filtered.Definitions += int64(idx.Count) 661 } 662 case xrefCategoryDecl: 663 if pageSet.Contains(idx) { 664 reply.Total.Declarations += int64(idx.Count) 665 } else { 666 reply.Filtered.Declarations += int64(idx.Count) 667 } 668 case xrefCategoryRef: 669 if pageSet.Contains(idx) { 670 reply.Total.RefEdgeToCount[strings.TrimPrefix(idx.Kind, "%")] += int64(idx.Count) 671 reply.Total.References += int64(idx.Count) 672 } else { 673 reply.Filtered.RefEdgeToCount[strings.TrimPrefix(idx.Kind, "%")] += int64(idx.Count) 674 reply.Filtered.References += int64(idx.Count) 675 } 676 case xrefCategoryRelated: 677 if pageSet.Contains(idx) { 678 reply.Total.RelatedNodesByRelation[idx.Kind] += int64(idx.Count) 679 } else { 680 reply.Filtered.RelatedNodesByRelation[idx.Kind] += int64(idx.Count) 681 } 682 case xrefCategoryCall: 683 if pageSet.Contains(idx) { 684 reply.Total.Callers += int64(idx.Count) 685 } else { 686 reply.Filtered.Callers += int64(idx.Count) 687 } 688 } 689 } 690 691 
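// The CrossReferences implementation below hands clients an opaque page token
// that is a proto-marshalled ipb.PageToken, snappy-compressed and then
// base64-encoded. The following is a minimal illustrative sketch of that round
// trip, mirroring the encode/decode steps in CrossReferences; the helper names
// are hypothetical and not part of the original file.
func encodePageTokenSketch(t *ipb.PageToken) (string, error) {
	// Marshal, compress, then base64-encode (same order as CrossReferences).
	rec, err := proto.Marshal(t)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(snappy.Encode(nil, rec)), nil
}

func decodePageTokenSketch(s string) (*ipb.PageToken, error) {
	// Reverse the steps: base64-decode, decompress, then unmarshal.
	rec, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return nil, err
	}
	rec, err = snappy.Decode(nil, rec)
	if err != nil {
		return nil, err
	}
	var t ipb.PageToken
	if err := proto.Unmarshal(rec, &t); err != nil {
		return nil, err
	}
	return &t, nil
}
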
// CrossReferences implements part of the xrefs.Service interface. 692 func (t *Table) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) { 693 tickets, err := xrefs.FixTickets(req.Ticket) 694 if err != nil { 695 return nil, err 696 } 697 698 var leewayTime time.Time 699 if d, ok := ctx.Deadline(); ok && *responseLeewayTime > 0 { 700 leewayTime = d.Add(-*responseLeewayTime) 701 if leewayTime.Before(time.Now()) { 702 // Clear leeway time; try to use entire leftover timeout. 703 leewayTime = time.Time{} 704 } 705 } 706 707 filter, err := compileCorpusPathFilters(req.GetCorpusPathFilters(), t.ResolvePath) 708 if err != nil { 709 return nil, status.Errorf(codes.InvalidArgument, "invalid corpus_path_filters %s: %v", strings.ReplaceAll(req.GetCorpusPathFilters().String(), "\n", " "), err) 710 } 711 712 pageReadGroupCtx, stopReadingPages := context.WithCancel(ctx) 713 defer stopReadingPages() 714 pageReadGroup, pageReadGroupCtx := errgroup.WithContext(pageReadGroupCtx) 715 pageReadGroup.SetLimit(int(*pageReadAhead) + 1) 716 single := new(syncCache[*srvpb.PagedCrossReferences_Page]) 717 718 getCachedPage := func(ctx context.Context, pageKey string) (*srvpb.PagedCrossReferences_Page, error) { 719 return single.Get(pageKey, func() (*srvpb.PagedCrossReferences_Page, error) { 720 return t.crossReferencesPage(ctx, pageKey) 721 }) 722 } 723 getFilteredPage := func(ctx context.Context, pageKey string) (*srvpb.PagedCrossReferences_Page, int, error) { 724 p, err := getCachedPage(ctx, pageKey) 725 if err != nil { 726 return nil, 0, err 727 } 728 // Clear page from cache; it should only be used once. 729 single.Delete(pageKey) 730 return p, filter.FilterGroup(p.GetGroup()), nil 731 } 732 733 stats := refStats{ 734 max: int(req.PageSize), 735 736 refOptions: refOptions{ 737 anchorText: req.AnchorText, 738 includeScopes: req.SemanticScopes, 739 }, 740 } 741 if stats.max < 0 { 742 return nil, status.Errorf(codes.InvalidArgument, "invalid page_size: %d", req.PageSize) 743 } else if stats.max == 0 { 744 stats.max = defaultPageSize 745 } else if stats.max > maxPageSize { 746 stats.max = maxPageSize 747 } 748 749 var pageToken ipb.PageToken 750 if req.PageToken != "" { 751 rec, err := base64.StdEncoding.DecodeString(req.PageToken) 752 if err != nil { 753 return nil, status.Errorf(codes.InvalidArgument, "invalid page_token: %q", req.PageToken) 754 } 755 rec, err = snappy.Decode(nil, rec) 756 if err != nil { 757 return nil, status.Errorf(codes.InvalidArgument, "invalid page_token: %q", req.PageToken) 758 } 759 if err := proto.Unmarshal(rec, &pageToken); err != nil { 760 return nil, status.Errorf(codes.InvalidArgument, "invalid page_token: %q", req.PageToken) 761 } 762 for _, index := range pageToken.Indices { 763 if index < 0 { 764 return nil, status.Errorf(codes.InvalidArgument, "invalid page_token: %q", req.PageToken) 765 } 766 } 767 } 768 initialSkip := int(pageToken.Indices["skip"]) 769 stats.skip = initialSkip 770 771 reply := &xpb.CrossReferencesReply{ 772 CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet, len(req.Ticket)), 773 Nodes: make(map[string]*cpb.NodeInfo, len(req.Ticket)), 774 775 Total: &xpb.CrossReferencesReply_Total{ 776 RefEdgeToCount: make(map[string]int64), 777 }, 778 Filtered: &xpb.CrossReferencesReply_Total{ 779 RefEdgeToCount: make(map[string]int64), 780 RelatedNodesByRelation: make(map[string]int64), 781 }, 782 } 783 // Before we return reply, remove all RefEdgeToCount map entries that point to a 0 count. 
784 defer cleanupRefEdgeToCount(reply) 785 786 if len(req.Filter) > 0 { 787 reply.Total.RelatedNodesByRelation = make(map[string]int64) 788 } 789 if req.NodeDefinitions { 790 reply.DefinitionLocations = make(map[string]*xpb.Anchor) 791 } 792 stats.reply = reply 793 794 buildConfigs := stringset.New(req.BuildConfig...) 795 patterns := xrefs.ConvertFilters(req.Filter) 796 stats.nodeConverter = nodeConverter{patterns} 797 798 nextPageToken := &ipb.PageToken{ 799 SubTokens: make(map[string]string), 800 Indices: make(map[string]int32), 801 } 802 803 mergeInto := make(map[string]string) 804 for _, ticket := range tickets { 805 mergeInto[ticket] = ticket 806 } 807 808 relatedKinds := stringset.New(req.RelatedNodeKind...) 809 810 wantMoreCrossRefs := (req.DefinitionKind != xpb.CrossReferencesRequest_NO_DEFINITIONS || 811 req.DeclarationKind != xpb.CrossReferencesRequest_NO_DECLARATIONS || 812 req.ReferenceKind != xpb.CrossReferencesRequest_NO_REFERENCES || 813 req.CallerKind != xpb.CrossReferencesRequest_NO_CALLERS || 814 len(req.Filter) > 0) 815 816 totalsQuality := req.TotalsQuality 817 if totalsQuality == xpb.CrossReferencesRequest_UNSPECIFIED_TOTALS { 818 totalsQuality = xpb.CrossReferencesRequest_TotalsQuality(xpb.CrossReferencesRequest_TotalsQuality_value[strings.ToUpper(*defaultTotalsQuality)]) 819 } 820 821 var patcher MultiFilePatcher 822 if t.MakePatcher != nil && req.GetWorkspace() != nil && req.GetPatchAgainstWorkspace() { 823 patcher, err = t.MakePatcher(ctx, req.GetWorkspace()) 824 if isNonContextError(err) { 825 log.ErrorContextf(ctx, "creating patcher: %v", err) 826 } 827 828 if patcher != nil { 829 defer func() { 830 if err := patcher.Close(); isNonContextError(err) { 831 // No need to fail the request; just log the error. 832 log.ErrorContextf(ctx, "closing patcher: %v", err) 833 } 834 }() 835 836 stats.refOptions.patcherFunc = func(f *srvpb.FileInfo) { 837 if err := patcher.AddFile(ctx, f); isNonContextError(err) { 838 // Attempt to continue with the request, just log the error. 839 log.ErrorContextf(ctx, "adding file: %v", err) 840 } 841 } 842 } 843 } 844 845 // Set of xref page keys to read for further indirection nodes. 846 var indirectionPages []string 847 848 var foundCrossRefs bool 849 readLoop: 850 for i := 0; i < len(tickets); i++ { 851 if totalsQuality == xpb.CrossReferencesRequest_APPROXIMATE_TOTALS && stats.done() { 852 break 853 } 854 855 if !leewayTime.IsZero() && time.Now().After(leewayTime) { 856 log.Warning("hit soft deadline; trying to return already read xrefs") 857 break 858 } 859 860 ticket := tickets[i] 861 cr, err := t.crossReferences(ctx, ticket) 862 if err == table.ErrNoSuchKey { 863 continue 864 } else if err != nil { 865 return nil, canonicalError(err, "cross-references", ticket) 866 } 867 foundCrossRefs = true 868 869 // If this node is to be merged into another, we will use that node's ticket 870 // for all further book-keeping purposes. 871 ticket = mergeInto[ticket] 872 873 // We may have partially completed the xrefs set due merge nodes. 874 crs := reply.CrossReferences[ticket] 875 if crs == nil { 876 crs = &xpb.CrossReferencesReply_CrossReferenceSet{ 877 Ticket: ticket, 878 } 879 reply.CrossReferences[ticket] = crs 880 881 // If visiting a non-merge node and facts are requested, add them to the result. 
882 if ticket == cr.SourceTicket && len(patterns) > 0 && cr.SourceNode != nil { 883 if _, ok := reply.Nodes[ticket]; !ok { 884 if info := stats.ToInfo(cr.SourceNode); info != nil { 885 reply.Nodes[ticket] = info 886 } 887 } 888 } 889 } 890 if crs.MarkedSource == nil { 891 crs.MarkedSource = cr.MarkedSource 892 } 893 894 if *mergeCrossReferences { 895 // Add any additional merge nodes to the set of table lookups 896 for _, mergeNode := range cr.MergeWith { 897 tickets = addMergeNode(mergeInto, tickets, ticket, mergeNode) 898 } 899 } 900 901 // Read the set of indirection edge kinds for the given node kind. 902 nodeKind := nodeKind(cr.SourceNode) 903 indirections := experimentalCrossReferenceIndirectionKinds[nodeKind]. 904 Union(experimentalCrossReferenceIndirectionKinds["*"]) 905 906 for _, grp := range cr.Group { 907 // Filter anchor groups based on requested build configs 908 if len(buildConfigs) != 0 && !buildConfigs.Contains(grp.BuildConfig) && !xrefs.IsRelatedNodeKind(relatedKinds, grp.Kind) { 909 continue 910 } 911 912 switch { 913 case xrefs.IsDefKind(req.DefinitionKind, grp.Kind, cr.Incomplete): 914 filtered := filter.FilterGroup(grp) 915 reply.Total.Definitions += int64(len(grp.Anchor)) 916 reply.Total.Definitions += int64(countRefs(grp.GetScopedReference())) 917 reply.Filtered.Definitions += int64(filtered) 918 if wantMoreCrossRefs { 919 stats.addAnchors(&crs.Definition, grp) 920 } 921 case xrefs.IsDeclKind(req.DeclarationKind, grp.Kind, cr.Incomplete): 922 filtered := filter.FilterGroup(grp) 923 reply.Total.Declarations += int64(len(grp.Anchor)) 924 reply.Total.Declarations += int64(countRefs(grp.GetScopedReference())) 925 reply.Filtered.Declarations += int64(filtered) 926 if wantMoreCrossRefs { 927 stats.addAnchors(&crs.Declaration, grp) 928 } 929 case xrefs.IsRefKind(req.ReferenceKind, grp.Kind): 930 filtered := filter.FilterGroup(grp) 931 reply.Total.RefEdgeToCount[strings.TrimPrefix(grp.Kind, "%")] += int64(len(grp.Anchor)) 932 reply.Total.References += int64(len(grp.Anchor)) 933 reply.Total.RefEdgeToCount[strings.TrimPrefix(grp.Kind, "%")] += int64(countRefs(grp.GetScopedReference())) 934 reply.Total.References += int64(countRefs(grp.GetScopedReference())) 935 reply.Filtered.RefEdgeToCount[strings.TrimPrefix(grp.Kind, "%")] += int64(filtered) 936 reply.Filtered.References += int64(filtered) 937 if wantMoreCrossRefs { 938 stats.addAnchors(&crs.Reference, grp) 939 } 940 case len(grp.RelatedNode) > 0: 941 // If requested, add related nodes to merge node set. 
942 if indirections.Contains(grp.Kind) { 943 for _, rn := range grp.RelatedNode { 944 tickets = addMergeNode(mergeInto, tickets, ticket, rn.Node.GetTicket()) 945 } 946 } 947 948 if len(req.Filter) > 0 && xrefs.IsRelatedNodeKind(relatedKinds, grp.Kind) { 949 filtered := filter.FilterGroup(grp) 950 reply.Total.RelatedNodesByRelation[grp.Kind] += int64(len(grp.RelatedNode)) 951 reply.Filtered.RelatedNodesByRelation[grp.Kind] += int64(filtered) 952 if wantMoreCrossRefs { 953 stats.addRelatedNodes(crs, grp) 954 } 955 } 956 case xrefs.IsCallerKind(req.CallerKind, grp.Kind): 957 filtered := filter.FilterGroup(grp) 958 reply.Total.Callers += int64(len(grp.Caller)) 959 reply.Filtered.Callers += int64(filtered) 960 if wantMoreCrossRefs { 961 stats.addCallers(crs, grp) 962 } 963 } 964 } 965 966 pageSet := filter.PageSet(cr) 967 968 pageCategory := func(idx *srvpb.PagedCrossReferences_PageIndex) xrefCategory { 969 // Filter anchor pages based on requested build configs 970 if len(buildConfigs) != 0 && !buildConfigs.Contains(idx.BuildConfig) && !xrefs.IsRelatedNodeKind(relatedKinds, idx.Kind) { 971 return xrefCategoryNone 972 } 973 974 switch { 975 case xrefs.IsDefKind(req.DefinitionKind, idx.Kind, cr.Incomplete): 976 return xrefCategoryDef 977 case xrefs.IsDeclKind(req.DeclarationKind, idx.Kind, cr.Incomplete): 978 return xrefCategoryDecl 979 case xrefs.IsRefKind(req.ReferenceKind, idx.Kind): 980 return xrefCategoryRef 981 case len(req.Filter) > 0 && xrefs.IsRelatedNodeKind(relatedKinds, idx.Kind): 982 return xrefCategoryRelated 983 case indirections.Contains(idx.Kind): 984 return xrefCategoryIndirection 985 case xrefs.IsCallerKind(req.CallerKind, idx.Kind): 986 return xrefCategoryCall 987 default: 988 return xrefCategoryNone 989 } 990 } 991 992 // Find the first unskipped page index so proper read ahead. 993 firstUnskippedPage := len(cr.GetPageIndex()) 994 for i, idx := range cr.GetPageIndex() { 995 c := pageCategory(idx) 996 if c == xrefCategoryNone { 997 continue 998 } 999 1000 if !stats.skipPage(idx) { 1001 firstUnskippedPage = i 1002 break 1003 } 1004 c.AddCount(reply, idx, pageSet) 1005 } 1006 1007 // If enabled, start reading pages concurrently starting from the first 1008 // unskipped page. 
1009 if *pageReadAhead > 0 { 1010 pageReadGroup.Go(func() error { 1011 ctx := pageReadGroupCtx 1012 for _, idx := range cr.GetPageIndex()[firstUnskippedPage:] { 1013 if err := ctx.Err(); err != nil { 1014 return err 1015 } 1016 if pageCategory(idx) == xrefCategoryNone || !pageSet.Contains(idx) { 1017 continue 1018 } 1019 1020 idx := idx 1021 pageReadGroup.Go(func() error { 1022 _, err := getCachedPage(ctx, idx.PageKey) 1023 return err 1024 }) 1025 } 1026 return nil 1027 }) 1028 } 1029 1030 for _, idx := range cr.GetPageIndex()[firstUnskippedPage:] { 1031 if !leewayTime.IsZero() && time.Now().After(leewayTime) { 1032 log.WarningContextf(ctx, "hit soft deadline; trying to return already read xrefs: %s", time.Now().Sub(leewayTime)) 1033 break readLoop 1034 } 1035 1036 c := pageCategory(idx) 1037 if c == xrefCategoryNone { 1038 continue 1039 } 1040 if wantMoreCrossRefs && !stats.skipPage(idx) { 1041 c.AddCount(reply, idx, pageSet) 1042 } 1043 if c != xrefCategoryIndirection && c != xrefCategoryRelated && !pageSet.Contains(idx) { 1044 continue 1045 } 1046 1047 switch c { 1048 case xrefCategoryDef: 1049 if wantMoreCrossRefs && !stats.skipPage(idx) { 1050 p, filtered, err := getFilteredPage(ctx, idx.PageKey) 1051 if err != nil { 1052 return nil, fmt.Errorf("internal error: error retrieving cross-references page %v: %v", idx.PageKey, err) 1053 } 1054 reply.Total.Definitions -= int64(filtered) // update counts to reflect filtering 1055 reply.Filtered.Definitions += int64(filtered) 1056 stats.addAnchors(&crs.Definition, p.Group) 1057 } 1058 case xrefCategoryDecl: 1059 if wantMoreCrossRefs && !stats.skipPage(idx) { 1060 p, filtered, err := getFilteredPage(ctx, idx.PageKey) 1061 if err != nil { 1062 return nil, fmt.Errorf("internal error: error retrieving cross-references page %v: %v", idx.PageKey, err) 1063 } 1064 reply.Total.Declarations -= int64(filtered) // update counts to reflect filtering 1065 reply.Filtered.Declarations += int64(filtered) 1066 stats.addAnchors(&crs.Declaration, p.Group) 1067 } 1068 case xrefCategoryRef: 1069 if wantMoreCrossRefs && !stats.skipPage(idx) { 1070 p, filtered, err := getFilteredPage(ctx, idx.PageKey) 1071 if err != nil { 1072 return nil, fmt.Errorf("internal error: error retrieving cross-references page %v: %v", idx.PageKey, err) 1073 } 1074 reply.Total.RefEdgeToCount[strings.TrimPrefix(idx.Kind, "%")] -= int64(filtered) // update counts to reflect filtering 1075 reply.Total.References -= int64(filtered) // update counts to reflect filtering 1076 reply.Filtered.RefEdgeToCount[strings.TrimPrefix(idx.Kind, "%")] += int64(filtered) 1077 reply.Filtered.References += int64(filtered) 1078 stats.addAnchors(&crs.Reference, p.Group) 1079 } 1080 case xrefCategoryRelated, xrefCategoryIndirection: 1081 var p *srvpb.PagedCrossReferences_Page 1082 1083 if len(req.Filter) > 0 && xrefs.IsRelatedNodeKind(relatedKinds, idx.Kind) { 1084 if pageSet.Contains(idx) { 1085 if wantMoreCrossRefs && !stats.skipPage(idx) { 1086 var filtered int 1087 p, filtered, err = getFilteredPage(ctx, idx.PageKey) 1088 if err != nil { 1089 return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) 1090 } 1091 reply.Total.RelatedNodesByRelation[idx.Kind] -= int64(filtered) // update counts to reflect filtering 1092 reply.Filtered.RelatedNodesByRelation[idx.Kind] += int64(filtered) 1093 stats.addRelatedNodes(crs, p.Group) 1094 } 1095 } 1096 } 1097 1098 // If requested, add related nodes to merge node set. 
1099 if indirections.Contains(idx.Kind) { 1100 if p == nil { 1101 // We haven't needed to read the page yet; save it until we need 1102 // more tickets. 1103 indirectionPages = append(indirectionPages, idx.PageKey) 1104 } else { 1105 // We've already read the page, immediately populate the indirect 1106 // nodes. 1107 for _, rn := range p.Group.RelatedNode { 1108 tickets = addMergeNode(mergeInto, tickets, ticket, rn.Node.GetTicket()) 1109 } 1110 } 1111 } 1112 case xrefCategoryCall: 1113 if wantMoreCrossRefs && !stats.skipPage(idx) { 1114 p, filtered, err := getFilteredPage(ctx, idx.PageKey) 1115 if err != nil { 1116 return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) 1117 } 1118 reply.Total.Callers -= int64(filtered) // update counts to reflect filtering 1119 reply.Filtered.Callers += int64(filtered) 1120 stats.addCallers(crs, p.Group) 1121 } 1122 } 1123 } 1124 1125 for i == len(tickets)-1 && len(indirectionPages) > 0 { 1126 // We've hit the end of known tickets to pull for xrefs; read an 1127 // indirection page until we've found another ticket or we've exhausted 1128 // all indirection pages. 1129 pageKey := indirectionPages[len(indirectionPages)-1] 1130 indirectionPages = indirectionPages[:len(indirectionPages)-1] 1131 p, err := t.crossReferencesPage(ctx, pageKey) 1132 if err != nil { 1133 return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", pageKey) 1134 } 1135 for _, rn := range p.Group.RelatedNode { 1136 tickets = addMergeNode(mergeInto, tickets, ticket, rn.Node.GetTicket()) 1137 } 1138 } 1139 1140 tracePrintf(ctx, "CrossReferenceSet: %s", crs.Ticket) 1141 } 1142 1143 stopReadingPages() 1144 go func() { 1145 if err := pageReadGroup.Wait(); isNonContextError(err) { 1146 log.ErrorContextf(ctx, "page read ahead error: %v", err) 1147 } 1148 }() 1149 1150 if !foundCrossRefs { 1151 // Short-circuit return; skip any slow requests. 1152 return &xpb.CrossReferencesReply{}, nil 1153 } 1154 1155 var emptySets []string 1156 for key, crs := range reply.CrossReferences { 1157 if len(crs.Declaration)+len(crs.Definition)+len(crs.Reference)+len(crs.Caller)+len(crs.RelatedNode) == 0 { 1158 emptySets = append(emptySets, key) 1159 } 1160 } 1161 for _, k := range emptySets { 1162 delete(reply.CrossReferences, k) 1163 } 1164 1165 if initialSkip+stats.total != sumTotalCrossRefs(reply.Total) && stats.total != 0 { 1166 nextPageToken.Indices["skip"] = int32(initialSkip + stats.total) 1167 } 1168 1169 if _, skip := nextPageToken.Indices["skip"]; skip { 1170 rec, err := proto.Marshal(nextPageToken) 1171 if err != nil { 1172 return nil, fmt.Errorf("internal error: error marshalling page token: %v", err) 1173 } 1174 reply.NextPageToken = base64.StdEncoding.EncodeToString(snappy.Encode(nil, rec)) 1175 } 1176 1177 if req.Snippets == xpb.SnippetsKind_NONE { 1178 for _, crs := range reply.CrossReferences { 1179 for _, def := range crs.Definition { 1180 clearRelatedSnippets(def) 1181 } 1182 for _, dec := range crs.Declaration { 1183 clearRelatedSnippets(dec) 1184 } 1185 for _, ref := range crs.Reference { 1186 clearRelatedSnippets(ref) 1187 } 1188 for _, ca := range crs.Caller { 1189 clearRelatedSnippets(ca) 1190 } 1191 } 1192 for _, def := range reply.DefinitionLocations { 1193 clearSnippet(def) 1194 } 1195 } 1196 1197 if patcher != nil { 1198 tracePrintf(ctx, "Patching anchors") 1199 // Patch each set of anchors in parallel. Files were added as they were 1200 // seen when populating the xref sets. 
1201 g, gCtx := errgroup.WithContext(ctx) 1202 g.Go(func() error { 1203 defs, err := patchDefLocations(gCtx, patcher, reply.GetDefinitionLocations()) 1204 if err != nil { 1205 return err 1206 } 1207 reply.DefinitionLocations = defs 1208 return nil 1209 }) 1210 for _, set := range reply.GetCrossReferences() { 1211 g.Go(func() error { 1212 as, err := patcher.PatchRelatedAnchors(gCtx, set.GetDefinition()) 1213 if err != nil { 1214 return err 1215 } 1216 set.Definition = as 1217 tracePrintf(ctx, "Patched Definitions: %d", len(as)) 1218 return nil 1219 }) 1220 1221 g.Go(func() error { 1222 as, err := patcher.PatchRelatedAnchors(gCtx, set.GetDeclaration()) 1223 if err != nil { 1224 return err 1225 } 1226 set.Declaration = as 1227 tracePrintf(ctx, "Patched Declarations: %d", len(as)) 1228 return nil 1229 }) 1230 1231 g.Go(func() error { 1232 as, err := patcher.PatchRelatedAnchors(gCtx, set.GetReference()) 1233 if err != nil { 1234 return err 1235 } 1236 set.Reference = as 1237 tracePrintf(ctx, "Patched References: %d", len(as)) 1238 return nil 1239 }) 1240 1241 g.Go(func() error { 1242 as, err := patcher.PatchRelatedAnchors(gCtx, set.GetCaller()) 1243 if err != nil { 1244 return err 1245 } 1246 set.Caller = as 1247 tracePrintf(ctx, "Patched Callers: %d", len(as)) 1248 return nil 1249 }) 1250 } 1251 if err := g.Wait(); err != nil { 1252 return nil, err 1253 } 1254 } 1255 1256 return reply, nil 1257 } 1258 1259 // cleanupRefEdgeToCount removes all the keys from r.Total.RefEdgeToCount and 1260 // r.Filtered.RefEdgeToCount that have a value of 0. 1261 func cleanupRefEdgeToCount(r *xpb.CrossReferencesReply) { 1262 for k, v := range r.Total.RefEdgeToCount { 1263 if v == 0 { 1264 delete(r.Total.RefEdgeToCount, k) 1265 } 1266 } 1267 for k, v := range r.Filtered.RefEdgeToCount { 1268 if v == 0 { 1269 delete(r.Filtered.RefEdgeToCount, k) 1270 } 1271 } 1272 1273 } 1274 1275 func addMergeNode(mergeMap map[string]string, allTickets []string, rootNode, mergeNode string) []string { 1276 if _, ok := mergeMap[mergeNode]; ok { 1277 return allTickets 1278 } 1279 allTickets = append(allTickets, mergeNode) 1280 mergeMap[mergeNode] = rootNode 1281 return allTickets 1282 } 1283 1284 func countRefs(rs []*srvpb.PagedCrossReferences_ScopedReference) int { 1285 var n int 1286 for _, ref := range rs { 1287 n += len(ref.GetReference()) 1288 } 1289 return n 1290 } 1291 1292 func nodeKind(n *srvpb.Node) string { 1293 if n == nil { 1294 return "" 1295 } 1296 for _, f := range n.Fact { 1297 if f.Name == facts.NodeKind { 1298 return string(f.Value) 1299 } 1300 } 1301 return "" 1302 } 1303 1304 func sumTotalCrossRefs(ts *xpb.CrossReferencesReply_Total) int { 1305 var refs int 1306 for _, cnt := range ts.RefEdgeToCount { 1307 refs += int(cnt) 1308 } 1309 var relatedNodes int 1310 for _, cnt := range ts.RelatedNodesByRelation { 1311 relatedNodes += int(cnt) 1312 } 1313 return int(ts.Callers) + 1314 int(ts.Definitions) + 1315 int(ts.Declarations) + 1316 refs + 1317 int(ts.Documentation) + 1318 relatedNodes 1319 } 1320 1321 type refOptions struct { 1322 patcherFunc patcherFunc 1323 anchorText bool 1324 includeScopes bool 1325 } 1326 1327 type refStats struct { 1328 // number of refs: 1329 // to skip (returned on previous pages) 1330 // max to return (the page size) 1331 // total (count of refs so far read for current page) 1332 skip, total, max int 1333 1334 reply *xpb.CrossReferencesReply 1335 refOptions 1336 nodeConverter 1337 } 1338 1339 func (s *refStats) done() bool { return s.total == s.max } 1340 1341 func (s *refStats) 
skipPage(idx *srvpb.PagedCrossReferences_PageIndex) bool { 1342 if s.skip > int(idx.Count) { 1343 s.skip -= int(idx.Count) 1344 return true 1345 } 1346 return s.total >= s.max 1347 } 1348 1349 func (s *refStats) addCallers(crs *xpb.CrossReferencesReply_CrossReferenceSet, grp *srvpb.PagedCrossReferences_Group) bool { 1350 cs := grp.Caller 1351 converter := &anchorConverter{ 1352 fileInfos: makeFileInfoMap(grp.FileInfo), 1353 patcherFunc: s.patcherFunc, 1354 } 1355 1356 if s.done() { 1357 // We've already hit our cap; return true that we're done. 1358 return true 1359 } else if s.skip > len(cs) { 1360 // We can skip this entire group. 1361 s.skip -= len(cs) 1362 return false 1363 } else if s.skip > 0 { 1364 // Skip part of the group, and put the rest in the reply. 1365 cs = cs[s.skip:] 1366 s.skip = 0 1367 } 1368 1369 if s.total+len(cs) > s.max { 1370 cs = cs[:(s.max - s.total)] 1371 } 1372 s.total += len(cs) 1373 for _, c := range cs { 1374 ra := &xpb.CrossReferencesReply_RelatedAnchor{ 1375 Anchor: converter.Convert(c.Caller).Anchor, 1376 Ticket: c.SemanticCaller, 1377 Site: make([]*xpb.Anchor, 0, len(c.Callsite)), 1378 1379 Speculative: xrefs.IsSpeculative(grp.GetKind()), 1380 } 1381 ra.MarkedSource = c.MarkedSource 1382 for _, site := range c.Callsite { 1383 ra.Site = append(ra.Site, converter.Convert(site).Anchor) 1384 } 1385 crs.Caller = append(crs.Caller, ra) 1386 } 1387 return s.done() // return whether we've hit our cap 1388 } 1389 1390 func (s *refStats) addRelatedNodes(crs *xpb.CrossReferencesReply_CrossReferenceSet, grp *srvpb.PagedCrossReferences_Group) bool { 1391 ns := grp.RelatedNode 1392 nodes := s.reply.Nodes 1393 defs := s.reply.DefinitionLocations 1394 ac := &anchorConverter{ 1395 fileInfos: makeFileInfoMap(grp.FileInfo), 1396 patcherFunc: s.patcherFunc, 1397 } 1398 1399 if s.total == s.max { 1400 // We've already hit our cap; return true that we're done. 1401 return true 1402 } else if s.skip > len(ns) { 1403 // We can skip this entire group. 1404 s.skip -= len(ns) 1405 return false 1406 } else if s.skip > 0 { 1407 // Skip part of the group, and put the rest in the reply. 
1408 ns = ns[s.skip:] 1409 s.skip = 0 1410 } 1411 1412 if s.total+len(ns) > s.max { 1413 ns = ns[:(s.max - s.total)] 1414 } 1415 s.total += len(ns) 1416 for _, rn := range ns { 1417 if _, ok := nodes[rn.Node.Ticket]; !ok { 1418 if info := s.ToInfo(rn.Node); info != nil { 1419 nodes[rn.Node.Ticket] = info 1420 if defs != nil && rn.Node.DefinitionLocation != nil { 1421 nodes[rn.Node.Ticket].Definition = rn.Node.DefinitionLocation.Ticket 1422 defs[rn.Node.DefinitionLocation.Ticket] = ac.Convert(rn.Node.DefinitionLocation).Anchor 1423 } 1424 } 1425 } 1426 crs.RelatedNode = append(crs.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{ 1427 RelationKind: grp.Kind, 1428 Ticket: rn.Node.Ticket, 1429 Ordinal: rn.Ordinal, 1430 }) 1431 } 1432 return s.total == s.max // return whether we've hit our cap 1433 } 1434 1435 func (s *refStats) addAnchors(to *[]*xpb.CrossReferencesReply_RelatedAnchor, grp *srvpb.PagedCrossReferences_Group) bool { 1436 if s.total >= s.max { 1437 return true 1438 } 1439 1440 scopedRefs := grp.GetScopedReference()[:len(grp.GetScopedReference()):len(grp.GetScopedReference())] 1441 // Convert legacy unscoped references to simple ScopedReference containers 1442 for _, a := range grp.Anchor { 1443 scopedRefs = append(scopedRefs, &srvpb.PagedCrossReferences_ScopedReference{ 1444 Reference: []*srvpb.ExpandedAnchor{a}, 1445 }) 1446 } 1447 totalRefs := countRefs(scopedRefs) 1448 1449 if s.skip >= totalRefs { 1450 s.skip -= totalRefs 1451 return false 1452 } 1453 1454 if s.skip > 0 { 1455 var firstNonEmpty int 1456 for i := 0; i < len(scopedRefs) && s.skip > 0; i++ { 1457 sr := scopedRefs[i] 1458 if len(sr.GetReference()) <= s.skip { 1459 s.skip -= len(sr.GetReference()) 1460 firstNonEmpty++ 1461 continue 1462 } 1463 sr.Reference = sr.GetReference()[s.skip:] 1464 s.skip = 0 1465 } 1466 scopedRefs = scopedRefs[firstNonEmpty:] 1467 } 1468 1469 kind := edges.Canonical(grp.Kind) 1470 fileInfos := makeFileInfoMap(grp.FileInfo) 1471 c := &anchorConverter{fileInfos: fileInfos, anchorText: s.anchorText, patcherFunc: s.patcherFunc} 1472 for _, sr := range scopedRefs { 1473 if !s.includeScopes || sr.Scope == nil { 1474 for _, a := range sr.Reference { 1475 ra := c.Convert(a) 1476 ra.Anchor.Kind = kind 1477 *to = append(*to, ra) 1478 s.total++ 1479 if s.total >= s.max { 1480 return true 1481 } 1482 } 1483 continue 1484 } 1485 scope := c.Convert(sr.Scope).Anchor 1486 scope.Kind = sr.Scope.Kind 1487 ra := &xpb.CrossReferencesReply_RelatedAnchor{ 1488 Anchor: scope, 1489 Ticket: sr.SemanticScope, 1490 Site: make([]*xpb.Anchor, 0, len(sr.Reference)), 1491 1492 Speculative: xrefs.IsSpeculative(grp.GetKind()), 1493 } 1494 ra.MarkedSource = sr.MarkedSource 1495 refs := sr.GetReference() 1496 if s.total+len(refs) > s.max { 1497 refs = refs[:(s.max - s.total)] 1498 } 1499 for _, site := range refs { 1500 a := c.Convert(site).Anchor 1501 a.Kind = kind 1502 ra.Site = append(ra.Site, a) 1503 } 1504 *to = append(*to, ra) 1505 s.total += len(ra.Site) 1506 if s.total >= s.max { 1507 return true 1508 } 1509 } 1510 return false 1511 } 1512 1513 type patcherFunc func(f *srvpb.FileInfo) 1514 1515 type anchorConverter struct { 1516 fileInfos map[string]*srvpb.FileInfo 1517 anchorText bool 1518 patcherFunc patcherFunc 1519 } 1520 1521 func (c *anchorConverter) Convert(a *srvpb.ExpandedAnchor) *xpb.CrossReferencesReply_RelatedAnchor { 1522 var text string 1523 if c.anchorText { 1524 text = a.Text 1525 } 1526 parent, err := tickets.AnchorFile(a.Ticket) 1527 if err != nil { 1528 log.Errorf("parsing anchor ticket: %v", 
err) 1529 } 1530 fileInfo := a.GetFileInfo() 1531 if fileInfo == nil { 1532 fileInfo = c.fileInfos[parent] 1533 } 1534 if c.patcherFunc != nil { 1535 c.patcherFunc(fileInfo) 1536 } 1537 return &xpb.CrossReferencesReply_RelatedAnchor{Anchor: &xpb.Anchor{ 1538 Ticket: a.Ticket, 1539 Kind: edges.Canonical(a.Kind), 1540 Parent: parent, 1541 Text: text, 1542 Span: a.Span, 1543 Snippet: a.Snippet, 1544 SnippetSpan: a.SnippetSpan, 1545 BuildConfig: a.BuildConfiguration, 1546 Revision: fileInfo.GetRevision(), 1547 }, 1548 1549 Speculative: xrefs.IsSpeculative(a.GetKind()), 1550 } 1551 } 1552 1553 type documentConverter struct { 1554 anchorConverter 1555 nodeConverter 1556 1557 nodes map[string]*cpb.NodeInfo 1558 defs map[string]*xpb.Anchor 1559 } 1560 1561 func (c *documentConverter) Convert(d *srvpb.Document) *xpb.DocumentationReply_Document { 1562 for _, node := range d.Node { 1563 if _, ok := c.nodes[node.Ticket]; ok { 1564 continue 1565 } 1566 1567 n := c.ToInfo(node) 1568 if def := node.DefinitionLocation; def != nil { 1569 if n == nil { 1570 // Add an empty NodeInfo to attach definition location even if no facts 1571 // are requested. 1572 n = &cpb.NodeInfo{} 1573 } 1574 1575 n.Definition = def.Ticket 1576 if _, ok := c.defs[def.Ticket]; !ok { 1577 c.defs[def.Ticket] = c.anchorConverter.Convert(def).Anchor 1578 } 1579 } 1580 1581 if n != nil { 1582 c.nodes[node.Ticket] = n 1583 } 1584 } 1585 1586 return &xpb.DocumentationReply_Document{ 1587 Ticket: d.Ticket, 1588 Text: &xpb.Printable{ 1589 RawText: d.RawText, 1590 Link: d.Link, 1591 }, 1592 MarkedSource: d.MarkedSource, 1593 } 1594 } 1595 1596 func (t *Table) lookupDocument(ctx context.Context, ticket string) (*srvpb.Document, error) { 1597 d, err := t.documentation(ctx, ticket) 1598 if err != nil { 1599 return nil, err 1600 } 1601 tracePrintf(ctx, "Document: %s", ticket) 1602 1603 // If DocumentedBy is provided, replace document with another lookup. 1604 if d.DocumentedBy != "" { 1605 doc, err := t.documentation(ctx, d.DocumentedBy) 1606 if err != nil { 1607 log.ErrorContextf(ctx, "looking up subsuming documentation for {%+v}: %v", d, err) 1608 return nil, err 1609 } 1610 1611 // Ensure the subsuming documentation has the correct ticket and node. 1612 doc.Ticket = ticket 1613 for _, n := range d.Node { 1614 if n.Ticket == ticket { 1615 doc.Node = append(doc.Node, n) 1616 break 1617 } 1618 } 1619 1620 tracePrintf(ctx, "DocumentedBy: %s", d.DocumentedBy) 1621 d = doc 1622 } 1623 return d, nil 1624 } 1625 1626 // Documentation implements part of the xrefs Service interface. 
func (t *Table) Documentation(ctx context.Context, req *xpb.DocumentationRequest) (*xpb.DocumentationReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	reply := &xpb.DocumentationReply{
		Nodes:               make(map[string]*cpb.NodeInfo, len(tickets)),
		DefinitionLocations: make(map[string]*xpb.Anchor, len(tickets)),
	}
	patterns := xrefs.ConvertFilters(req.Filter)
	if len(patterns) == 0 {
		// Match all facts if given no filters
		patterns = xrefs.ConvertFilters([]string{"**"})
	}
	fileInfos := make(map[string]*srvpb.FileInfo)

	dc := &documentConverter{
		anchorConverter: anchorConverter{fileInfos: fileInfos},
		nodeConverter:   nodeConverter{patterns},
		nodes:           reply.Nodes,
		defs:            reply.DefinitionLocations,
	}

	var patcher MultiFilePatcher
	if t.MakePatcher != nil && req.GetWorkspace() != nil && req.GetPatchAgainstWorkspace() {
		patcher, err = t.MakePatcher(ctx, req.GetWorkspace())
		if isNonContextError(err) {
			log.ErrorContextf(ctx, "creating patcher: %v", err)
		}

		if patcher != nil {
			defer func() {
				if err := patcher.Close(); isNonContextError(err) {
					// No need to fail the request; just log the error.
					log.ErrorContextf(ctx, "closing patcher: %v", err)
				}
			}()

			dc.anchorConverter.patcherFunc = func(f *srvpb.FileInfo) {
				if err := patcher.AddFile(ctx, f); isNonContextError(err) {
					// Attempt to continue with the request; just log the error.
					log.ErrorContextf(ctx, "adding file: %v", err)
				}
			}
		}
	}

	for _, ticket := range tickets {
		d, err := t.lookupDocument(ctx, ticket)
		if err == table.ErrNoSuchKey {
			continue
		} else if err != nil {
			return nil, canonicalError(err, "documentation", ticket)
		}

		doc := dc.Convert(d)
		if req.IncludeChildren {
			for _, child := range d.ChildTicket {
				// TODO(schroederc): store children with root of documentation tree
				cd, err := t.lookupDocument(ctx, child)
				if err == table.ErrNoSuchKey {
					continue
				} else if err != nil {
					return nil, canonicalError(err, "documentation child", ticket)
				}

				doc.Children = append(doc.Children, dc.Convert(cd))
			}
			tracePrintf(ctx, "Children: %d", len(d.ChildTicket))
		}

		reply.Document = append(reply.Document, doc)
	}
	tracePrintf(ctx, "Documents: %d (nodes: %d) (defs: %d)", len(reply.Document), len(reply.Nodes), len(reply.DefinitionLocations))

	if patcher != nil {
		defs, err := patchDefLocations(ctx, patcher, reply.GetDefinitionLocations())
		if err != nil {
			log.ErrorContextf(ctx, "patching definition locations: %v", err)
		} else {
			reply.DefinitionLocations = defs
		}
	}

	return reply, nil
}

func clearRelatedSnippets(ra *xpb.CrossReferencesReply_RelatedAnchor) {
	clearSnippet(ra.Anchor)
	for _, site := range ra.Site {
		clearSnippet(site)
	}
}

func clearSnippet(anchor *xpb.Anchor) {
	anchor.Snippet = ""
	anchor.SnippetSpan = nil
}

func tracePrintf(ctx context.Context, msg string, args ...any) {
	if t, ok := trace.FromContext(ctx); ok {
		t.LazyPrintf(msg, args...)
	}
}

// Wrap known error types with corresponding rpc status errors.
// If no sensible parsing can be found, just return fmt.Errorf with some hints
// about the calling code and ticket.
// Follows util::error::Code from
// http://google.github.io/google-api-cpp-client/latest/doxygen/namespacegoogleapis_1_1util_1_1error.html
func canonicalError(err error, caller string, ticket string) error {
	switch code := status.Code(err); code {
	case codes.OK:
		return nil
	case codes.Canceled:
		return xrefs.ErrCanceled
	case codes.DeadlineExceeded:
		return xrefs.ErrDeadlineExceeded
	default:
		st := err.Error()
		if strings.Contains(st, "RPC::CANCELLED") || strings.Contains(st, "context canceled") {
			return xrefs.ErrCanceled
		}
		if strings.Contains(st, "RPC::DEADLINE_EXCEEDED") || strings.Contains(st, "context deadline exceeded") {
			return xrefs.ErrDeadlineExceeded
		}
		return status.Error(code, st)
	}
}

func isNonContextError(err error) bool {
	err = canonicalError(err, "", "")
	return err != nil && err != xrefs.ErrCanceled && err != xrefs.ErrDeadlineExceeded
}

// call is an in-flight or completed Get call.
type call[T any] struct {
	wg  sync.WaitGroup
	val T
	err error
}

type syncCache[T any] struct {
	mu sync.Mutex
	m  map[string]*call[T]
}

// Get executes and returns the results of the given function, making sure that
// there is only one execution for a given key (until Delete is called). If a
// duplicate comes in, the duplicate caller waits for the original to complete
// and receives the same results.
func (g *syncCache[T]) Get(key string, fn func() (T, error)) (T, error) {
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[string]*call[T])
	}
	if c, ok := g.m[key]; ok {
		g.mu.Unlock()
		c.wg.Wait()
		return c.val, c.err
	}
	c := new(call[T])
	c.wg.Add(1)
	g.m[key] = c
	g.mu.Unlock()

	c.val, c.err = fn()
	c.wg.Done()

	return c.val, c.err
}

// Delete removes the given key from the cache.
func (g *syncCache[T]) Delete(key string) {
	g.mu.Lock()
	delete(g.m, key)
	g.mu.Unlock()
}
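
// The sketch below is illustrative only and is not part of the original file.
// Assuming an existing table.Proto backing store whose keys were written with
// the *Key functions above (the function name, variable names, and example
// ticket are hypothetical), it shows how a serving Table is typically
// constructed from a combined key-value table and queried for file
// decorations via the xrefs.Service interface implemented in this package.
func exampleCombinedTableUsage(ctx context.Context, kv table.Proto) (*xpb.DecorationsReply, error) {
	// Wrap the combined key-value table in a serving Table.
	xs := NewCombinedTable(kv)
	// Request references and source text for a single file ticket.
	return xs.Decorations(ctx, &xpb.DecorationsRequest{
		Location:   &xpb.Location{Ticket: "kythe://corpus?path=some/file.go"},
		References: true,
		SourceText: true,
	})
}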