cuelang.org/go@v0.10.1/internal/golangorgx/gopls/cache/snapshot.go (about) 1 // Copyright 2019 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package cache 6 7 import ( 8 "bytes" 9 "context" 10 "errors" 11 "fmt" 12 "go/ast" 13 "go/build/constraint" 14 "go/parser" 15 "go/token" 16 "go/types" 17 "io" 18 "os" 19 "path" 20 "path/filepath" 21 "regexp" 22 "runtime" 23 "sort" 24 "strconv" 25 "strings" 26 "sync" 27 28 "cuelang.org/go/internal/golangorgx/gopls/cache/metadata" 29 "cuelang.org/go/internal/golangorgx/gopls/cache/methodsets" 30 "cuelang.org/go/internal/golangorgx/gopls/cache/typerefs" 31 "cuelang.org/go/internal/golangorgx/gopls/cache/xrefs" 32 "cuelang.org/go/internal/golangorgx/gopls/file" 33 "cuelang.org/go/internal/golangorgx/gopls/filecache" 34 "cuelang.org/go/internal/golangorgx/gopls/protocol" 35 "cuelang.org/go/internal/golangorgx/gopls/protocol/command" 36 "cuelang.org/go/internal/golangorgx/gopls/settings" 37 "cuelang.org/go/internal/golangorgx/gopls/util/bug" 38 "cuelang.org/go/internal/golangorgx/gopls/util/constraints" 39 "cuelang.org/go/internal/golangorgx/gopls/util/immutable" 40 "cuelang.org/go/internal/golangorgx/gopls/util/pathutil" 41 "cuelang.org/go/internal/golangorgx/gopls/util/persistent" 42 "cuelang.org/go/internal/golangorgx/gopls/util/slices" 43 "cuelang.org/go/internal/golangorgx/tools/event" 44 "cuelang.org/go/internal/golangorgx/tools/event/label" 45 "cuelang.org/go/internal/golangorgx/tools/event/tag" 46 "cuelang.org/go/internal/golangorgx/tools/gocommand" 47 "cuelang.org/go/internal/golangorgx/tools/memoize" 48 "cuelang.org/go/internal/golangorgx/tools/packagesinternal" 49 "cuelang.org/go/internal/golangorgx/tools/typesinternal" 50 "golang.org/x/sync/errgroup" 51 "golang.org/x/tools/go/packages" 52 "golang.org/x/tools/go/types/objectpath" 53 ) 54 55 // A Snapshot represents the current state for a given view. 56 // 57 // It is first and foremost an idempotent implementation of file.Source whose 58 // ReadFile method returns consistent information about the existence and 59 // content of each file throughout its lifetime. 60 // 61 // However, the snapshot also manages additional state (such as parsed files 62 // and packages) that are derived from file content. 63 // 64 // Snapshots are responsible for bookkeeping and invalidation of this state, 65 // implemented in Snapshot.clone. 66 type Snapshot struct { 67 // sequenceID is the monotonically increasing ID of this snapshot within its View. 68 // 69 // Sequence IDs for Snapshots from different Views cannot be compared. 70 sequenceID uint64 71 72 // TODO(rfindley): the snapshot holding a reference to the view poses 73 // lifecycle problems: a view may be shut down and waiting for work 74 // associated with this snapshot to complete. While most accesses of the view 75 // are benign (options or workspace information), this is not formalized and 76 // it is wrong for the snapshot to use a shutdown view. 77 // 78 // Fix this by passing options and workspace information to the snapshot, 79 // both of which should be immutable for the snapshot. 80 view *View 81 82 cancel func() 83 backgroundCtx context.Context 84 85 store *memoize.Store // cache of handles shared by all snapshots 86 87 refMu sync.Mutex 88 89 // refcount holds the number of outstanding references to the current 90 // Snapshot. When refcount is decremented to 0, the Snapshot maps are 91 // destroyed and the done function is called. 92 // 93 // TODO(rfindley): use atomic.Int32 on Go 1.19+. 94 refcount int 95 done func() // for implementing Session.Shutdown 96 97 // mu guards all of the maps in the snapshot, as well as the builtin URI and 98 // initialized. 99 mu sync.Mutex 100 101 // initialized reports whether the snapshot has been initialized. Concurrent 102 // initialization is guarded by the view.initializationSema. Each snapshot is 103 // initialized at most once: concurrent initialization is guarded by 104 // view.initializationSema. 105 initialized bool 106 107 // initialErr holds the last error resulting from initialization. If 108 // initialization fails, we only retry when the workspace modules change, 109 // to avoid too many go/packages calls. 110 // If initialized is false, initialErr stil holds the error resulting from 111 // the previous initialization. 112 // TODO(rfindley): can we unify the lifecycle of initialized and initialErr. 113 initialErr *InitializationError 114 115 // builtin is the location of builtin.go in GOROOT. 116 // 117 // TODO(rfindley): would it make more sense to eagerly parse builtin, and 118 // instead store a *ParsedGoFile here? 119 builtin protocol.DocumentURI 120 121 // meta holds loaded metadata. 122 // 123 // meta is guarded by mu, but the Graph itself is immutable. 124 // 125 // TODO(rfindley): in many places we hold mu while operating on meta, even 126 // though we only need to hold mu while reading the pointer. 127 meta *metadata.Graph 128 129 // files maps file URIs to their corresponding FileHandles. 130 // It may invalidated when a file's content changes. 131 files *fileMap 132 133 // symbolizeHandles maps each file URI to a handle for the future 134 // result of computing the symbols declared in that file. 135 symbolizeHandles *persistent.Map[protocol.DocumentURI, *memoize.Promise] // *memoize.Promise[symbolizeResult] 136 137 // packages maps a packageKey to a *packageHandle. 138 // It may be invalidated when a file's content changes. 139 // 140 // Invariants to preserve: 141 // - packages.Get(id).meta == meta.metadata[id] for all ids 142 // - if a package is in packages, then all of its dependencies should also 143 // be in packages, unless there is a missing import 144 packages *persistent.Map[PackageID, *packageHandle] 145 146 // activePackages maps a package ID to a memoized active package, or nil if 147 // the package is known not to be open. 148 // 149 // IDs not contained in the map are not known to be open or not open. 150 activePackages *persistent.Map[PackageID, *Package] 151 152 // workspacePackages contains the workspace's packages, which are loaded 153 // when the view is created. It does not contain intermediate test variants. 154 workspacePackages immutable.Map[PackageID, PackagePath] 155 156 // shouldLoad tracks packages that need to be reloaded, mapping a PackageID 157 // to the package paths that should be used to reload it 158 // 159 // When we try to load a package, we clear it from the shouldLoad map 160 // regardless of whether the load succeeded, to prevent endless loads. 161 shouldLoad *persistent.Map[PackageID, []PackagePath] 162 163 // unloadableFiles keeps track of files that we've failed to load. 164 unloadableFiles *persistent.Set[protocol.DocumentURI] 165 166 // TODO(rfindley): rename the handles below to "promises". A promise is 167 // different from a handle (we mutate the package handle.) 168 169 // parseModHandles keeps track of any parseModHandles for the snapshot. 170 // The handles need not refer to only the view's go.mod file. 171 parseModHandles *persistent.Map[protocol.DocumentURI, *memoize.Promise] // *memoize.Promise[parseModResult] 172 173 // parseWorkHandles keeps track of any parseWorkHandles for the snapshot. 174 // The handles need not refer to only the view's go.work file. 175 parseWorkHandles *persistent.Map[protocol.DocumentURI, *memoize.Promise] // *memoize.Promise[parseWorkResult] 176 177 // Preserve go.mod-related handles to avoid garbage-collecting the results 178 // of various calls to the go command. The handles need not refer to only 179 // the view's go.mod file. 180 modTidyHandles *persistent.Map[protocol.DocumentURI, *memoize.Promise] // *memoize.Promise[modTidyResult] 181 modWhyHandles *persistent.Map[protocol.DocumentURI, *memoize.Promise] // *memoize.Promise[modWhyResult] 182 183 // importGraph holds a shared import graph to use for type-checking. Adding 184 // more packages to this import graph can speed up type checking, at the 185 // expense of in-use memory. 186 // 187 // See getImportGraph for additional documentation. 188 importGraphDone chan struct{} // closed when importGraph is set; may be nil 189 importGraph *importGraph // copied from preceding snapshot and re-evaluated 190 191 // pkgIndex is an index of package IDs, for efficient storage of typerefs. 192 pkgIndex *typerefs.PackageIndex 193 194 // moduleUpgrades tracks known upgrades for module paths in each modfile. 195 // Each modfile has a map of module name to upgrade version. 196 moduleUpgrades *persistent.Map[protocol.DocumentURI, map[string]string] 197 198 // gcOptimizationDetails describes the packages for which we want 199 // optimization details to be included in the diagnostics. 200 gcOptimizationDetails map[metadata.PackageID]unit 201 } 202 203 var _ memoize.RefCounted = (*Snapshot)(nil) // snapshots are reference-counted 204 205 func (s *Snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) { 206 return p.Get(ctx, s) 207 } 208 209 // Acquire prevents the snapshot from being destroyed until the returned 210 // function is called. 211 // 212 // (s.Acquire().release() could instead be expressed as a pair of 213 // method calls s.IncRef(); s.DecRef(). The latter has the advantage 214 // that the DecRefs are fungible and don't require holding anything in 215 // addition to the refcounted object s, but paradoxically that is also 216 // an advantage of the current approach, which forces the caller to 217 // consider the release function at every stage, making a reference 218 // leak more obvious.) 219 func (s *Snapshot) Acquire() func() { 220 s.refMu.Lock() 221 defer s.refMu.Unlock() 222 assert(s.refcount > 0, "non-positive refs") 223 s.refcount++ 224 225 return s.decref 226 } 227 228 // decref should only be referenced by Acquire, and by View when it frees its 229 // reference to View.snapshot. 230 func (s *Snapshot) decref() { 231 s.refMu.Lock() 232 defer s.refMu.Unlock() 233 234 assert(s.refcount > 0, "non-positive refs") 235 s.refcount-- 236 if s.refcount == 0 { 237 s.packages.Destroy() 238 s.activePackages.Destroy() 239 s.files.destroy() 240 s.symbolizeHandles.Destroy() 241 s.parseModHandles.Destroy() 242 s.parseWorkHandles.Destroy() 243 s.modTidyHandles.Destroy() 244 s.modWhyHandles.Destroy() 245 s.unloadableFiles.Destroy() 246 s.moduleUpgrades.Destroy() 247 s.done() 248 } 249 } 250 251 // SequenceID is the sequence id of this snapshot within its containing 252 // view. 253 // 254 // Relative to their view sequence ids are monotonically increasing, but this 255 // does not hold globally: when new views are created their initial snapshot 256 // has sequence ID 0. 257 func (s *Snapshot) SequenceID() uint64 { 258 return s.sequenceID 259 } 260 261 // SnapshotLabels returns a new slice of labels that should be used for events 262 // related to a snapshot. 263 func (s *Snapshot) Labels() []label.Label { 264 return []label.Label{tag.Snapshot.Of(s.SequenceID()), tag.Directory.Of(s.Folder())} 265 } 266 267 // Folder returns the folder at the base of this snapshot. 268 func (s *Snapshot) Folder() protocol.DocumentURI { 269 return s.view.folder.Dir 270 } 271 272 // View returns the View associated with this snapshot. 273 func (s *Snapshot) View() *View { 274 return s.view 275 } 276 277 // FileKind returns the kind of a file. 278 // 279 // We can't reliably deduce the kind from the file name alone, 280 // as some editors can be told to interpret a buffer as 281 // language different from the file name heuristic, e.g. that 282 // an .html file actually contains Go "html/template" syntax, 283 // or even that a .go file contains Python. 284 func (s *Snapshot) FileKind(fh file.Handle) file.Kind { 285 if k := fileKind(fh); k != file.UnknownKind { 286 return k 287 } 288 fext := filepath.Ext(fh.URI().Path()) 289 exts := s.Options().TemplateExtensions 290 for _, ext := range exts { 291 if fext == ext || fext == "."+ext { 292 return file.Tmpl 293 } 294 } 295 296 // and now what? This should never happen, but it does for cgo before go1.15 297 // 298 // TODO(rfindley): this doesn't look right. We should default to UnknownKind. 299 // Also, I don't understand the comment above, though I'd guess before go1.15 300 // we encountered cgo files without the .go extension. 301 return file.Go 302 } 303 304 // fileKind returns the default file kind for a file, before considering 305 // template file extensions. See [Snapshot.FileKind]. 306 func fileKind(fh file.Handle) file.Kind { 307 // The kind of an unsaved buffer comes from the 308 // TextDocumentItem.LanguageID field in the didChange event, 309 // not from the file name. They may differ. 310 if o, ok := fh.(*overlay); ok { 311 if o.kind != file.UnknownKind { 312 return o.kind 313 } 314 } 315 316 fext := filepath.Ext(fh.URI().Path()) 317 switch fext { 318 case ".go": 319 return file.Go 320 case ".mod": 321 return file.Mod 322 case ".sum": 323 return file.Sum 324 case ".work": 325 return file.Work 326 case ".cue": 327 return file.CUE 328 } 329 return file.UnknownKind 330 } 331 332 // Options returns the options associated with this snapshot. 333 func (s *Snapshot) Options() *settings.Options { 334 return s.view.folder.Options 335 } 336 337 // BackgroundContext returns a context used for all background processing 338 // on behalf of this snapshot. 339 func (s *Snapshot) BackgroundContext() context.Context { 340 return s.backgroundCtx 341 } 342 343 // Templates returns the .tmpl files. 344 func (s *Snapshot) Templates() map[protocol.DocumentURI]file.Handle { 345 s.mu.Lock() 346 defer s.mu.Unlock() 347 348 tmpls := map[protocol.DocumentURI]file.Handle{} 349 s.files.foreach(func(k protocol.DocumentURI, fh file.Handle) { 350 if s.FileKind(fh) == file.Tmpl { 351 tmpls[k] = fh 352 } 353 }) 354 return tmpls 355 } 356 357 // config returns the configuration used for the snapshot's interaction with 358 // the go/packages API. It uses the given working directory. 359 // 360 // TODO(rstambler): go/packages requires that we do not provide overlays for 361 // multiple modules in on config, so buildOverlay needs to filter overlays by 362 // module. 363 func (s *Snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packages.Config { 364 365 cfg := &packages.Config{ 366 Context: ctx, 367 Dir: inv.WorkingDir, 368 Env: inv.Env, 369 BuildFlags: inv.BuildFlags, 370 Mode: packages.NeedName | 371 packages.NeedFiles | 372 packages.NeedCompiledGoFiles | 373 packages.NeedImports | 374 packages.NeedDeps | 375 packages.NeedTypesSizes | 376 packages.NeedModule | 377 packages.NeedEmbedFiles | 378 packages.LoadMode(packagesinternal.DepsErrors) | 379 packages.LoadMode(packagesinternal.ForTest), 380 Fset: nil, // we do our own parsing 381 Overlay: s.buildOverlay(), 382 ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) { 383 panic("go/packages must not be used to parse files") 384 }, 385 Logf: func(format string, args ...interface{}) { 386 if s.Options().VerboseOutput { 387 event.Log(ctx, fmt.Sprintf(format, args...)) 388 } 389 }, 390 Tests: true, 391 } 392 packagesinternal.SetModFile(cfg, inv.ModFile) 393 packagesinternal.SetModFlag(cfg, inv.ModFlag) 394 // We want to type check cgo code if go/types supports it. 395 if typesinternal.SetUsesCgo(&types.Config{}) { 396 cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo) 397 } 398 return cfg 399 } 400 401 // InvocationFlags represents the settings of a particular go command invocation. 402 // It is a mode, plus a set of flag bits. 403 type InvocationFlags int 404 405 const ( 406 // Normal is appropriate for commands that might be run by a user and don't 407 // deliberately modify go.mod files, e.g. `go test`. 408 Normal InvocationFlags = iota 409 // WriteTemporaryModFile is for commands that need information from a 410 // modified version of the user's go.mod file, e.g. `go mod tidy` used to 411 // generate diagnostics. 412 WriteTemporaryModFile 413 // LoadWorkspace is for packages.Load, and other operations that should 414 // consider the whole workspace at once. 415 LoadWorkspace 416 // AllowNetwork is a flag bit that indicates the invocation should be 417 // allowed to access the network. 418 AllowNetwork InvocationFlags = 1 << 10 419 ) 420 421 func (m InvocationFlags) Mode() InvocationFlags { 422 return m & (AllowNetwork - 1) 423 } 424 425 func (m InvocationFlags) AllowNetwork() bool { 426 return m&AllowNetwork != 0 427 } 428 429 // RunGoCommandDirect runs the given `go` command. Verb, Args, and 430 // WorkingDir must be specified. 431 func (s *Snapshot) RunGoCommandDirect(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) { 432 _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv) 433 if err != nil { 434 return nil, err 435 } 436 defer cleanup() 437 438 return s.view.gocmdRunner.Run(ctx, *inv) 439 } 440 441 // RunGoCommandPiped runs the given `go` command, writing its output 442 // to stdout and stderr. Verb, Args, and WorkingDir must be specified. 443 // 444 // RunGoCommandPiped runs the command serially using gocommand.RunPiped, 445 // enforcing that this command executes exclusively to other commands on the 446 // server. 447 func (s *Snapshot) RunGoCommandPiped(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error { 448 _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv) 449 if err != nil { 450 return err 451 } 452 defer cleanup() 453 return s.view.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr) 454 } 455 456 // RunGoModUpdateCommands runs a series of `go` commands that updates the go.mod 457 // and go.sum file for wd, and returns their updated contents. 458 // 459 // TODO(rfindley): the signature of RunGoModUpdateCommands is very confusing. 460 // Simplify it. 461 func (s *Snapshot) RunGoModUpdateCommands(ctx context.Context, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) ([]byte, []byte, error) { 462 flags := WriteTemporaryModFile | AllowNetwork 463 tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd}) 464 if err != nil { 465 return nil, nil, err 466 } 467 defer cleanup() 468 invoke := func(args ...string) (*bytes.Buffer, error) { 469 inv.Verb = args[0] 470 inv.Args = args[1:] 471 return s.view.gocmdRunner.Run(ctx, *inv) 472 } 473 if err := run(invoke); err != nil { 474 return nil, nil, err 475 } 476 if flags.Mode() != WriteTemporaryModFile { 477 return nil, nil, nil 478 } 479 var modBytes, sumBytes []byte 480 modBytes, err = os.ReadFile(tmpURI.Path()) 481 if err != nil && !os.IsNotExist(err) { 482 return nil, nil, err 483 } 484 sumBytes, err = os.ReadFile(strings.TrimSuffix(tmpURI.Path(), ".mod") + ".sum") 485 if err != nil && !os.IsNotExist(err) { 486 return nil, nil, err 487 } 488 return modBytes, sumBytes, nil 489 } 490 491 // goCommandInvocation populates inv with configuration for running go commands on the snapshot. 492 // 493 // TODO(rfindley): refactor this function to compose the required configuration 494 // explicitly, rather than implicitly deriving it from flags and inv. 495 // 496 // TODO(adonovan): simplify cleanup mechanism. It's hard to see, but 497 // it used only after call to tempModFile. 498 func (s *Snapshot) goCommandInvocation(ctx context.Context, flags InvocationFlags, inv *gocommand.Invocation) (tmpURI protocol.DocumentURI, updatedInv *gocommand.Invocation, cleanup func(), err error) { 499 allowModfileModificationOption := s.Options().AllowModfileModifications 500 allowNetworkOption := s.Options().AllowImplicitNetworkAccess 501 502 // TODO(rfindley): it's not clear that this is doing the right thing. 503 // Should inv.Env really overwrite view.options? Should s.view.envOverlay 504 // overwrite inv.Env? (Do we ever invoke this with a non-empty inv.Env?) 505 // 506 // We should survey existing uses and write down rules for how env is 507 // applied. 508 inv.Env = slices.Concat( 509 os.Environ(), 510 s.Options().EnvSlice(), 511 inv.Env, 512 []string{"GO111MODULE=" + s.view.adjustedGO111MODULE()}, 513 s.view.EnvOverlay(), 514 ) 515 inv.BuildFlags = append([]string{}, s.Options().BuildFlags...) 516 cleanup = func() {} // fallback 517 518 // All logic below is for module mode. 519 if len(s.view.workspaceModFiles) == 0 { 520 return "", inv, cleanup, nil 521 } 522 523 mode, allowNetwork := flags.Mode(), flags.AllowNetwork() 524 if !allowNetwork && !allowNetworkOption { 525 inv.Env = append(inv.Env, "GOPROXY=off") 526 } 527 528 // What follows is rather complicated logic for how to actually run the go 529 // command. A word of warning: this is the result of various incremental 530 // features added to gopls, and varying behavior of the Go command across Go 531 // versions. It can surely be cleaned up significantly, but tread carefully. 532 // 533 // Roughly speaking we need to resolve four things: 534 // - the working directory. 535 // - the -mod flag 536 // - the -modfile flag 537 // 538 // These are dependent on a number of factors: whether we need to run in a 539 // synthetic workspace, whether flags are supported at the current go 540 // version, and what we're actually trying to achieve (the 541 // InvocationFlags). 542 // 543 // TODO(rfindley): should we set -overlays here? 544 545 const mutableModFlag = "mod" 546 547 // If the mod flag isn't set, populate it based on the mode and workspace. 548 // 549 // (As noted in various TODOs throughout this function, this is very 550 // confusing and not obviously correct, but tests pass and we will eventually 551 // rewrite this entire function.) 552 if inv.ModFlag == "" { 553 switch mode { 554 case LoadWorkspace, Normal: 555 if allowModfileModificationOption { 556 inv.ModFlag = mutableModFlag 557 } 558 case WriteTemporaryModFile: 559 inv.ModFlag = mutableModFlag 560 // -mod must be readonly when using go.work files - see issue #48941 561 inv.Env = append(inv.Env, "GOWORK=off") 562 } 563 } 564 565 // TODO(rfindley): if inv.ModFlag was already set to "mod", we may not have 566 // set GOWORK=off here. But that doesn't happen. Clean up this entire API so 567 // that we don't have this mutation of the invocation, which is quite hard to 568 // follow. 569 570 // If the invocation needs to mutate the modfile, we must use a temp mod. 571 if inv.ModFlag == mutableModFlag { 572 var modURI protocol.DocumentURI 573 // Select the module context to use. 574 // If we're type checking, we need to use the workspace context, meaning 575 // the main (workspace) module. Otherwise, we should use the module for 576 // the passed-in working dir. 577 if mode == LoadWorkspace { 578 // TODO(rfindley): this seems unnecessary and overly complicated. Remove 579 // this along with 'allowModFileModifications'. 580 if s.view.typ == GoModView { 581 modURI = s.view.gomod 582 } 583 } else { 584 modURI = s.GoModForFile(protocol.URIFromPath(inv.WorkingDir)) 585 } 586 587 var modContent []byte 588 if modURI != "" { 589 modFH, err := s.ReadFile(ctx, modURI) 590 if err != nil { 591 return "", nil, cleanup, err 592 } 593 modContent, err = modFH.Content() 594 if err != nil { 595 return "", nil, cleanup, err 596 } 597 } 598 if modURI == "" { 599 return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir) 600 } 601 // Use the go.sum if it happens to be available. 602 gosum := s.goSum(ctx, modURI) 603 tmpURI, cleanup, err = tempModFile(modURI, modContent, gosum) 604 if err != nil { 605 return "", nil, cleanup, err 606 } 607 inv.ModFile = tmpURI.Path() 608 } 609 610 return tmpURI, inv, cleanup, nil 611 } 612 613 func (s *Snapshot) buildOverlay() map[string][]byte { 614 overlays := make(map[string][]byte) 615 for _, overlay := range s.Overlays() { 616 if overlay.saved { 617 continue 618 } 619 // TODO(rfindley): previously, there was a todo here to make sure we don't 620 // send overlays outside of the current view. IMO we should instead make 621 // sure this doesn't matter. 622 overlays[overlay.URI().Path()] = overlay.content 623 } 624 return overlays 625 } 626 627 // Overlays returns the set of overlays at this snapshot. 628 // 629 // Note that this may differ from the set of overlays on the server, if the 630 // snapshot observed a historical state. 631 func (s *Snapshot) Overlays() []*overlay { 632 s.mu.Lock() 633 defer s.mu.Unlock() 634 635 return s.files.getOverlays() 636 } 637 638 // Package data kinds, identifying various package data that may be stored in 639 // the file cache. 640 const ( 641 xrefsKind = "xrefs" 642 methodSetsKind = "methodsets" 643 exportDataKind = "export" 644 diagnosticsKind = "diagnostics" 645 typerefsKind = "typerefs" 646 ) 647 648 // PackageDiagnostics returns diagnostics for files contained in specified 649 // packages. 650 // 651 // If these diagnostics cannot be loaded from cache, the requested packages 652 // may be type-checked. 653 func (s *Snapshot) PackageDiagnostics(ctx context.Context, ids ...PackageID) (map[protocol.DocumentURI][]*Diagnostic, error) { 654 ctx, done := event.Start(ctx, "cache.snapshot.PackageDiagnostics") 655 defer done() 656 657 var mu sync.Mutex 658 perFile := make(map[protocol.DocumentURI][]*Diagnostic) 659 collect := func(diags []*Diagnostic) { 660 mu.Lock() 661 defer mu.Unlock() 662 for _, diag := range diags { 663 perFile[diag.URI] = append(perFile[diag.URI], diag) 664 } 665 } 666 pre := func(_ int, ph *packageHandle) bool { 667 data, err := filecache.Get(diagnosticsKind, ph.key) 668 if err == nil { // hit 669 collect(ph.loadDiagnostics) 670 collect(decodeDiagnostics(data)) 671 return false 672 } else if err != filecache.ErrNotFound { 673 event.Error(ctx, "reading diagnostics from filecache", err) 674 } 675 return true 676 } 677 post := func(_ int, pkg *Package) { 678 collect(pkg.loadDiagnostics) 679 collect(pkg.pkg.diagnostics) 680 } 681 return perFile, s.forEachPackage(ctx, ids, pre, post) 682 } 683 684 // References returns cross-reference indexes for the specified packages. 685 // 686 // If these indexes cannot be loaded from cache, the requested packages may 687 // be type-checked. 688 func (s *Snapshot) References(ctx context.Context, ids ...PackageID) ([]xrefIndex, error) { 689 ctx, done := event.Start(ctx, "cache.snapshot.References") 690 defer done() 691 692 indexes := make([]xrefIndex, len(ids)) 693 pre := func(i int, ph *packageHandle) bool { 694 data, err := filecache.Get(xrefsKind, ph.key) 695 if err == nil { // hit 696 indexes[i] = xrefIndex{mp: ph.mp, data: data} 697 return false 698 } else if err != filecache.ErrNotFound { 699 event.Error(ctx, "reading xrefs from filecache", err) 700 } 701 return true 702 } 703 post := func(i int, pkg *Package) { 704 indexes[i] = xrefIndex{mp: pkg.metadata, data: pkg.pkg.xrefs()} 705 } 706 return indexes, s.forEachPackage(ctx, ids, pre, post) 707 } 708 709 // An xrefIndex is a helper for looking up references in a given package. 710 type xrefIndex struct { 711 mp *metadata.Package 712 data []byte 713 } 714 715 func (index xrefIndex) Lookup(targets map[PackagePath]map[objectpath.Path]struct{}) []protocol.Location { 716 return xrefs.Lookup(index.mp, index.data, targets) 717 } 718 719 // MethodSets returns method-set indexes for the specified packages. 720 // 721 // If these indexes cannot be loaded from cache, the requested packages may 722 // be type-checked. 723 func (s *Snapshot) MethodSets(ctx context.Context, ids ...PackageID) ([]*methodsets.Index, error) { 724 ctx, done := event.Start(ctx, "cache.snapshot.MethodSets") 725 defer done() 726 727 indexes := make([]*methodsets.Index, len(ids)) 728 pre := func(i int, ph *packageHandle) bool { 729 data, err := filecache.Get(methodSetsKind, ph.key) 730 if err == nil { // hit 731 indexes[i] = methodsets.Decode(data) 732 return false 733 } else if err != filecache.ErrNotFound { 734 event.Error(ctx, "reading methodsets from filecache", err) 735 } 736 return true 737 } 738 post := func(i int, pkg *Package) { 739 indexes[i] = pkg.pkg.methodsets() 740 } 741 return indexes, s.forEachPackage(ctx, ids, pre, post) 742 } 743 744 // MetadataForFile returns a new slice containing metadata for each 745 // package containing the Go file identified by uri, ordered by the 746 // number of CompiledGoFiles (i.e. "narrowest" to "widest" package), 747 // and secondarily by IsIntermediateTestVariant (false < true). 748 // The result may include tests and intermediate test variants of 749 // importable packages. 750 // It returns an error if the context was cancelled. 751 func (s *Snapshot) MetadataForFile(ctx context.Context, uri protocol.DocumentURI) ([]*metadata.Package, error) { 752 if s.view.typ == AdHocView { 753 // As described in golang/go#57209, in ad-hoc workspaces (where we load ./ 754 // rather than ./...), preempting the directory load with file loads can 755 // lead to an inconsistent outcome, where certain files are loaded with 756 // command-line-arguments packages and others are loaded only in the ad-hoc 757 // package. Therefore, ensure that the workspace is loaded before doing any 758 // file loads. 759 if err := s.awaitLoaded(ctx); err != nil { 760 return nil, err 761 } 762 } 763 764 s.mu.Lock() 765 766 // Start with the set of package associations derived from the last load. 767 ids := s.meta.IDs[uri] 768 769 shouldLoad := false // whether any packages containing uri are marked 'shouldLoad' 770 for _, id := range ids { 771 if pkgs, _ := s.shouldLoad.Get(id); len(pkgs) > 0 { 772 shouldLoad = true 773 } 774 } 775 776 // Check if uri is known to be unloadable. 777 unloadable := s.unloadableFiles.Contains(uri) 778 779 s.mu.Unlock() 780 781 // Reload if loading is likely to improve the package associations for uri: 782 // - uri is not contained in any valid packages 783 // - ...or one of the packages containing uri is marked 'shouldLoad' 784 // - ...but uri is not unloadable 785 if (shouldLoad || len(ids) == 0) && !unloadable { 786 scope := fileLoadScope(uri) 787 err := s.load(ctx, false, scope) 788 789 // 790 // Return the context error here as the current operation is no longer 791 // valid. 792 if err != nil { 793 // Guard against failed loads due to context cancellation. We don't want 794 // to mark loads as completed if they failed due to context cancellation. 795 if ctx.Err() != nil { 796 return nil, ctx.Err() 797 } 798 799 // Don't return an error here, as we may still return stale IDs. 800 // Furthermore, the result of MetadataForFile should be consistent upon 801 // subsequent calls, even if the file is marked as unloadable. 802 if !errors.Is(err, errNoPackages) { 803 event.Error(ctx, "MetadataForFile", err) 804 } 805 } 806 807 // We must clear scopes after loading. 808 // 809 // TODO(rfindley): unlike reloadWorkspace, this is simply marking loaded 810 // packages as loaded. We could do this from snapshot.load and avoid 811 // raciness. 812 s.clearShouldLoad(scope) 813 } 814 815 // Retrieve the metadata. 816 s.mu.Lock() 817 defer s.mu.Unlock() 818 ids = s.meta.IDs[uri] 819 metas := make([]*metadata.Package, len(ids)) 820 for i, id := range ids { 821 metas[i] = s.meta.Packages[id] 822 if metas[i] == nil { 823 panic("nil metadata") 824 } 825 } 826 // Metadata is only ever added by loading, 827 // so if we get here and still have 828 // no IDs, uri is unloadable. 829 if !unloadable && len(ids) == 0 { 830 s.unloadableFiles.Add(uri) 831 } 832 833 // Sort packages "narrowest" to "widest" (in practice: 834 // non-tests before tests), and regular packages before 835 // their intermediate test variants (which have the same 836 // files but different imports). 837 sort.Slice(metas, func(i, j int) bool { 838 x, y := metas[i], metas[j] 839 xfiles, yfiles := len(x.CompiledGoFiles), len(y.CompiledGoFiles) 840 if xfiles != yfiles { 841 return xfiles < yfiles 842 } 843 return boolLess(x.IsIntermediateTestVariant(), y.IsIntermediateTestVariant()) 844 }) 845 846 return metas, nil 847 } 848 849 func boolLess(x, y bool) bool { return !x && y } // false < true 850 851 // ReverseDependencies returns a new mapping whose entries are 852 // the ID and Metadata of each package in the workspace that 853 // directly or transitively depend on the package denoted by id, 854 // excluding id itself. 855 func (s *Snapshot) ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*metadata.Package, error) { 856 if err := s.awaitLoaded(ctx); err != nil { 857 return nil, err 858 } 859 860 meta := s.MetadataGraph() 861 var rdeps map[PackageID]*metadata.Package 862 if transitive { 863 rdeps = meta.ReverseReflexiveTransitiveClosure(id) 864 865 // Remove the original package ID from the map. 866 // (Callers all want irreflexivity but it's easier 867 // to compute reflexively then subtract.) 868 delete(rdeps, id) 869 870 } else { 871 // direct reverse dependencies 872 rdeps = make(map[PackageID]*metadata.Package) 873 for _, rdepID := range meta.ImportedBy[id] { 874 if rdep := meta.Packages[rdepID]; rdep != nil { 875 rdeps[rdepID] = rdep 876 } 877 } 878 } 879 880 return rdeps, nil 881 } 882 883 // -- Active package tracking -- 884 // 885 // We say a package is "active" if any of its files are open. 886 // This is an optimization: the "active" concept is an 887 // implementation detail of the cache and is not exposed 888 // in the source or Snapshot API. 889 // After type-checking we keep active packages in memory. 890 // The activePackages persistent map does bookkeeping for 891 // the set of active packages. 892 893 // getActivePackage returns a the memoized active package for id, if it exists. 894 // If id is not active or has not yet been type-checked, it returns nil. 895 func (s *Snapshot) getActivePackage(id PackageID) *Package { 896 s.mu.Lock() 897 defer s.mu.Unlock() 898 899 if value, ok := s.activePackages.Get(id); ok { 900 return value 901 } 902 return nil 903 } 904 905 // setActivePackage checks if pkg is active, and if so either records it in 906 // the active packages map or returns the existing memoized active package for id. 907 func (s *Snapshot) setActivePackage(id PackageID, pkg *Package) { 908 s.mu.Lock() 909 defer s.mu.Unlock() 910 911 if _, ok := s.activePackages.Get(id); ok { 912 return // already memoized 913 } 914 915 if containsOpenFileLocked(s, pkg.Metadata()) { 916 s.activePackages.Set(id, pkg, nil) 917 } else { 918 s.activePackages.Set(id, (*Package)(nil), nil) // remember that pkg is not open 919 } 920 } 921 922 func (s *Snapshot) resetActivePackagesLocked() { 923 s.activePackages.Destroy() 924 s.activePackages = new(persistent.Map[PackageID, *Package]) 925 } 926 927 // See Session.FileWatchingGlobPatterns for a description of gopls' file 928 // watching heuristic. 929 func (s *Snapshot) fileWatchingGlobPatterns() map[protocol.RelativePattern]unit { 930 // Always watch files that may change the view definition. 931 patterns := make(map[protocol.RelativePattern]unit) 932 933 // If GOWORK is outside the folder, ensure we are watching it. 934 if s.view.gowork != "" && !s.view.folder.Dir.Encloses(s.view.gowork) { 935 workPattern := protocol.RelativePattern{ 936 BaseURI: s.view.gowork.Dir(), 937 Pattern: path.Base(string(s.view.gowork)), 938 } 939 patterns[workPattern] = unit{} 940 } 941 942 extensions := "go,mod,sum,work" 943 for _, ext := range s.Options().TemplateExtensions { 944 extensions += "," + ext 945 } 946 watchGoFiles := fmt.Sprintf("**/*.{%s}", extensions) 947 948 var dirs []string 949 if s.view.moduleMode() { 950 if s.view.typ == GoWorkView { 951 workVendorDir := filepath.Join(s.view.gowork.Dir().Path(), "vendor") 952 workVendorURI := protocol.URIFromPath(workVendorDir) 953 patterns[protocol.RelativePattern{BaseURI: workVendorURI, Pattern: watchGoFiles}] = unit{} 954 } 955 956 // In module mode, watch directories containing active modules, and collect 957 // these dirs for later filtering the set of known directories. 958 // 959 // The assumption is that the user is not actively editing non-workspace 960 // modules, so don't pay the price of file watching. 961 for modFile := range s.view.workspaceModFiles { 962 dir := filepath.Dir(modFile.Path()) 963 dirs = append(dirs, dir) 964 965 // TODO(golang/go#64724): thoroughly test these patterns, particularly on 966 // on Windows. 967 // 968 // Note that glob patterns should use '/' on Windows: 969 // https://code.visualstudio.com/docs/editor/glob-patterns 970 patterns[protocol.RelativePattern{BaseURI: modFile.Dir(), Pattern: watchGoFiles}] = unit{} 971 } 972 } else { 973 // In non-module modes (GOPATH or AdHoc), we just watch the workspace root. 974 dirs = []string{s.view.root.Path()} 975 patterns[protocol.RelativePattern{Pattern: watchGoFiles}] = unit{} 976 } 977 978 if s.watchSubdirs() { 979 // Some clients (e.g. VS Code) do not send notifications for changes to 980 // directories that contain Go code (golang/go#42348). To handle this, 981 // explicitly watch all of the directories in the workspace. We find them 982 // by adding the directories of every file in the snapshot's workspace 983 // directories. There may be thousands of patterns, each a single 984 // directory. 985 // 986 // We compute this set by looking at files that we've previously observed. 987 // This may miss changed to directories that we haven't observed, but that 988 // shouldn't matter as there is nothing to invalidate (if a directory falls 989 // in forest, etc). 990 // 991 // (A previous iteration created a single glob pattern holding a union of 992 // all the directories, but this was found to cause VS Code to get stuck 993 // for several minutes after a buffer was saved twice in a workspace that 994 // had >8000 watched directories.) 995 // 996 // Some clients (notably coc.nvim, which uses watchman for globs) perform 997 // poorly with a large list of individual directories. 998 s.addKnownSubdirs(patterns, dirs) 999 } 1000 1001 return patterns 1002 } 1003 1004 func (s *Snapshot) addKnownSubdirs(patterns map[protocol.RelativePattern]unit, wsDirs []string) { 1005 s.mu.Lock() 1006 defer s.mu.Unlock() 1007 1008 s.files.getDirs().Range(func(dir string) { 1009 for _, wsDir := range wsDirs { 1010 if pathutil.InDir(wsDir, dir) { 1011 patterns[protocol.RelativePattern{Pattern: filepath.ToSlash(dir)}] = unit{} 1012 } 1013 } 1014 }) 1015 } 1016 1017 // watchSubdirs reports whether gopls should request separate file watchers for 1018 // each relevant subdirectory. This is necessary only for clients (namely VS 1019 // Code) that do not send notifications for individual files in a directory 1020 // when the entire directory is deleted. 1021 func (s *Snapshot) watchSubdirs() bool { 1022 switch p := s.Options().SubdirWatchPatterns; p { 1023 case settings.SubdirWatchPatternsOn: 1024 return true 1025 case settings.SubdirWatchPatternsOff: 1026 return false 1027 case settings.SubdirWatchPatternsAuto: 1028 // See the documentation of InternalOptions.SubdirWatchPatterns for an 1029 // explanation of why VS Code gets a different default value here. 1030 // 1031 // Unfortunately, there is no authoritative list of client names, nor any 1032 // requirements that client names do not change. We should update the VS 1033 // Code extension to set a default value of "subdirWatchPatterns" to "on", 1034 // so that this workaround is only temporary. 1035 if s.Options().ClientInfo != nil && s.Options().ClientInfo.Name == "Visual Studio Code" { 1036 return true 1037 } 1038 return false 1039 default: 1040 bug.Reportf("invalid subdirWatchPatterns: %q", p) 1041 return false 1042 } 1043 } 1044 1045 // filesInDir returns all files observed by the snapshot that are contained in 1046 // a directory with the provided URI. 1047 func (s *Snapshot) filesInDir(uri protocol.DocumentURI) []protocol.DocumentURI { 1048 s.mu.Lock() 1049 defer s.mu.Unlock() 1050 1051 dir := uri.Path() 1052 if !s.files.getDirs().Contains(dir) { 1053 return nil 1054 } 1055 var files []protocol.DocumentURI 1056 s.files.foreach(func(uri protocol.DocumentURI, _ file.Handle) { 1057 if pathutil.InDir(dir, uri.Path()) { 1058 files = append(files, uri) 1059 } 1060 }) 1061 return files 1062 } 1063 1064 // WorkspaceMetadata returns a new, unordered slice containing 1065 // metadata for all ordinary and test packages (but not 1066 // intermediate test variants) in the workspace. 1067 // 1068 // The workspace is the set of modules typically defined by a 1069 // go.work file. It is not transitively closed: for example, 1070 // the standard library is not usually part of the workspace 1071 // even though every module in the workspace depends on it. 1072 // 1073 // Operations that must inspect all the dependencies of the 1074 // workspace packages should instead use AllMetadata. 1075 func (s *Snapshot) WorkspaceMetadata(ctx context.Context) ([]*metadata.Package, error) { 1076 if err := s.awaitLoaded(ctx); err != nil { 1077 return nil, err 1078 } 1079 1080 s.mu.Lock() 1081 defer s.mu.Unlock() 1082 1083 meta := make([]*metadata.Package, 0, s.workspacePackages.Len()) 1084 s.workspacePackages.Range(func(id PackageID, _ PackagePath) { 1085 meta = append(meta, s.meta.Packages[id]) 1086 }) 1087 return meta, nil 1088 } 1089 1090 // isWorkspacePackage reports whether the given package ID refers to a 1091 // workspace package for the snapshot. 1092 func (s *Snapshot) isWorkspacePackage(id PackageID) bool { 1093 s.mu.Lock() 1094 defer s.mu.Unlock() 1095 _, ok := s.workspacePackages.Value(id) 1096 return ok 1097 } 1098 1099 // Symbols extracts and returns symbol information for every file contained in 1100 // a loaded package. It awaits snapshot loading. 1101 // 1102 // If workspaceOnly is set, this only includes symbols from files in a 1103 // workspace package. Otherwise, it returns symbols from all loaded packages. 1104 // 1105 // TODO(rfindley): move to symbols.go. 1106 func (s *Snapshot) Symbols(ctx context.Context, workspaceOnly bool) (map[protocol.DocumentURI][]Symbol, error) { 1107 var ( 1108 meta []*metadata.Package 1109 err error 1110 ) 1111 if workspaceOnly { 1112 meta, err = s.WorkspaceMetadata(ctx) 1113 } else { 1114 meta, err = s.AllMetadata(ctx) 1115 } 1116 if err != nil { 1117 return nil, fmt.Errorf("loading metadata: %v", err) 1118 } 1119 1120 goFiles := make(map[protocol.DocumentURI]struct{}) 1121 for _, mp := range meta { 1122 for _, uri := range mp.GoFiles { 1123 goFiles[uri] = struct{}{} 1124 } 1125 for _, uri := range mp.CompiledGoFiles { 1126 goFiles[uri] = struct{}{} 1127 } 1128 } 1129 1130 // Symbolize them in parallel. 1131 var ( 1132 group errgroup.Group 1133 nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU 1134 resultMu sync.Mutex 1135 result = make(map[protocol.DocumentURI][]Symbol) 1136 ) 1137 group.SetLimit(nprocs) 1138 for uri := range goFiles { 1139 uri := uri 1140 group.Go(func() error { 1141 symbols, err := s.symbolize(ctx, uri) 1142 if err != nil { 1143 return err 1144 } 1145 resultMu.Lock() 1146 result[uri] = symbols 1147 resultMu.Unlock() 1148 return nil 1149 }) 1150 } 1151 // Keep going on errors, but log the first failure. 1152 // Partial results are better than no symbol results. 1153 if err := group.Wait(); err != nil { 1154 event.Error(ctx, "getting snapshot symbols", err) 1155 } 1156 return result, nil 1157 } 1158 1159 // AllMetadata returns a new unordered array of metadata for 1160 // all packages known to this snapshot, which includes the 1161 // packages of all workspace modules plus their transitive 1162 // import dependencies. 1163 // 1164 // It may also contain ad-hoc packages for standalone files. 1165 // It includes all test variants. 1166 // 1167 // TODO(rfindley): Replace this with s.MetadataGraph(). 1168 func (s *Snapshot) AllMetadata(ctx context.Context) ([]*metadata.Package, error) { 1169 if err := s.awaitLoaded(ctx); err != nil { 1170 return nil, err 1171 } 1172 1173 g := s.MetadataGraph() 1174 1175 meta := make([]*metadata.Package, 0, len(g.Packages)) 1176 for _, mp := range g.Packages { 1177 meta = append(meta, mp) 1178 } 1179 return meta, nil 1180 } 1181 1182 // GoModForFile returns the URI of the go.mod file for the given URI. 1183 // 1184 // TODO(rfindley): clarify that this is only active modules. Or update to just 1185 // use findRootPattern. 1186 func (s *Snapshot) GoModForFile(uri protocol.DocumentURI) protocol.DocumentURI { 1187 return moduleForURI(s.view.workspaceModFiles, uri) 1188 } 1189 1190 func moduleForURI(modFiles map[protocol.DocumentURI]struct{}, uri protocol.DocumentURI) protocol.DocumentURI { 1191 var match protocol.DocumentURI 1192 for modURI := range modFiles { 1193 if !modURI.Dir().Encloses(uri) { 1194 continue 1195 } 1196 if len(modURI) > len(match) { 1197 match = modURI 1198 } 1199 } 1200 return match 1201 } 1202 1203 // nearestModFile finds the nearest go.mod file contained in the directory 1204 // containing uri, or a parent of that directory. 1205 // 1206 // The given uri must be a file, not a directory. 1207 func nearestModFile(ctx context.Context, uri protocol.DocumentURI, fs file.Source) (protocol.DocumentURI, error) { 1208 dir := filepath.Dir(uri.Path()) 1209 return findRootPattern(ctx, protocol.URIFromPath(dir), "go.mod", fs) 1210 } 1211 1212 // Metadata returns the metadata for the specified package, 1213 // or nil if it was not found. 1214 func (s *Snapshot) Metadata(id PackageID) *metadata.Package { 1215 s.mu.Lock() 1216 defer s.mu.Unlock() 1217 return s.meta.Packages[id] 1218 } 1219 1220 // clearShouldLoad clears package IDs that no longer need to be reloaded after 1221 // scopes has been loaded. 1222 func (s *Snapshot) clearShouldLoad(scopes ...loadScope) { 1223 s.mu.Lock() 1224 defer s.mu.Unlock() 1225 1226 for _, scope := range scopes { 1227 switch scope := scope.(type) { 1228 case packageLoadScope: 1229 scopePath := PackagePath(scope) 1230 var toDelete []PackageID 1231 s.shouldLoad.Range(func(id PackageID, pkgPaths []PackagePath) { 1232 for _, pkgPath := range pkgPaths { 1233 if pkgPath == scopePath { 1234 toDelete = append(toDelete, id) 1235 } 1236 } 1237 }) 1238 for _, id := range toDelete { 1239 s.shouldLoad.Delete(id) 1240 } 1241 case fileLoadScope: 1242 uri := protocol.DocumentURI(scope) 1243 ids := s.meta.IDs[uri] 1244 for _, id := range ids { 1245 s.shouldLoad.Delete(id) 1246 } 1247 } 1248 } 1249 } 1250 1251 // FindFile returns the FileHandle for the given URI, if it is already 1252 // in the given snapshot. 1253 // TODO(adonovan): delete this operation; use ReadFile instead. 1254 func (s *Snapshot) FindFile(uri protocol.DocumentURI) file.Handle { 1255 s.mu.Lock() 1256 defer s.mu.Unlock() 1257 1258 result, _ := s.files.get(uri) 1259 return result 1260 } 1261 1262 // ReadFile returns a File for the given URI. If the file is unknown it is added 1263 // to the managed set. 1264 // 1265 // ReadFile succeeds even if the file does not exist. A non-nil error return 1266 // indicates some type of internal error, for example if ctx is cancelled. 1267 func (s *Snapshot) ReadFile(ctx context.Context, uri protocol.DocumentURI) (file.Handle, error) { 1268 s.mu.Lock() 1269 defer s.mu.Unlock() 1270 1271 fh, ok := s.files.get(uri) 1272 if !ok { 1273 var err error 1274 fh, err = s.view.fs.ReadFile(ctx, uri) 1275 if err != nil { 1276 return nil, err 1277 } 1278 s.files.set(uri, fh) 1279 } 1280 return fh, nil 1281 } 1282 1283 // preloadFiles delegates to the view FileSource to read the requested uris in 1284 // parallel, without holding the snapshot lock. 1285 func (s *Snapshot) preloadFiles(ctx context.Context, uris []protocol.DocumentURI) { 1286 files := make([]file.Handle, len(uris)) 1287 var wg sync.WaitGroup 1288 iolimit := make(chan struct{}, 20) // I/O concurrency limiting semaphore 1289 for i, uri := range uris { 1290 wg.Add(1) 1291 iolimit <- struct{}{} 1292 go func(i int, uri protocol.DocumentURI) { 1293 defer wg.Done() 1294 fh, err := s.view.fs.ReadFile(ctx, uri) 1295 <-iolimit 1296 if err != nil && ctx.Err() == nil { 1297 event.Error(ctx, fmt.Sprintf("reading %s", uri), err) 1298 return 1299 } 1300 files[i] = fh 1301 }(i, uri) 1302 } 1303 wg.Wait() 1304 1305 s.mu.Lock() 1306 defer s.mu.Unlock() 1307 1308 for i, fh := range files { 1309 if fh == nil { 1310 continue // error logged above 1311 } 1312 uri := uris[i] 1313 if _, ok := s.files.get(uri); !ok { 1314 s.files.set(uri, fh) 1315 } 1316 } 1317 } 1318 1319 // IsOpen returns whether the editor currently has a file open. 1320 func (s *Snapshot) IsOpen(uri protocol.DocumentURI) bool { 1321 s.mu.Lock() 1322 defer s.mu.Unlock() 1323 1324 fh, _ := s.files.get(uri) 1325 _, open := fh.(*overlay) 1326 return open 1327 } 1328 1329 // MetadataGraph returns the current metadata graph for the Snapshot. 1330 func (s *Snapshot) MetadataGraph() *metadata.Graph { 1331 s.mu.Lock() 1332 defer s.mu.Unlock() 1333 return s.meta 1334 } 1335 1336 // InitializationError returns the last error from initialization. 1337 func (s *Snapshot) InitializationError() *InitializationError { 1338 s.mu.Lock() 1339 defer s.mu.Unlock() 1340 return s.initialErr 1341 } 1342 1343 // awaitLoaded awaits initialization and package reloading, and returns 1344 // ctx.Err(). 1345 func (s *Snapshot) awaitLoaded(ctx context.Context) error { 1346 // Do not return results until the snapshot's view has been initialized. 1347 s.AwaitInitialized(ctx) 1348 s.reloadWorkspace(ctx) 1349 return ctx.Err() 1350 } 1351 1352 // AwaitInitialized waits until the snapshot's view is initialized. 1353 func (s *Snapshot) AwaitInitialized(ctx context.Context) { 1354 select { 1355 case <-ctx.Done(): 1356 return 1357 case <-s.view.initialWorkspaceLoad: 1358 } 1359 // We typically prefer to run something as intensive as the IWL without 1360 // blocking. I'm not sure if there is a way to do that here. 1361 s.initialize(ctx, false) 1362 } 1363 1364 // reloadWorkspace reloads the metadata for all invalidated workspace packages. 1365 func (s *Snapshot) reloadWorkspace(ctx context.Context) { 1366 var scopes []loadScope 1367 var seen map[PackagePath]bool 1368 s.mu.Lock() 1369 s.shouldLoad.Range(func(_ PackageID, pkgPaths []PackagePath) { 1370 for _, pkgPath := range pkgPaths { 1371 if seen == nil { 1372 seen = make(map[PackagePath]bool) 1373 } 1374 if seen[pkgPath] { 1375 continue 1376 } 1377 seen[pkgPath] = true 1378 scopes = append(scopes, packageLoadScope(pkgPath)) 1379 } 1380 }) 1381 s.mu.Unlock() 1382 1383 if len(scopes) == 0 { 1384 return 1385 } 1386 1387 // For an ad-hoc view, we cannot reload by package path. Just reload the view. 1388 if s.view.typ == AdHocView { 1389 scopes = []loadScope{viewLoadScope{}} 1390 } 1391 1392 err := s.load(ctx, false, scopes...) 1393 1394 // Unless the context was canceled, set "shouldLoad" to false for all 1395 // of the metadata we attempted to load. 1396 if !errors.Is(err, context.Canceled) { 1397 s.clearShouldLoad(scopes...) 1398 if err != nil { 1399 event.Error(ctx, "reloading workspace", err, s.Labels()...) 1400 } 1401 } 1402 } 1403 1404 func (s *Snapshot) orphanedFileDiagnostics(ctx context.Context, overlays []*overlay) ([]*Diagnostic, error) { 1405 if err := s.awaitLoaded(ctx); err != nil { 1406 return nil, err 1407 } 1408 1409 var diagnostics []*Diagnostic 1410 var orphaned []*overlay 1411 searchOverlays: 1412 for _, o := range overlays { 1413 uri := o.URI() 1414 if s.IsBuiltin(uri) || s.FileKind(o) != file.Go { 1415 continue 1416 } 1417 mps, err := s.MetadataForFile(ctx, uri) 1418 if err != nil { 1419 return nil, err 1420 } 1421 for _, mp := range mps { 1422 if !metadata.IsCommandLineArguments(mp.ID) || mp.Standalone { 1423 continue searchOverlays 1424 } 1425 } 1426 // With zero-config gopls (golang/go#57979), orphaned file diagnostics 1427 // include diagnostics for orphaned files -- not just diagnostics relating 1428 // to the reason the files are opened. 1429 // 1430 // This is because orphaned files are never considered part of a workspace 1431 // package: if they are loaded by a view, that view is arbitrary, and they 1432 // may be loaded by multiple views. If they were to be diagnosed by 1433 // multiple views, their diagnostics may become inconsistent. 1434 if len(mps) > 0 { 1435 diags, err := s.PackageDiagnostics(ctx, mps[0].ID) 1436 if err != nil { 1437 return nil, err 1438 } 1439 diagnostics = append(diagnostics, diags[uri]...) 1440 } 1441 orphaned = append(orphaned, o) 1442 } 1443 1444 if len(orphaned) == 0 { 1445 return nil, nil 1446 } 1447 1448 loadedModFiles := make(map[protocol.DocumentURI]struct{}) // all mod files, including dependencies 1449 ignoredFiles := make(map[protocol.DocumentURI]bool) // files reported in packages.Package.IgnoredFiles 1450 1451 g := s.MetadataGraph() 1452 for _, meta := range g.Packages { 1453 if meta.Module != nil && meta.Module.GoMod != "" { 1454 gomod := protocol.URIFromPath(meta.Module.GoMod) 1455 loadedModFiles[gomod] = struct{}{} 1456 } 1457 for _, ignored := range meta.IgnoredFiles { 1458 ignoredFiles[ignored] = true 1459 } 1460 } 1461 1462 initialErr := s.InitializationError() 1463 1464 for _, fh := range orphaned { 1465 pgf, rng, ok := orphanedFileDiagnosticRange(ctx, s.view.parseCache, fh) 1466 if !ok { 1467 continue // e.g. cancellation or parse error 1468 } 1469 1470 var ( 1471 msg string // if non-empty, report a diagnostic with this message 1472 suggestedFixes []SuggestedFix // associated fixes, if any 1473 ) 1474 if initialErr != nil { 1475 msg = fmt.Sprintf("initialization failed: %v", initialErr.MainError) 1476 } else if goMod, err := nearestModFile(ctx, fh.URI(), s); err == nil && goMod != "" { 1477 // If we have a relevant go.mod file, check whether the file is orphaned 1478 // due to its go.mod file being inactive. We could also offer a 1479 // prescriptive diagnostic in the case that there is no go.mod file, but it 1480 // is harder to be precise in that case, and less important. 1481 if _, ok := loadedModFiles[goMod]; !ok { 1482 modDir := filepath.Dir(goMod.Path()) 1483 viewDir := s.view.folder.Dir.Path() 1484 1485 // When the module is underneath the view dir, we offer 1486 // "use all modules" quick-fixes. 1487 inDir := pathutil.InDir(viewDir, modDir) 1488 1489 if rel, err := filepath.Rel(viewDir, modDir); err == nil { 1490 modDir = rel 1491 } 1492 1493 var fix string 1494 if s.view.folder.Env.GoVersion >= 18 { 1495 if s.view.gowork != "" { 1496 fix = fmt.Sprintf("To fix this problem, you can add this module to your go.work file (%s)", s.view.gowork) 1497 if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work use`", command.RunGoWorkArgs{ 1498 ViewID: s.view.ID(), 1499 Args: []string{"use", modDir}, 1500 }); err == nil { 1501 suggestedFixes = append(suggestedFixes, SuggestedFix{ 1502 Title: "Use this module in your go.work file", 1503 Command: &cmd, 1504 ActionKind: protocol.QuickFix, 1505 }) 1506 } 1507 1508 if inDir { 1509 if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work use -r`", command.RunGoWorkArgs{ 1510 ViewID: s.view.ID(), 1511 Args: []string{"use", "-r", "."}, 1512 }); err == nil { 1513 suggestedFixes = append(suggestedFixes, SuggestedFix{ 1514 Title: "Use all modules in your workspace", 1515 Command: &cmd, 1516 ActionKind: protocol.QuickFix, 1517 }) 1518 } 1519 } 1520 } else { 1521 fix = "To fix this problem, you can add a go.work file that uses this directory." 1522 1523 if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work init && go work use`", command.RunGoWorkArgs{ 1524 ViewID: s.view.ID(), 1525 InitFirst: true, 1526 Args: []string{"use", modDir}, 1527 }); err == nil { 1528 suggestedFixes = []SuggestedFix{ 1529 { 1530 Title: "Add a go.work file using this module", 1531 Command: &cmd, 1532 ActionKind: protocol.QuickFix, 1533 }, 1534 } 1535 } 1536 1537 if inDir { 1538 if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work init && go work use -r`", command.RunGoWorkArgs{ 1539 ViewID: s.view.ID(), 1540 InitFirst: true, 1541 Args: []string{"use", "-r", "."}, 1542 }); err == nil { 1543 suggestedFixes = append(suggestedFixes, SuggestedFix{ 1544 Title: "Add a go.work file using all modules in your workspace", 1545 Command: &cmd, 1546 ActionKind: protocol.QuickFix, 1547 }) 1548 } 1549 } 1550 } 1551 } else { 1552 fix = `To work with multiple modules simultaneously, please upgrade to Go 1.18 or 1553 later, reinstall gopls, and use a go.work file.` 1554 } 1555 1556 msg = fmt.Sprintf(`This file is within module %q, which is not included in your workspace. 1557 %s 1558 See the documentation for more information on setting up your workspace: 1559 https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`, modDir, fix) 1560 } 1561 } 1562 1563 if msg == "" { 1564 if ignoredFiles[fh.URI()] { 1565 // TODO(rfindley): use the constraint package to check if the file 1566 // _actually_ satisfies the current build context. 1567 hasConstraint := false 1568 walkConstraints(pgf.File, func(constraint.Expr) bool { 1569 hasConstraint = true 1570 return false 1571 }) 1572 var fix string 1573 if hasConstraint { 1574 fix = `This file may be excluded due to its build tags; try adding "-tags=<build tag>" to your gopls "buildFlags" configuration 1575 See the documentation for more information on working with build tags: 1576 https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string.` 1577 } else if strings.Contains(filepath.Base(fh.URI().Path()), "_") { 1578 fix = `This file may be excluded due to its GOOS/GOARCH, or other build constraints.` 1579 } else { 1580 fix = `This file is ignored by your gopls build.` // we don't know why 1581 } 1582 msg = fmt.Sprintf("No packages found for open file %s.\n%s", fh.URI().Path(), fix) 1583 } else { 1584 // Fall back: we're not sure why the file is orphaned. 1585 // TODO(rfindley): we could do better here, diagnosing the lack of a 1586 // go.mod file and malformed file names (see the perc%ent marker test). 1587 msg = fmt.Sprintf("No packages found for open file %s.", fh.URI().Path()) 1588 } 1589 } 1590 1591 if msg != "" { 1592 d := &Diagnostic{ 1593 URI: fh.URI(), 1594 Range: rng, 1595 Severity: protocol.SeverityWarning, 1596 Source: ListError, 1597 Message: msg, 1598 SuggestedFixes: suggestedFixes, 1599 } 1600 if ok := bundleQuickFixes(d); !ok { 1601 bug.Reportf("failed to bundle quick fixes for %v", d) 1602 } 1603 // Only report diagnostics if we detect an actual exclusion. 1604 diagnostics = append(diagnostics, d) 1605 } 1606 } 1607 return diagnostics, nil 1608 } 1609 1610 // orphanedFileDiagnosticRange returns the position to use for orphaned file diagnostics. 1611 // We only warn about an orphaned file if it is well-formed enough to actually 1612 // be part of a package. Otherwise, we need more information. 1613 func orphanedFileDiagnosticRange(ctx context.Context, cache *parseCache, fh file.Handle) (*ParsedGoFile, protocol.Range, bool) { 1614 pgfs, err := cache.parseFiles(ctx, token.NewFileSet(), ParseHeader, false, fh) 1615 if err != nil { 1616 return nil, protocol.Range{}, false 1617 } 1618 pgf := pgfs[0] 1619 if !pgf.File.Name.Pos().IsValid() { 1620 return nil, protocol.Range{}, false 1621 } 1622 rng, err := pgf.PosRange(pgf.File.Name.Pos(), pgf.File.Name.End()) 1623 if err != nil { 1624 return nil, protocol.Range{}, false 1625 } 1626 return pgf, rng, true 1627 } 1628 1629 // TODO(golang/go#53756): this function needs to consider more than just the 1630 // absolute URI, for example: 1631 // - the position of /vendor/ with respect to the relevant module root 1632 // - whether or not go.work is in use (as vendoring isn't supported in workspace mode) 1633 // 1634 // Most likely, each call site of inVendor needs to be reconsidered to 1635 // understand and correctly implement the desired behavior. 1636 func inVendor(uri protocol.DocumentURI) bool { 1637 _, after, found := strings.Cut(string(uri), "/vendor/") 1638 // Only subdirectories of /vendor/ are considered vendored 1639 // (/vendor/a/foo.go is vendored, /vendor/foo.go is not). 1640 return found && strings.Contains(after, "/") 1641 } 1642 1643 // clone copies state from the receiver into a new Snapshot, applying the given 1644 // state changes. 1645 // 1646 // The caller of clone must call Snapshot.decref on the returned 1647 // snapshot when they are finished using it. 1648 // 1649 // The resulting bool reports whether the change invalidates any derived 1650 // diagnostics for the snapshot, for example because it invalidates Packages or 1651 // parsed go.mod files. This is used to mark a view as needing diagnosis in the 1652 // server. 1653 // 1654 // TODO(rfindley): long term, it may be better to move responsibility for 1655 // diagnostics into the Snapshot (e.g. a Snapshot.Diagnostics method), at which 1656 // point the Snapshot could be responsible for tracking and forwarding a 1657 // 'viewsToDiagnose' field. As is, this field is instead externalized in the 1658 // server.viewsToDiagnose map. Moving it to the snapshot would entirely 1659 // eliminate any 'relevance' heuristics from Session.DidModifyFiles, but would 1660 // also require more strictness about diagnostic dependencies. For example, 1661 // template.Diagnostics currently re-parses every time: there is no Snapshot 1662 // data responsible for providing these diagnostics. 1663 func (s *Snapshot) clone(ctx, bgCtx context.Context, changed StateChange, done func()) (*Snapshot, bool) { 1664 changedFiles := changed.Files 1665 ctx, stop := event.Start(ctx, "cache.snapshot.clone") 1666 defer stop() 1667 1668 s.mu.Lock() 1669 defer s.mu.Unlock() 1670 1671 // TODO(rfindley): reorganize this function to make the derivation of 1672 // needsDiagnosis clearer. 1673 needsDiagnosis := len(changed.GCDetails) > 0 || len(changed.ModuleUpgrades) > 0 1674 1675 bgCtx, cancel := context.WithCancel(bgCtx) 1676 result := &Snapshot{ 1677 sequenceID: s.sequenceID + 1, 1678 store: s.store, 1679 refcount: 1, // Snapshots are born referenced. 1680 done: done, 1681 view: s.view, 1682 backgroundCtx: bgCtx, 1683 cancel: cancel, 1684 builtin: s.builtin, 1685 initialized: s.initialized, 1686 initialErr: s.initialErr, 1687 packages: s.packages.Clone(), 1688 activePackages: s.activePackages.Clone(), 1689 files: s.files.clone(changedFiles), 1690 symbolizeHandles: cloneWithout(s.symbolizeHandles, changedFiles, nil), 1691 workspacePackages: s.workspacePackages, 1692 shouldLoad: s.shouldLoad.Clone(), // not cloneWithout: shouldLoad is cleared on loads 1693 unloadableFiles: s.unloadableFiles.Clone(), // not cloneWithout: typing in a file doesn't necessarily make it loadable 1694 parseModHandles: cloneWithout(s.parseModHandles, changedFiles, &needsDiagnosis), 1695 parseWorkHandles: cloneWithout(s.parseWorkHandles, changedFiles, &needsDiagnosis), 1696 modTidyHandles: cloneWithout(s.modTidyHandles, changedFiles, &needsDiagnosis), 1697 modWhyHandles: cloneWithout(s.modWhyHandles, changedFiles, &needsDiagnosis), 1698 importGraph: s.importGraph, 1699 pkgIndex: s.pkgIndex, 1700 moduleUpgrades: cloneWith(s.moduleUpgrades, changed.ModuleUpgrades), 1701 } 1702 1703 // Compute the new set of packages for which we want gc details, after 1704 // applying changed.GCDetails. 1705 if len(s.gcOptimizationDetails) > 0 || len(changed.GCDetails) > 0 { 1706 newGCDetails := make(map[metadata.PackageID]unit) 1707 for id := range s.gcOptimizationDetails { 1708 if _, ok := changed.GCDetails[id]; !ok { 1709 newGCDetails[id] = unit{} // no change 1710 } 1711 } 1712 for id, want := range changed.GCDetails { 1713 if want { 1714 newGCDetails[id] = unit{} 1715 } 1716 } 1717 if len(newGCDetails) > 0 { 1718 result.gcOptimizationDetails = newGCDetails 1719 } 1720 } 1721 1722 reinit := false 1723 1724 // Changes to vendor tree may require reinitialization, 1725 // either because of an initialization error 1726 // (e.g. "inconsistent vendoring detected"), or because 1727 // one or more modules may have moved into or out of the 1728 // vendor tree after 'go mod vendor' or 'rm -fr vendor/'. 1729 // 1730 // In this case, we consider the actual modification to see if was a creation 1731 // or deletion. 1732 // 1733 // TODO(rfindley): revisit the location of this check. 1734 for _, mod := range changed.Modifications { 1735 if inVendor(mod.URI) && (mod.Action == file.Create || mod.Action == file.Delete) || 1736 strings.HasSuffix(string(mod.URI), "/vendor/modules.txt") { 1737 1738 reinit = true 1739 break 1740 } 1741 } 1742 1743 // Collect observed file handles for changed URIs from the old snapshot, if 1744 // they exist. Importantly, we don't call ReadFile here: consider the case 1745 // where a file is added on disk; we don't want to read the newly added file 1746 // into the old snapshot, as that will break our change detection below. 1747 // 1748 // TODO(rfindley): it may be more accurate to rely on the modification type 1749 // here, similarly to what we do for vendored files above. If we happened not 1750 // to have read a file in the previous snapshot, that's not the same as it 1751 // actually being created. 1752 oldFiles := make(map[protocol.DocumentURI]file.Handle) 1753 for uri := range changedFiles { 1754 if fh, ok := s.files.get(uri); ok { 1755 oldFiles[uri] = fh 1756 } 1757 } 1758 // changedOnDisk determines if the new file handle may have changed on disk. 1759 // It over-approximates, returning true if the new file is saved and either 1760 // the old file wasn't saved, or the on-disk contents changed. 1761 // 1762 // oldFH may be nil. 1763 changedOnDisk := func(oldFH, newFH file.Handle) bool { 1764 if !newFH.SameContentsOnDisk() { 1765 return false 1766 } 1767 if oe, ne := (oldFH != nil && fileExists(oldFH)), fileExists(newFH); !oe || !ne { 1768 return oe != ne 1769 } 1770 return !oldFH.SameContentsOnDisk() || oldFH.Identity() != newFH.Identity() 1771 } 1772 1773 // Reinitialize if any workspace mod file has changed on disk. 1774 for uri, newFH := range changedFiles { 1775 if _, ok := result.view.workspaceModFiles[uri]; ok && changedOnDisk(oldFiles[uri], newFH) { 1776 reinit = true 1777 } 1778 } 1779 1780 // Finally, process sumfile changes that may affect loading. 1781 for uri, newFH := range changedFiles { 1782 if !changedOnDisk(oldFiles[uri], newFH) { 1783 continue // like with go.mod files, we only reinit when things change on disk 1784 } 1785 dir, base := filepath.Split(uri.Path()) 1786 if base == "go.work.sum" && s.view.typ == GoWorkView && dir == filepath.Dir(s.view.gowork.Path()) { 1787 reinit = true 1788 } 1789 if base == "go.sum" { 1790 modURI := protocol.URIFromPath(filepath.Join(dir, "go.mod")) 1791 if _, active := result.view.workspaceModFiles[modURI]; active { 1792 reinit = true 1793 } 1794 } 1795 } 1796 1797 // The snapshot should be initialized if either s was uninitialized, or we've 1798 // detected a change that triggers reinitialization. 1799 if reinit { 1800 result.initialized = false 1801 needsDiagnosis = true 1802 } 1803 1804 // directIDs keeps track of package IDs that have directly changed. 1805 // Note: this is not a set, it's a map from id to invalidateMetadata. 1806 directIDs := map[PackageID]bool{} 1807 1808 // Invalidate all package metadata if the workspace module has changed. 1809 if reinit { 1810 for k := range s.meta.Packages { 1811 // TODO(rfindley): this seems brittle; can we just start over? 1812 directIDs[k] = true 1813 } 1814 } 1815 1816 // Compute invalidations based on file changes. 1817 anyImportDeleted := false // import deletions can resolve cycles 1818 anyFileOpenedOrClosed := false // opened files affect workspace packages 1819 anyFileAdded := false // adding a file can resolve missing dependencies 1820 1821 for uri, newFH := range changedFiles { 1822 // The original FileHandle for this URI is cached on the snapshot. 1823 oldFH := oldFiles[uri] // may be nil 1824 _, oldOpen := oldFH.(*overlay) 1825 _, newOpen := newFH.(*overlay) 1826 1827 anyFileOpenedOrClosed = anyFileOpenedOrClosed || (oldOpen != newOpen) 1828 anyFileAdded = anyFileAdded || (oldFH == nil || !fileExists(oldFH)) && fileExists(newFH) 1829 1830 // If uri is a Go file, check if it has changed in a way that would 1831 // invalidate metadata. Note that we can't use s.view.FileKind here, 1832 // because the file type that matters is not what the *client* tells us, 1833 // but what the Go command sees. 1834 var invalidateMetadata, pkgFileChanged, importDeleted bool 1835 if strings.HasSuffix(uri.Path(), ".go") { 1836 invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, oldFH, newFH) 1837 } 1838 if invalidateMetadata { 1839 // If this is a metadata-affecting change, perhaps a reload will succeed. 1840 result.unloadableFiles.Remove(uri) 1841 needsDiagnosis = true 1842 } 1843 1844 invalidateMetadata = invalidateMetadata || reinit 1845 anyImportDeleted = anyImportDeleted || importDeleted 1846 1847 // Mark all of the package IDs containing the given file. 1848 filePackageIDs := invalidatedPackageIDs(uri, s.meta.IDs, pkgFileChanged) 1849 for id := range filePackageIDs { 1850 directIDs[id] = directIDs[id] || invalidateMetadata // may insert 'false' 1851 } 1852 1853 // Invalidate the previous modTidyHandle if any of the files have been 1854 // saved or if any of the metadata has been invalidated. 1855 // 1856 // TODO(rfindley): this seems like too-aggressive invalidation of mod 1857 // results. We should instead thread through overlays to the Go command 1858 // invocation and only run this if invalidateMetadata (and perhaps then 1859 // still do it less frequently). 1860 if invalidateMetadata || fileWasSaved(oldFH, newFH) { 1861 // Only invalidate mod tidy results for the most relevant modfile in the 1862 // workspace. This is a potentially lossy optimization for workspaces 1863 // with many modules (such as google-cloud-go, which has 145 modules as 1864 // of writing). 1865 // 1866 // While it is theoretically possible that a change in workspace module A 1867 // could affect the mod-tidiness of workspace module B (if B transitively 1868 // requires A), such changes are probably unlikely and not worth the 1869 // penalty of re-running go mod tidy for everything. Note that mod tidy 1870 // ignores GOWORK, so the two modules would have to be related by a chain 1871 // of replace directives. 1872 // 1873 // We could improve accuracy by inspecting replace directives, using 1874 // overlays in go mod tidy, and/or checking for metadata changes from the 1875 // on-disk content. 1876 // 1877 // Note that we iterate the modTidyHandles map here, rather than e.g. 1878 // using nearestModFile, because we don't have access to an accurate 1879 // FileSource at this point in the snapshot clone. 1880 const onlyInvalidateMostRelevant = true 1881 if onlyInvalidateMostRelevant { 1882 deleteMostRelevantModFile(result.modTidyHandles, uri) 1883 } else { 1884 result.modTidyHandles.Clear() 1885 } 1886 1887 // TODO(rfindley): should we apply the above heuristic to mod vuln or mod 1888 // why handles as well? 1889 // 1890 // TODO(rfindley): no tests fail if I delete the line below. 1891 result.modWhyHandles.Clear() 1892 } 1893 } 1894 1895 // Deleting an import can cause list errors due to import cycles to be 1896 // resolved. The best we can do without parsing the list error message is to 1897 // hope that list errors may have been resolved by a deleted import. 1898 // 1899 // We could do better by parsing the list error message. We already do this 1900 // to assign a better range to the list error, but for such critical 1901 // functionality as metadata, it's better to be conservative until it proves 1902 // impractical. 1903 // 1904 // We could also do better by looking at which imports were deleted and 1905 // trying to find cycles they are involved in. This fails when the file goes 1906 // from an unparseable state to a parseable state, as we don't have a 1907 // starting point to compare with. 1908 if anyImportDeleted { 1909 for id, mp := range s.meta.Packages { 1910 if len(mp.Errors) > 0 { 1911 directIDs[id] = true 1912 } 1913 } 1914 } 1915 1916 // Adding a file can resolve missing dependencies from existing packages. 1917 // 1918 // We could be smart here and try to guess which packages may have been 1919 // fixed, but until that proves necessary, just invalidate metadata for any 1920 // package with missing dependencies. 1921 if anyFileAdded { 1922 for id, mp := range s.meta.Packages { 1923 for _, impID := range mp.DepsByImpPath { 1924 if impID == "" { // missing import 1925 directIDs[id] = true 1926 break 1927 } 1928 } 1929 } 1930 } 1931 1932 // Invalidate reverse dependencies too. 1933 // idsToInvalidate keeps track of transitive reverse dependencies. 1934 // If an ID is present in the map, invalidate its types. 1935 // If an ID's value is true, invalidate its metadata too. 1936 idsToInvalidate := map[PackageID]bool{} 1937 var addRevDeps func(PackageID, bool) 1938 addRevDeps = func(id PackageID, invalidateMetadata bool) { 1939 current, seen := idsToInvalidate[id] 1940 newInvalidateMetadata := current || invalidateMetadata 1941 1942 // If we've already seen this ID, and the value of invalidate 1943 // metadata has not changed, we can return early. 1944 if seen && current == newInvalidateMetadata { 1945 return 1946 } 1947 idsToInvalidate[id] = newInvalidateMetadata 1948 for _, rid := range s.meta.ImportedBy[id] { 1949 addRevDeps(rid, invalidateMetadata) 1950 } 1951 } 1952 for id, invalidateMetadata := range directIDs { 1953 addRevDeps(id, invalidateMetadata) 1954 } 1955 1956 // Invalidated package information. 1957 for id, invalidateMetadata := range idsToInvalidate { 1958 if _, ok := directIDs[id]; ok || invalidateMetadata { 1959 if result.packages.Delete(id) { 1960 needsDiagnosis = true 1961 } 1962 } else { 1963 if entry, hit := result.packages.Get(id); hit { 1964 needsDiagnosis = true 1965 ph := entry.clone(false) 1966 result.packages.Set(id, ph, nil) 1967 } 1968 } 1969 if result.activePackages.Delete(id) { 1970 needsDiagnosis = true 1971 } 1972 } 1973 1974 // Compute which metadata updates are required. We only need to invalidate 1975 // packages directly containing the affected file, and only if it changed in 1976 // a relevant way. 1977 metadataUpdates := make(map[PackageID]*metadata.Package) 1978 for id, mp := range s.meta.Packages { 1979 invalidateMetadata := idsToInvalidate[id] 1980 1981 // For metadata that has been newly invalidated, capture package paths 1982 // requiring reloading in the shouldLoad map. 1983 if invalidateMetadata && !metadata.IsCommandLineArguments(mp.ID) { 1984 needsReload := []PackagePath{mp.PkgPath} 1985 if mp.ForTest != "" && mp.ForTest != mp.PkgPath { 1986 // When reloading test variants, always reload their ForTest package as 1987 // well. Otherwise, we may miss test variants in the resulting load. 1988 // 1989 // TODO(rfindley): is this actually sufficient? Is it possible that 1990 // other test variants may be invalidated? Either way, we should 1991 // determine exactly what needs to be reloaded here. 1992 needsReload = append(needsReload, mp.ForTest) 1993 } 1994 result.shouldLoad.Set(id, needsReload, nil) 1995 } 1996 1997 // Check whether the metadata should be deleted. 1998 if invalidateMetadata { 1999 needsDiagnosis = true 2000 metadataUpdates[id] = nil 2001 continue 2002 } 2003 } 2004 2005 // Update metadata, if necessary. 2006 result.meta = s.meta.Update(metadataUpdates) 2007 2008 // Update workspace and active packages, if necessary. 2009 if result.meta != s.meta || anyFileOpenedOrClosed { 2010 needsDiagnosis = true 2011 result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta) 2012 result.resetActivePackagesLocked() 2013 } else { 2014 result.workspacePackages = s.workspacePackages 2015 } 2016 2017 return result, needsDiagnosis 2018 } 2019 2020 // cloneWithout clones m then deletes from it the keys of changes. 2021 // 2022 // The optional didDelete variable is set to true if there were deletions. 2023 func cloneWithout[K constraints.Ordered, V1, V2 any](m *persistent.Map[K, V1], changes map[K]V2, didDelete *bool) *persistent.Map[K, V1] { 2024 m2 := m.Clone() 2025 for k := range changes { 2026 if m2.Delete(k) && didDelete != nil { 2027 *didDelete = true 2028 } 2029 } 2030 return m2 2031 } 2032 2033 // cloneWith clones m then inserts the changes into it. 2034 func cloneWith[K constraints.Ordered, V any](m *persistent.Map[K, V], changes map[K]V) *persistent.Map[K, V] { 2035 m2 := m.Clone() 2036 for k, v := range changes { 2037 m2.Set(k, v, nil) 2038 } 2039 return m2 2040 } 2041 2042 // deleteMostRelevantModFile deletes the mod file most likely to be the mod 2043 // file for the changed URI, if it exists. 2044 // 2045 // Specifically, this is the longest mod file path in a directory containing 2046 // changed. This might not be accurate if there is another mod file closer to 2047 // changed that happens not to be present in the map, but that's OK: the goal 2048 // of this function is to guarantee that IF the nearest mod file is present in 2049 // the map, it is invalidated. 2050 func deleteMostRelevantModFile(m *persistent.Map[protocol.DocumentURI, *memoize.Promise], changed protocol.DocumentURI) { 2051 var mostRelevant protocol.DocumentURI 2052 changedFile := changed.Path() 2053 2054 m.Range(func(modURI protocol.DocumentURI, _ *memoize.Promise) { 2055 if len(modURI) > len(mostRelevant) { 2056 if pathutil.InDir(filepath.Dir(modURI.Path()), changedFile) { 2057 mostRelevant = modURI 2058 } 2059 } 2060 }) 2061 if mostRelevant != "" { 2062 m.Delete(mostRelevant) 2063 } 2064 } 2065 2066 // invalidatedPackageIDs returns all packages invalidated by a change to uri. 2067 // If we haven't seen this URI before, we guess based on files in the same 2068 // directory. This is of course incorrect in build systems where packages are 2069 // not organized by directory. 2070 // 2071 // If packageFileChanged is set, the file is either a new file, or has a new 2072 // package name. In this case, all known packages in the directory will be 2073 // invalidated. 2074 func invalidatedPackageIDs(uri protocol.DocumentURI, known map[protocol.DocumentURI][]PackageID, packageFileChanged bool) map[PackageID]struct{} { 2075 invalidated := make(map[PackageID]struct{}) 2076 2077 // At a minimum, we invalidate packages known to contain uri. 2078 for _, id := range known[uri] { 2079 invalidated[id] = struct{}{} 2080 } 2081 2082 // If the file didn't move to a new package, we should only invalidate the 2083 // packages it is currently contained inside. 2084 if !packageFileChanged && len(invalidated) > 0 { 2085 return invalidated 2086 } 2087 2088 // This is a file we don't yet know about, or which has moved packages. Guess 2089 // relevant packages by considering files in the same directory. 2090 2091 // Cache of FileInfo to avoid unnecessary stats for multiple files in the 2092 // same directory. 2093 stats := make(map[string]struct { 2094 os.FileInfo 2095 error 2096 }) 2097 getInfo := func(dir string) (os.FileInfo, error) { 2098 if res, ok := stats[dir]; ok { 2099 return res.FileInfo, res.error 2100 } 2101 fi, err := os.Stat(dir) 2102 stats[dir] = struct { 2103 os.FileInfo 2104 error 2105 }{fi, err} 2106 return fi, err 2107 } 2108 dir := filepath.Dir(uri.Path()) 2109 fi, err := getInfo(dir) 2110 if err == nil { 2111 // Aggregate all possibly relevant package IDs. 2112 for knownURI, ids := range known { 2113 knownDir := filepath.Dir(knownURI.Path()) 2114 knownFI, err := getInfo(knownDir) 2115 if err != nil { 2116 continue 2117 } 2118 if os.SameFile(fi, knownFI) { 2119 for _, id := range ids { 2120 invalidated[id] = struct{}{} 2121 } 2122 } 2123 } 2124 } 2125 return invalidated 2126 } 2127 2128 // fileWasSaved reports whether the FileHandle passed in has been saved. It 2129 // accomplishes this by checking to see if the original and current FileHandles 2130 // are both overlays, and if the current FileHandle is saved while the original 2131 // FileHandle was not saved. 2132 func fileWasSaved(originalFH, currentFH file.Handle) bool { 2133 c, ok := currentFH.(*overlay) 2134 if !ok || c == nil { 2135 return true 2136 } 2137 o, ok := originalFH.(*overlay) 2138 if !ok || o == nil { 2139 return c.saved 2140 } 2141 return !o.saved && c.saved 2142 } 2143 2144 // metadataChanges detects features of the change from oldFH->newFH that may 2145 // affect package metadata. 2146 // 2147 // It uses lockedSnapshot to access cached parse information. lockedSnapshot 2148 // must be locked. 2149 // 2150 // The result parameters have the following meaning: 2151 // - invalidate means that package metadata for packages containing the file 2152 // should be invalidated. 2153 // - pkgFileChanged means that the file->package associates for the file have 2154 // changed (possibly because the file is new, or because its package name has 2155 // changed). 2156 // - importDeleted means that an import has been deleted, or we can't 2157 // determine if an import was deleted due to errors. 2158 func metadataChanges(ctx context.Context, lockedSnapshot *Snapshot, oldFH, newFH file.Handle) (invalidate, pkgFileChanged, importDeleted bool) { 2159 if oe, ne := oldFH != nil && fileExists(oldFH), fileExists(newFH); !oe || !ne { // existential changes 2160 changed := oe != ne 2161 return changed, changed, !ne // we don't know if an import was deleted 2162 } 2163 2164 // If the file hasn't changed, there's no need to reload. 2165 if oldFH.Identity() == newFH.Identity() { 2166 return false, false, false 2167 } 2168 2169 fset := token.NewFileSet() 2170 // Parse headers to compare package names and imports. 2171 oldHeads, oldErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseHeader, false, oldFH) 2172 newHeads, newErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseHeader, false, newFH) 2173 2174 if oldErr != nil || newErr != nil { 2175 errChanged := (oldErr == nil) != (newErr == nil) 2176 return errChanged, errChanged, (newErr != nil) // we don't know if an import was deleted 2177 } 2178 2179 oldHead := oldHeads[0] 2180 newHead := newHeads[0] 2181 2182 // `go list` fails completely if the file header cannot be parsed. If we go 2183 // from a non-parsing state to a parsing state, we should reload. 2184 if oldHead.ParseErr != nil && newHead.ParseErr == nil { 2185 return true, true, true // We don't know what changed, so fall back on full invalidation. 2186 } 2187 2188 // If a package name has changed, the set of package imports may have changed 2189 // in ways we can't detect here. Assume an import has been deleted. 2190 if oldHead.File.Name.Name != newHead.File.Name.Name { 2191 return true, true, true 2192 } 2193 2194 // Check whether package imports have changed. Only consider potentially 2195 // valid imports paths. 2196 oldImports := validImports(oldHead.File.Imports) 2197 newImports := validImports(newHead.File.Imports) 2198 2199 for path := range newImports { 2200 if _, ok := oldImports[path]; ok { 2201 delete(oldImports, path) 2202 } else { 2203 invalidate = true // a new, potentially valid import was added 2204 } 2205 } 2206 2207 if len(oldImports) > 0 { 2208 invalidate = true 2209 importDeleted = true 2210 } 2211 2212 // If the change does not otherwise invalidate metadata, get the full ASTs in 2213 // order to check magic comments. 2214 // 2215 // Note: if this affects performance we can probably avoid parsing in the 2216 // common case by first scanning the source for potential comments. 2217 if !invalidate { 2218 origFulls, oldErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseFull, false, oldFH) 2219 newFulls, newErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseFull, false, newFH) 2220 if oldErr == nil && newErr == nil { 2221 invalidate = magicCommentsChanged(origFulls[0].File, newFulls[0].File) 2222 } else { 2223 // At this point, we shouldn't ever fail to produce a ParsedGoFile, as 2224 // we're already past header parsing. 2225 bug.Reportf("metadataChanges: unparseable file %v (old error: %v, new error: %v)", oldFH.URI(), oldErr, newErr) 2226 } 2227 } 2228 2229 return invalidate, pkgFileChanged, importDeleted 2230 } 2231 2232 func magicCommentsChanged(original *ast.File, current *ast.File) bool { 2233 oldComments := extractMagicComments(original) 2234 newComments := extractMagicComments(current) 2235 if len(oldComments) != len(newComments) { 2236 return true 2237 } 2238 for i := range oldComments { 2239 if oldComments[i] != newComments[i] { 2240 return true 2241 } 2242 } 2243 return false 2244 } 2245 2246 // validImports extracts the set of valid import paths from imports. 2247 func validImports(imports []*ast.ImportSpec) map[string]struct{} { 2248 m := make(map[string]struct{}) 2249 for _, spec := range imports { 2250 if path := spec.Path.Value; validImportPath(path) { 2251 m[path] = struct{}{} 2252 } 2253 } 2254 return m 2255 } 2256 2257 func validImportPath(path string) bool { 2258 path, err := strconv.Unquote(path) 2259 if err != nil { 2260 return false 2261 } 2262 if path == "" { 2263 return false 2264 } 2265 if path[len(path)-1] == '/' { 2266 return false 2267 } 2268 return true 2269 } 2270 2271 var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`) 2272 2273 // extractMagicComments finds magic comments that affect metadata in f. 2274 func extractMagicComments(f *ast.File) []string { 2275 var results []string 2276 for _, cg := range f.Comments { 2277 for _, c := range cg.List { 2278 if buildConstraintOrEmbedRe.MatchString(c.Text) { 2279 results = append(results, c.Text) 2280 } 2281 } 2282 } 2283 return results 2284 } 2285 2286 // BuiltinFile returns information about the special builtin package. 2287 func (s *Snapshot) BuiltinFile(ctx context.Context) (*ParsedGoFile, error) { 2288 s.AwaitInitialized(ctx) 2289 2290 s.mu.Lock() 2291 builtin := s.builtin 2292 s.mu.Unlock() 2293 2294 if builtin == "" { 2295 return nil, fmt.Errorf("no builtin package for view %s", s.view.folder.Name) 2296 } 2297 2298 fh, err := s.ReadFile(ctx, builtin) 2299 if err != nil { 2300 return nil, err 2301 } 2302 // For the builtin file only, we need syntactic object resolution 2303 // (since we can't type check). 2304 mode := ParseFull &^ parser.SkipObjectResolution 2305 pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), mode, false, fh) 2306 if err != nil { 2307 return nil, err 2308 } 2309 return pgfs[0], nil 2310 } 2311 2312 // IsBuiltin reports whether uri is part of the builtin package. 2313 func (s *Snapshot) IsBuiltin(uri protocol.DocumentURI) bool { 2314 s.mu.Lock() 2315 defer s.mu.Unlock() 2316 // We should always get the builtin URI in a canonical form, so use simple 2317 // string comparison here. span.CompareURI is too expensive. 2318 return uri == s.builtin 2319 } 2320 2321 func (s *Snapshot) setBuiltin(path string) { 2322 s.mu.Lock() 2323 defer s.mu.Unlock() 2324 2325 s.builtin = protocol.URIFromPath(path) 2326 } 2327 2328 // WantGCDetails reports whether to compute GC optimization details for the 2329 // specified package. 2330 func (s *Snapshot) WantGCDetails(id metadata.PackageID) bool { 2331 _, ok := s.gcOptimizationDetails[id] 2332 return ok 2333 }