cuelang.org/go@v0.13.0/internal/golangorgx/gopls/cache/snapshot.go (about)

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package cache
     6  
     7  import (
     8  	"bytes"
     9  	"context"
    10  	"errors"
    11  	"fmt"
    12  	"go/token"
    13  	"path/filepath"
    14  	"runtime"
    15  	"sort"
    16  	"strconv"
    17  	"strings"
    18  	"sync"
    19  
    20  	cueast "cuelang.org/go/cue/ast"
    21  	"cuelang.org/go/cue/build"
    22  	cueparser "cuelang.org/go/cue/parser"
    23  
    24  	"cuelang.org/go/cue/load"
    25  	"cuelang.org/go/internal/cueimports"
    26  	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
    27  	"cuelang.org/go/internal/golangorgx/gopls/cache/methodsets"
    28  	"cuelang.org/go/internal/golangorgx/gopls/cache/typerefs"
    29  	"cuelang.org/go/internal/golangorgx/gopls/cache/xrefs"
    30  	"cuelang.org/go/internal/golangorgx/gopls/file"
    31  	"cuelang.org/go/internal/golangorgx/gopls/protocol"
    32  	"cuelang.org/go/internal/golangorgx/gopls/settings"
    33  	"cuelang.org/go/internal/golangorgx/gopls/util/bug"
    34  	"cuelang.org/go/internal/golangorgx/gopls/util/constraints"
    35  	"cuelang.org/go/internal/golangorgx/gopls/util/immutable"
    36  	"cuelang.org/go/internal/golangorgx/gopls/util/pathutil"
    37  	"cuelang.org/go/internal/golangorgx/gopls/util/persistent"
    38  	"cuelang.org/go/internal/golangorgx/tools/event"
    39  	"cuelang.org/go/internal/golangorgx/tools/event/label"
    40  	"cuelang.org/go/internal/golangorgx/tools/event/tag"
    41  	"cuelang.org/go/internal/golangorgx/tools/memoize"
    42  	"golang.org/x/sync/errgroup"
    43  	"golang.org/x/tools/go/types/objectpath"
    44  )
    45  
    46  // A Snapshot represents the current state for a given view.
    47  //
    48  // It is first and foremost an idempotent implementation of file.Source whose
    49  // ReadFile method returns consistent information about the existence and
    50  // content of each file throughout its lifetime.
    51  //
    52  // However, the snapshot also manages additional state (such as parsed files
    53  // and packages) that are derived from file content.
    54  //
    55  // Snapshots are responsible for bookkeeping and invalidation of this state,
    56  // implemented in Snapshot.clone.
type Snapshot struct {
	// sequenceID is the monotonically increasing ID of this snapshot within its View.
	//
	// Sequence IDs for Snapshots from different Views cannot be compared.
	sequenceID uint64

	// TODO(rfindley): the snapshot holding a reference to the view poses
	// lifecycle problems: a view may be shut down and waiting for work
	// associated with this snapshot to complete. While most accesses of the view
	// are benign (options or workspace information), this is not formalized and
	// it is wrong for the snapshot to use a shutdown view.
	//
	// Fix this by passing options and workspace information to the snapshot,
	// both of which should be immutable for the snapshot.
	view *View

	// cancel cancels backgroundCtx; backgroundCtx carries all background
	// processing done on behalf of this snapshot.
	cancel        func()
	backgroundCtx context.Context

	store *memoize.Store // cache of handles shared by all snapshots

	// refMu guards refcount and done; it is distinct from mu so that
	// Acquire/decref never contend with the data maps below.
	refMu sync.Mutex

	// refcount holds the number of outstanding references to the current
	// Snapshot. When refcount is decremented to 0, the Snapshot maps are
	// destroyed and the done function is called.
	//
	// TODO(rfindley): use atomic.Int32 on Go 1.19+.
	refcount int
	done     func() // for implementing Session.Shutdown

	// mu guards all of the maps in the snapshot, as well as the builtin URI and
	// initialized.
	mu sync.Mutex

	// initialized reports whether the snapshot has been initialized. Each
	// snapshot is initialized at most once: concurrent initialization is
	// guarded by view.initializationSema.
	initialized bool

	// initialErr holds the last error resulting from initialization. If
	// initialization fails, we only retry when the workspace modules change,
	// to avoid too many go/packages calls.
	// If initialized is false, initialErr still holds the error resulting from
	// the previous initialization.
	// TODO(rfindley): can we unify the lifecycle of initialized and initialErr.
	initialErr *InitializationError

	// meta holds loaded metadata.
	//
	// meta is guarded by mu, but the Graph itself is immutable.
	//
	// TODO(rfindley): in many places we hold mu while operating on meta, even
	// though we only need to hold mu while reading the pointer.
	meta *metadata.Graph

	// files maps file URIs to their corresponding FileHandles.
	// It may be invalidated when a file's content changes.
	files *fileMap

	// symbolizeHandles maps each file URI to a handle for the future
	// result of computing the symbols declared in that file.
	symbolizeHandles *persistent.Map[protocol.DocumentURI, *memoize.Promise] // *memoize.Promise[symbolizeResult]

	// packages maps a packageKey to a *packageHandle.
	// It may be invalidated when a file's content changes.
	//
	// Invariants to preserve:
	//  - packages.Get(id).meta == meta.metadata[id] for all ids
	//  - if a package is in packages, then all of its dependencies should also
	//    be in packages, unless there is a missing import
	packages *persistent.Map[ImportPath, *packageHandle]

	// activePackages maps a package path to a memoized active build
	// instance, or nil if the package is known not to be open.
	//
	// package paths not contained in the map are not known to be open
	// or not open.
	activePackages *persistent.Map[ImportPath, *build.Instance]

	// workspacePackages contains the workspace's packages, which are loaded
	// when the view is created. It does not contain intermediate test variants.
	workspacePackages immutable.Map[ImportPath, unit]

	// shouldLoad tracks packages that need to be reloaded, mapping an
	// ImportPath to the package paths that should be used to reload it.
	//
	// When we try to load a package, we clear it from the shouldLoad map
	// regardless of whether the load succeeded, to prevent endless loads.
	shouldLoad *persistent.Map[ImportPath, []PackagePath]

	// unloadableFiles keeps track of files that we've failed to load.
	unloadableFiles *persistent.Set[protocol.DocumentURI]

	// importGraph holds a shared import graph to use for type-checking. Adding
	// more packages to this import graph can speed up type checking, at the
	// expense of in-use memory.
	//
	// See getImportGraph for additional documentation.
	importGraphDone chan struct{} // closed when importGraph is set; may be nil
	importGraph     *importGraph  // copied from preceding snapshot and re-evaluated

	// pkgIndex is an index of package IDs, for efficient storage of typerefs.
	pkgIndex *typerefs.PackageIndex
}

var _ memoize.RefCounted = (*Snapshot)(nil) // snapshots are reference-counted
   165  
// awaitPromise waits for the result of p, using the snapshot itself as the
// ref-counted "acquirer" so the snapshot cannot be destroyed while the
// promise is being awaited.
func (s *Snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) {
	return p.Get(ctx, s)
}
   169  
// Acquire prevents the snapshot from being destroyed until the returned
// function is called.
//
// The returned release function is idempotent-unsafe: it must be called
// exactly once.
//
// (s.Acquire().release() could instead be expressed as a pair of
// method calls s.IncRef(); s.DecRef(). The latter has the advantage
// that the DecRefs are fungible and don't require holding anything in
// addition to the refcounted object s, but paradoxically that is also
// an advantage of the current approach, which forces the caller to
// consider the release function at every stage, making a reference
// leak more obvious.)
func (s *Snapshot) Acquire() func() {
	s.refMu.Lock()
	defer s.refMu.Unlock()
	// A zero refcount means the snapshot was already destroyed; acquiring it
	// again would be a use-after-free.
	assert(s.refcount > 0, "non-positive refs")
	s.refcount++

	return s.decref
}
   188  
// decref should only be referenced by Acquire, and by View when it frees its
// reference to View.snapshot.
//
// When the last reference is released, all persistent maps owned by the
// snapshot are destroyed and the done callback (see Session.Shutdown) runs.
func (s *Snapshot) decref() {
	s.refMu.Lock()
	defer s.refMu.Unlock()

	assert(s.refcount > 0, "non-positive refs")
	s.refcount--
	if s.refcount == 0 {
		// Last reference: release the snapshot's derived state.
		s.packages.Destroy()
		s.activePackages.Destroy()
		s.files.destroy()
		s.symbolizeHandles.Destroy()
		s.unloadableFiles.Destroy()
		s.done()
	}
}
   206  
// SequenceID is the sequence id of this snapshot within its containing
// view.
//
// Relative to their view sequence ids are monotonically increasing, but this
// does not hold globally: when new views are created their initial snapshot
// has sequence ID 0.
func (s *Snapshot) SequenceID() uint64 {
	return s.sequenceID
}
   216  
// Labels returns a new slice of labels that should be used for events
// related to a snapshot.
func (s *Snapshot) Labels() []label.Label {
	return []label.Label{tag.Snapshot.Of(s.SequenceID()), tag.Directory.Of(s.Folder())}
}
   222  
// Folder returns the folder at the base of this snapshot.
func (s *Snapshot) Folder() protocol.DocumentURI {
	return s.view.folder.Dir
}
   227  
// View returns the View associated with this snapshot.
func (s *Snapshot) View() *View {
	return s.view
}
   232  
// FileKind returns the kind of a file.
//
// We can't reliably deduce the kind from the file name alone,
// as some editors can be told to interpret a buffer as
// language different from the file name heuristic, e.g. that
// an .html file actually contains Go "html/template" syntax,
// or even that a .go file contains Python.
func (s *Snapshot) FileKind(fh file.Handle) file.Kind {
	return fileKind(fh)
}
   243  
   244  // fileKind returns the default file kind for a file, before considering
   245  // template file extensions. See [Snapshot.FileKind].
   246  func fileKind(fh file.Handle) file.Kind {
   247  	// The kind of an unsaved buffer comes from the
   248  	// TextDocumentItem.LanguageID field in the didChange event,
   249  	// not from the file name. They may differ.
   250  	if o, ok := fh.(*overlay); ok {
   251  		if o.kind != file.UnknownKind {
   252  			return o.kind
   253  		}
   254  	}
   255  
   256  	fext := filepath.Ext(fh.URI().Path())
   257  	switch fext {
   258  	case ".go":
   259  		return file.Go
   260  	case ".mod":
   261  		return file.Mod
   262  	case ".sum":
   263  		return file.Sum
   264  	case ".work":
   265  		return file.Work
   266  	case ".cue":
   267  		return file.CUE
   268  	}
   269  	return file.UnknownKind
   270  }
   271  
// Options returns the options associated with this snapshot.
func (s *Snapshot) Options() *settings.Options {
	return s.view.folder.Options
}
   276  
// BackgroundContext returns a context used for all background processing
// on behalf of this snapshot.
func (s *Snapshot) BackgroundContext() context.Context {
	return s.backgroundCtx
}
   282  
// Templates returns the .tmpl files.
//
// NOTE(review): fileKind in this file never returns file.Tmpl, so as written
// this always returns an empty map — confirm whether template support is
// intended in this CUE port or whether this method is dead code.
func (s *Snapshot) Templates() map[protocol.DocumentURI]file.Handle {
	s.mu.Lock()
	defer s.mu.Unlock()

	tmpls := map[protocol.DocumentURI]file.Handle{}
	s.files.foreach(func(k protocol.DocumentURI, fh file.Handle) {
		if s.FileKind(fh) == file.Tmpl {
			tmpls[k] = fh
		}
	})
	return tmpls
}
   296  
   297  // InvocationFlags represents the settings of a particular go command invocation.
   298  // It is a mode, plus a set of flag bits.
   299  type InvocationFlags int
   300  
   301  const (
   302  	// Normal is appropriate for commands that might be run by a user and don't
   303  	// deliberately modify go.mod files, e.g. `go test`.
   304  	Normal InvocationFlags = iota
   305  	// WriteTemporaryModFile is for commands that need information from a
   306  	// modified version of the user's go.mod file, e.g. `go mod tidy` used to
   307  	// generate diagnostics.
   308  	WriteTemporaryModFile
   309  	// LoadWorkspace is for packages.Load, and other operations that should
   310  	// consider the whole workspace at once.
   311  	LoadWorkspace
   312  	// AllowNetwork is a flag bit that indicates the invocation should be
   313  	// allowed to access the network.
   314  	AllowNetwork InvocationFlags = 1 << 10
   315  )
   316  
   317  func (m InvocationFlags) Mode() InvocationFlags {
   318  	return m & (AllowNetwork - 1)
   319  }
   320  
   321  func (m InvocationFlags) AllowNetwork() bool {
   322  	return m&AllowNetwork != 0
   323  }
   324  
   325  func (s *Snapshot) buildOverlay() map[string]load.Source {
   326  	overlays := make(map[string]load.Source)
   327  	for _, overlay := range s.Overlays() {
   328  		if overlay.saved {
   329  			continue
   330  		}
   331  		// TODO(rfindley): previously, there was a todo here to make sure we don't
   332  		// send overlays outside of the current view. IMO we should instead make
   333  		// sure this doesn't matter.
   334  		overlays[overlay.URI().Path()] = load.FromBytes(overlay.content)
   335  	}
   336  	return overlays
   337  }
   338  
// Overlays returns the set of overlays at this snapshot.
//
// Note that this may differ from the set of overlays on the server, if the
// snapshot observed a historical state.
func (s *Snapshot) Overlays() []*overlay {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.files.getOverlays()
}
   349  
// Package data kinds, identifying various package data that may be stored in
// the file cache. These strings form part of the cache key for each artifact.
const (
	xrefsKind       = "xrefs"
	methodSetsKind  = "methodsets"
	exportDataKind  = "export"
	diagnosticsKind = "diagnostics"
	typerefsKind    = "typerefs"
)
   359  
// PackageDiagnostics returns diagnostics for files contained in specified
// packages.
//
// If these diagnostics cannot be loaded from cache, the requested packages
// may be type-checked.
//
// NOTE(review): currently a stub — paths is ignored and an empty (non-nil)
// map is always returned. Confirm whether diagnostics computation is still
// to be ported here.
func (s *Snapshot) PackageDiagnostics(ctx context.Context, paths ...ImportPath) (map[protocol.DocumentURI][]*Diagnostic, error) {
	ctx, done := event.Start(ctx, "cache.snapshot.PackageDiagnostics")
	defer done()

	return make(map[protocol.DocumentURI][]*Diagnostic), nil
}
   371  
// References returns cross-reference indexes for the specified packages.
//
// If these indexes cannot be loaded from cache, the requested packages may
// be type-checked.
//
// NOTE(review): currently a stub — ids is ignored and (nil, nil) is always
// returned.
func (s *Snapshot) References(ctx context.Context, ids ...PackageID) ([]xrefIndex, error) {
	ctx, done := event.Start(ctx, "cache.snapshot.References")
	defer done()

	return nil, nil
}
   382  
// An xrefIndex is a helper for looking up references in a given package.
type xrefIndex struct {
	mp   *metadata.Package // the package whose references were indexed
	data []byte            // serialized cross-reference index for mp
}

// Lookup returns the locations in this index that reference any of the
// (package path, object path) targets.
func (index xrefIndex) Lookup(targets map[PackagePath]map[objectpath.Path]struct{}) []protocol.Location {
	return xrefs.Lookup(index.mp, index.data, targets)
}
   392  
// MethodSets returns method-set indexes for the specified packages.
//
// If these indexes cannot be loaded from cache, the requested packages may
// be type-checked.
//
// NOTE(review): currently a stub — ids is ignored and (nil, nil) is always
// returned.
func (s *Snapshot) MethodSets(ctx context.Context, ids ...PackageID) ([]*methodsets.Index, error) {
	ctx, done := event.Start(ctx, "cache.snapshot.MethodSets")
	defer done()

	return nil, nil
}
   403  
// MetadataForFile returns a new slice containing metadata for each
// package containing the CUE file identified by uri, ordered by the
// number of BuildFiles (i.e. "narrowest" to "widest" package).
// The result may include tests and intermediate test variants of
// importable packages.
// It returns an error if the context was cancelled.
func (s *Snapshot) MetadataForFile(ctx context.Context, uri protocol.DocumentURI) ([]*build.Instance, error) {
	if s.view.typ == AdHocView {
		// As described in golang/go#57209, in ad-hoc workspaces (where we load ./
		// rather than ./...), preempting the directory load with file loads can
		// lead to an inconsistent outcome, where certain files are loaded with
		// command-line-arguments packages and others are loaded only in the ad-hoc
		// package. Therefore, ensure that the workspace is loaded before doing any
		// file loads.
		if err := s.awaitLoaded(ctx); err != nil {
			return nil, err
		}
	}

	s.mu.Lock()

	// Start with the set of package associations derived from the last load.
	pkgPaths := s.meta.FilesToPackage[uri]

	// Does any package containing uri have a pending reload request?
	shouldLoad := false
	for _, path := range pkgPaths {
		if pkgs, _ := s.shouldLoad.Get(path); len(pkgs) > 0 {
			shouldLoad = true
		}
	}

	// Check if uri is known to be unloadable.
	unloadable := s.unloadableFiles.Contains(uri)

	// Release the lock before the (potentially slow) load below; we re-acquire
	// it afterwards to read the possibly-updated metadata.
	s.mu.Unlock()

	// Reload if loading is likely to improve the package associations for uri:
	//  - uri is not contained in any valid packages
	//  - ...or one of the packages containing uri is marked 'shouldLoad'
	//  - ...but uri is not unloadable
	if (shouldLoad || len(pkgPaths) == 0) && !unloadable {
		scope := fileLoadScope(uri)
		err := s.load(ctx, false, scope)

		// On failure, return the context error only when cancelled; the current
		// operation is no longer valid in that case.
		if err != nil {
			// Guard against failed loads due to context cancellation. We don't want
			// to mark loads as completed if they failed due to context cancellation.
			if ctx.Err() != nil {
				return nil, ctx.Err()
			}

			// Don't return an error here, as we may still return stale IDs.
			// Furthermore, the result of MetadataForFile should be consistent upon
			// subsequent calls, even if the file is marked as unloadable.
			if !errors.Is(err, errNoInstances) {
				event.Error(ctx, "MetadataForFile", err)
			}
		}

		// We must clear scopes after loading.
		//
		// TODO(rfindley): unlike reloadWorkspace, this is simply marking loaded
		// packages as loaded. We could do this from snapshot.load and avoid
		// raciness.
		s.clearShouldLoad(scope)
	}

	// Retrieve the metadata.
	s.mu.Lock()
	defer s.mu.Unlock()
	pkgPaths = s.meta.FilesToPackage[uri]
	metas := make([]*build.Instance, len(pkgPaths))
	for i, path := range pkgPaths {
		metas[i] = s.meta.Packages[path]
		if metas[i] == nil {
			// Invariant violation: FilesToPackage and Packages must agree.
			panic("nil metadata")
		}
	}
	// Metadata is only ever added by loading,
	// so if we get here and still have
	// no IDs, uri is unloadable.
	if !unloadable && len(pkgPaths) == 0 {
		s.unloadableFiles.Add(uri)
	}

	// Sort packages "narrowest" to "widest" (in practice:
	// non-tests before tests), and regular packages before
	// their intermediate test variants (which have the same
	// files but different imports).
	sort.Slice(metas, func(i, j int) bool {
		x, y := metas[i], metas[j]
		xfiles, yfiles := len(x.BuildFiles), len(y.BuildFiles)
		return xfiles < yfiles
	})

	return metas, nil
}
   505  
   506  // -- Active package tracking --
   507  //
   508  // We say a package is "active" if any of its files are open.
   509  // This is an optimization: the "active" concept is an
   510  // implementation detail of the cache and is not exposed
   511  // in the source or Snapshot API.
   512  // After type-checking we keep active packages in memory.
   513  // The activePackages persistent map does bookkeeping for
   514  // the set of active packages.
   515  
// getActivePackage returns the memoized active package for path, if
// it exists. If path is not active or has not yet been type-checked,
// it returns nil.
func (s *Snapshot) getActivePackage(path ImportPath) *build.Instance {
	s.mu.Lock()
	defer s.mu.Unlock()

	if value, ok := s.activePackages.Get(path); ok {
		return value
	}
	return nil
}
   528  
// setActivePackage records whether inst is active (has an open file): if path
// already has a memoized entry the call is a no-op; otherwise it stores inst
// when the package contains an open file, or nil to remember that it does not.
func (s *Snapshot) setActivePackage(path ImportPath, inst *build.Instance) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if _, ok := s.activePackages.Get(path); ok {
		return // already memoized
	}

	if containsOpenFileLocked(s, inst) {
		s.activePackages.Set(path, inst, nil)
	} else {
		s.activePackages.Set(path, nil, nil) // remember that pkg is not open
	}
}
   545  
// resetActivePackagesLocked discards all memoized active-package state,
// replacing the map with a fresh empty one. Per the "Locked" naming
// convention, the caller must hold s.mu.
func (s *Snapshot) resetActivePackagesLocked() {
	s.activePackages.Destroy()
	s.activePackages = new(persistent.Map[ImportPath, *build.Instance])
}
   550  
// See Session.FileWatchingGlobPatterns for a description of gopls' file
// watching heuristic.
func (s *Snapshot) fileWatchingGlobPatterns() map[protocol.RelativePattern]unit {
	// Always watch files that may change the view definition.
	patterns := make(map[protocol.RelativePattern]unit)

	// Watch all CUE files under the chosen roots.
	extensions := "cue"
	watchCueFiles := fmt.Sprintf("**/*.{%s}", extensions)

	var dirs []string
	if s.view.moduleMode() {

		// In module mode, watch directories containing active modules, and collect
		// these dirs for later filtering the set of known directories.
		//
		// The assumption is that the user is not actively editing non-workspace
		// modules, so don't pay the price of file watching.
		for modFile := range s.view.workspaceModFiles {
			// modFile points at the module file inside cue.mod; the module root
			// is two directories up.
			rootDir := filepath.Dir(filepath.Dir(modFile.Path()))
			dirs = append(dirs, rootDir)

			// TODO(golang/go#64724): thoroughly test these patterns, particularly
			// on Windows.
			//
			// Note that glob patterns should use '/' on Windows:
			// https://code.visualstudio.com/docs/editor/glob-patterns
			patterns[protocol.RelativePattern{BaseURI: protocol.URIFromPath(rootDir), Pattern: watchCueFiles}] = unit{}
		}
	} else {
		// In non-module modes (GOPATH or AdHoc), we just watch the workspace root.
		dirs = []string{s.view.root.Path()}
		patterns[protocol.RelativePattern{Pattern: watchCueFiles}] = unit{}
	}

	if s.watchSubdirs() {
		// Some clients (e.g. VS Code) do not send notifications for changes to
		// directories that contain Go code (golang/go#42348). To handle this,
		// explicitly watch all of the directories in the workspace. We find them
		// by adding the directories of every file in the snapshot's workspace
		// directories. There may be thousands of patterns, each a single
		// directory.
		//
		// We compute this set by looking at files that we've previously observed.
		// This may miss changes to directories that we haven't observed, but that
		// shouldn't matter as there is nothing to invalidate (if a directory falls
		// in forest, etc).
		//
		// (A previous iteration created a single glob pattern holding a union of
		// all the directories, but this was found to cause VS Code to get stuck
		// for several minutes after a buffer was saved twice in a workspace that
		// had >8000 watched directories.)
		//
		// Some clients (notably coc.nvim, which uses watchman for globs) perform
		// poorly with a large list of individual directories.
		s.addKnownSubdirs(patterns, dirs)
	}

	return patterns
}
   612  
// addKnownSubdirs adds a watch pattern to patterns for every directory known
// to the snapshot that lies within one of the workspace directories wsDirs.
func (s *Snapshot) addKnownSubdirs(patterns map[protocol.RelativePattern]unit, wsDirs []string) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.files.getDirs().Range(func(dir string) {
		for _, wsDir := range wsDirs {
			if pathutil.InDir(wsDir, dir) {
				// Glob patterns use forward slashes even on Windows.
				patterns[protocol.RelativePattern{Pattern: filepath.ToSlash(dir)}] = unit{}
			}
		}
	})
}
   625  
// watchSubdirs reports whether gopls should request separate file watchers for
// each relevant subdirectory. This is necessary only for clients (namely VS
// Code) that do not send notifications for individual files in a directory
// when the entire directory is deleted.
func (s *Snapshot) watchSubdirs() bool {
	switch p := s.Options().SubdirWatchPatterns; p {
	case settings.SubdirWatchPatternsOn:
		return true
	case settings.SubdirWatchPatternsOff:
		return false
	case settings.SubdirWatchPatternsAuto:
		// See the documentation of InternalOptions.SubdirWatchPatterns for an
		// explanation of why VS Code gets a different default value here.
		//
		// Unfortunately, there is no authoritative list of client names, nor any
		// requirements that client names do not change. We should update the VS
		// Code extension to set a default value of "subdirWatchPatterns" to "on",
		// so that this workaround is only temporary.
		if s.Options().ClientInfo != nil && s.Options().ClientInfo.Name == "Visual Studio Code" {
			return true
		}
		return false
	default:
		// Unknown setting value: report the bug but fail safe (no subdir watchers).
		bug.Reportf("invalid subdirWatchPatterns: %q", p)
		return false
	}
}
   653  
   654  // filesInDir returns all files observed by the snapshot that are contained in
   655  // a directory with the provided URI.
   656  func (s *Snapshot) filesInDir(uri protocol.DocumentURI) []protocol.DocumentURI {
   657  	s.mu.Lock()
   658  	defer s.mu.Unlock()
   659  
   660  	dir := uri.Path()
   661  	if !s.files.getDirs().Contains(dir) {
   662  		return nil
   663  	}
   664  	var files []protocol.DocumentURI
   665  	s.files.foreach(func(uri protocol.DocumentURI, _ file.Handle) {
   666  		if pathutil.InDir(dir, uri.Path()) {
   667  			files = append(files, uri)
   668  		}
   669  	})
   670  	return files
   671  }
   672  
// WorkspaceMetadata returns a new, unordered slice containing
// metadata for all ordinary and test packages (but not
// intermediate test variants) in the workspace.
//
// The workspace is the set of modules typically defined by a
// go.work file. It is not transitively closed: for example,
// the standard library is not usually part of the workspace
// even though every module in the workspace depends on it.
//
// Operations that must inspect all the dependencies of the
// workspace packages should instead use AllMetadata.
func (s *Snapshot) WorkspaceMetadata(ctx context.Context) ([]*build.Instance, error) {
	// Block until the initial workspace load has completed.
	if err := s.awaitLoaded(ctx); err != nil {
		return nil, err
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	meta := make([]*build.Instance, 0, s.workspacePackages.Len())
	s.workspacePackages.Range(func(path ImportPath, _ unit) {
		meta = append(meta, s.meta.Packages[path])
	})
	return meta, nil
}
   698  
// isWorkspacePackage reports whether the given import path refers to a
// workspace package for the snapshot.
func (s *Snapshot) isWorkspacePackage(path ImportPath) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.workspacePackages.Value(path)
	return ok
}
   707  
// Symbols extracts and returns symbol information for every file contained in
// a loaded package. It awaits snapshot loading.
//
// If workspaceOnly is set, this only includes symbols from files in a
// workspace package. Otherwise, it returns symbols from all loaded packages.
//
// TODO(rfindley): move to symbols.go.
func (s *Snapshot) Symbols(ctx context.Context, workspaceOnly bool) (map[protocol.DocumentURI][]Symbol, error) {
	var (
		meta []*build.Instance
		err  error
	)
	if workspaceOnly {
		meta, err = s.WorkspaceMetadata(ctx)
	} else {
		meta, err = s.AllMetadata(ctx)
	}
	if err != nil {
		return nil, fmt.Errorf("loading metadata: %v", err)
	}

	// Collect the distinct set of files across all packages.
	buildFiles := make(map[protocol.DocumentURI]struct{})
	for _, inst := range meta {
		for _, file := range inst.BuildFiles {
			buildFiles[protocol.URIFromPath(file.Filename)] = struct{}{}
		}
	}

	// Symbolize them in parallel.
	var (
		group    errgroup.Group
		nprocs   = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU
		resultMu sync.Mutex
		result   = make(map[protocol.DocumentURI][]Symbol)
	)
	group.SetLimit(nprocs)
	for uri := range buildFiles {
		uri := uri // capture loop variable (pre-Go 1.22 semantics)
		group.Go(func() error {
			symbols, err := s.symbolize(ctx, uri)
			if err != nil {
				return err
			}
			resultMu.Lock()
			result[uri] = symbols
			resultMu.Unlock()
			return nil
		})
	}
	// Keep going on errors, but log the first failure.
	// Partial results are better than no symbol results.
	if err := group.Wait(); err != nil {
		event.Error(ctx, "getting snapshot symbols", err)
	}
	return result, nil
}
   764  
   765  // AllMetadata returns a new unordered array of metadata for
   766  // all packages known to this snapshot, which includes the
   767  // packages of all workspace modules plus their transitive
   768  // import dependencies.
   769  //
   770  // It may also contain ad-hoc packages for standalone files.
   771  // It includes all test variants.
   772  //
   773  // TODO(rfindley): Replace this with s.MetadataGraph().
   774  func (s *Snapshot) AllMetadata(ctx context.Context) ([]*build.Instance, error) {
   775  	if err := s.awaitLoaded(ctx); err != nil {
   776  		return nil, err
   777  	}
   778  
   779  	g := s.MetadataGraph()
   780  
   781  	meta := make([]*build.Instance, 0, len(g.Packages))
   782  	for _, mp := range g.Packages {
   783  		meta = append(meta, mp)
   784  	}
   785  	return meta, nil
   786  }
   787  
// CueModForFile returns the URI of the module file governing the given URI,
// chosen from the view's workspace module files: the enclosing module file
// with the longest URI wins, and the zero URI is returned when no workspace
// module file encloses uri.
//
// NOTE(review): the previous comment said "go.mod"; given the function name
// this presumably refers to the CUE module file — confirm against
// workspaceModFiles' contents.
//
// TODO(rfindley): clarify that this is only active modules. Or update to just
// use findRootPattern.
func (s *Snapshot) CueModForFile(uri protocol.DocumentURI) protocol.DocumentURI {
	return moduleForURI(s.view.workspaceModFiles, uri)
}
   795  
   796  func moduleForURI(modFiles map[protocol.DocumentURI]struct{}, uri protocol.DocumentURI) protocol.DocumentURI {
   797  	var match protocol.DocumentURI
   798  	for modURI := range modFiles {
   799  		if !modURI.Dir().Encloses(uri) {
   800  			continue
   801  		}
   802  		if len(modURI) > len(match) {
   803  			match = modURI
   804  		}
   805  	}
   806  	return match
   807  }
   808  
   809  // Metadata returns the metadata for the specified package,
   810  // or nil if it was not found.
   811  func (s *Snapshot) Metadata(path ImportPath) *build.Instance {
   812  	s.mu.Lock()
   813  	defer s.mu.Unlock()
   814  	return s.meta.Packages[path]
   815  }
   816  
   817  // clearShouldLoad clears package IDs that no longer need to be reloaded after
   818  // scopes has been loaded.
   819  func (s *Snapshot) clearShouldLoad(scopes ...loadScope) {
   820  	s.mu.Lock()
   821  	defer s.mu.Unlock()
   822  
   823  	for _, scope := range scopes {
   824  		switch scope := scope.(type) {
   825  		case packageLoadScope:
   826  			scopePath := PackagePath(scope)
   827  			var toDelete []ImportPath
   828  			s.shouldLoad.Range(func(path ImportPath, pkgPaths []PackagePath) {
   829  				for _, pkgPath := range pkgPaths {
   830  					if pkgPath == scopePath {
   831  						toDelete = append(toDelete, path)
   832  					}
   833  				}
   834  			})
   835  			for _, id := range toDelete {
   836  				s.shouldLoad.Delete(id)
   837  			}
   838  		case fileLoadScope:
   839  			uri := protocol.DocumentURI(scope)
   840  			pkgPaths := s.meta.FilesToPackage[uri]
   841  			for _, path := range pkgPaths {
   842  				s.shouldLoad.Delete(path)
   843  			}
   844  		}
   845  	}
   846  }
   847  
   848  // FindFile returns the FileHandle for the given URI, if it is already
   849  // in the given snapshot.
   850  // TODO(adonovan): delete this operation; use ReadFile instead.
   851  func (s *Snapshot) FindFile(uri protocol.DocumentURI) file.Handle {
   852  	s.mu.Lock()
   853  	defer s.mu.Unlock()
   854  
   855  	result, _ := s.files.get(uri)
   856  	return result
   857  }
   858  
   859  // ReadFile returns a File for the given URI. If the file is unknown it is added
   860  // to the managed set.
   861  //
   862  // ReadFile succeeds even if the file does not exist. A non-nil error return
   863  // indicates some type of internal error, for example if ctx is cancelled.
   864  func (s *Snapshot) ReadFile(ctx context.Context, uri protocol.DocumentURI) (file.Handle, error) {
   865  	s.mu.Lock()
   866  	defer s.mu.Unlock()
   867  
   868  	fh, ok := s.files.get(uri)
   869  	if !ok {
   870  		var err error
   871  		fh, err = s.view.fs.ReadFile(ctx, uri)
   872  		if err != nil {
   873  			return nil, err
   874  		}
   875  		s.files.set(uri, fh)
   876  	}
   877  	return fh, nil
   878  }
   879  
   880  // preloadFiles delegates to the view FileSource to read the requested uris in
   881  // parallel, without holding the snapshot lock.
   882  func (s *Snapshot) preloadFiles(ctx context.Context, uris []protocol.DocumentURI) {
   883  	files := make([]file.Handle, len(uris))
   884  	var wg sync.WaitGroup
   885  	iolimit := make(chan struct{}, 20) // I/O concurrency limiting semaphore
   886  	for i, uri := range uris {
   887  		wg.Add(1)
   888  		iolimit <- struct{}{}
   889  		go func(i int, uri protocol.DocumentURI) {
   890  			defer wg.Done()
   891  			fh, err := s.view.fs.ReadFile(ctx, uri)
   892  			<-iolimit
   893  			if err != nil && ctx.Err() == nil {
   894  				event.Error(ctx, fmt.Sprintf("reading %s", uri), err)
   895  				return
   896  			}
   897  			files[i] = fh
   898  		}(i, uri)
   899  	}
   900  	wg.Wait()
   901  
   902  	s.mu.Lock()
   903  	defer s.mu.Unlock()
   904  
   905  	for i, fh := range files {
   906  		if fh == nil {
   907  			continue // error logged above
   908  		}
   909  		uri := uris[i]
   910  		if _, ok := s.files.get(uri); !ok {
   911  			s.files.set(uri, fh)
   912  		}
   913  	}
   914  }
   915  
   916  // IsOpen returns whether the editor currently has a file open.
   917  func (s *Snapshot) IsOpen(uri protocol.DocumentURI) bool {
   918  	s.mu.Lock()
   919  	defer s.mu.Unlock()
   920  
   921  	fh, _ := s.files.get(uri)
   922  	_, open := fh.(*overlay)
   923  	return open
   924  }
   925  
   926  // MetadataGraph returns the current metadata graph for the Snapshot.
   927  func (s *Snapshot) MetadataGraph() *metadata.Graph {
   928  	s.mu.Lock()
   929  	defer s.mu.Unlock()
   930  	return s.meta
   931  }
   932  
   933  // InitializationError returns the last error from initialization.
   934  func (s *Snapshot) InitializationError() *InitializationError {
   935  	s.mu.Lock()
   936  	defer s.mu.Unlock()
   937  	return s.initialErr
   938  }
   939  
// awaitLoaded awaits initialization and package reloading, and returns
// ctx.Err().
//
// A non-nil result therefore only reflects cancellation of ctx; load
// failures are logged by reloadWorkspace rather than returned.
func (s *Snapshot) awaitLoaded(ctx context.Context) error {
	// Do not return results until the snapshot's view has been initialized.
	s.AwaitInitialized(ctx)
	s.reloadWorkspace(ctx)
	return ctx.Err()
}
   948  
// AwaitInitialized waits until the snapshot's view is initialized, then runs
// initialize to (re)establish snapshot state. It returns early, without
// initializing, if ctx is cancelled first.
func (s *Snapshot) AwaitInitialized(ctx context.Context) {
	select {
	case <-ctx.Done():
		return
	case <-s.view.initialWorkspaceLoad:
	}
	// We typically prefer to run something as intensive as the IWL without
	// blocking. I'm not sure if there is a way to do that here.
	s.initialize(ctx, false)
}
   960  
   961  // reloadWorkspace reloads the metadata for all invalidated workspace packages.
   962  func (s *Snapshot) reloadWorkspace(ctx context.Context) {
   963  	var scopes []loadScope
   964  	var seen map[PackagePath]bool
   965  	s.mu.Lock()
   966  	s.shouldLoad.Range(func(_ ImportPath, pkgPaths []PackagePath) {
   967  		for _, pkgPath := range pkgPaths {
   968  			if seen == nil {
   969  				seen = make(map[PackagePath]bool)
   970  			}
   971  			if seen[pkgPath] {
   972  				continue
   973  			}
   974  			seen[pkgPath] = true
   975  			scopes = append(scopes, packageLoadScope(pkgPath))
   976  		}
   977  	})
   978  	s.mu.Unlock()
   979  
   980  	if len(scopes) == 0 {
   981  		return
   982  	}
   983  
   984  	// For an ad-hoc view, we cannot reload by package path. Just reload the view.
   985  	if s.view.typ == AdHocView {
   986  		scopes = []loadScope{viewLoadScope{}}
   987  	}
   988  
   989  	err := s.load(ctx, false, scopes...)
   990  
   991  	// Unless the context was canceled, set "shouldLoad" to false for all
   992  	// of the metadata we attempted to load.
   993  	if !errors.Is(err, context.Canceled) {
   994  		s.clearShouldLoad(scopes...)
   995  		if err != nil {
   996  			event.Error(ctx, "reloading workspace", err, s.Labels()...)
   997  		}
   998  	}
   999  }
  1000  
  1001  // orphanedFileDiagnosticRange returns the position to use for orphaned file diagnostics.
  1002  // We only warn about an orphaned file if it is well-formed enough to actually
  1003  // be part of a package. Otherwise, we need more information.
  1004  func orphanedFileDiagnosticRange(ctx context.Context, cache *parseCache, fh file.Handle) (*ParsedGoFile, protocol.Range, bool) {
  1005  	pgfs, err := cache.parseFiles(ctx, token.NewFileSet(), ParseHeader, false, fh)
  1006  	if err != nil {
  1007  		return nil, protocol.Range{}, false
  1008  	}
  1009  	pgf := pgfs[0]
  1010  	if !pgf.File.Name.Pos().IsValid() {
  1011  		return nil, protocol.Range{}, false
  1012  	}
  1013  	rng, err := pgf.PosRange(pgf.File.Name.Pos(), pgf.File.Name.End())
  1014  	if err != nil {
  1015  		return nil, protocol.Range{}, false
  1016  	}
  1017  	return pgf, rng, true
  1018  }
  1019  
  1020  // TODO(golang/go#53756): this function needs to consider more than just the
  1021  // absolute URI, for example:
  1022  //   - the position of /vendor/ with respect to the relevant module root
  1023  //   - whether or not go.work is in use (as vendoring isn't supported in workspace mode)
  1024  //
  1025  // Most likely, each call site of inVendor needs to be reconsidered to
  1026  // understand and correctly implement the desired behavior.
  1027  func inVendor(uri protocol.DocumentURI) bool {
  1028  	_, after, found := strings.Cut(string(uri), "/vendor/")
  1029  	// Only subdirectories of /vendor/ are considered vendored
  1030  	// (/vendor/a/foo.go is vendored, /vendor/foo.go is not).
  1031  	return found && strings.Contains(after, "/")
  1032  }
  1033  
  1034  // This exists temporarily to support development and debugging
  1035  //
  1036  // TODO(ms): delete asap
  1037  func printStackTrace(prefix string) {
  1038  	buf := make([]byte, 16384)
  1039  	n := runtime.Stack(buf, false) // false = only current goroutine
  1040  	fmt.Printf("%s\n%s\n\n", prefix, string(buf[:n]))
  1041  }
  1042  
// clone copies state from the receiver into a new Snapshot, applying the given
// state changes.
//
// The caller of clone must call Snapshot.decref on the returned
// snapshot when they are finished using it.
//
// The resulting bool reports whether the change invalidates any derived
// diagnostics for the snapshot, for example because it invalidates Packages or
// parsed go.mod files. This is used to mark a view as needing diagnosis in the
// server.
//
// TODO(rfindley): long term, it may be better to move responsibility for
// diagnostics into the Snapshot (e.g. a Snapshot.Diagnostics method), at which
// point the Snapshot could be responsible for tracking and forwarding a
// 'viewsToDiagnose' field. As is, this field is instead externalized in the
// server.viewsToDiagnose map. Moving it to the snapshot would entirely
// eliminate any 'relevance' heuristics from Session.DidModifyFiles, but would
// also require more strictness about diagnostic dependencies. For example,
// template.Diagnostics currently re-parses every time: there is no Snapshot
// data responsible for providing these diagnostics.
func (s *Snapshot) clone(ctx, bgCtx context.Context, changed StateChange, done func()) (*Snapshot, bool) {
	changedFiles := changed.Files
	ctx, stop := event.Start(ctx, "cache.snapshot.clone")
	defer stop()

	// The whole clone runs under s.mu; the *Locked helpers called near the
	// end (e.g. computeWorkspacePackagesLocked) rely on this.
	s.mu.Lock()
	defer s.mu.Unlock()

	needsDiagnosis := false

	bgCtx, cancel := context.WithCancel(bgCtx)
	// The new snapshot starts as a (persistent-map) copy of the old one;
	// the invalidation logic below then removes whatever the change affects.
	result := &Snapshot{
		sequenceID:        s.sequenceID + 1,
		view:              s.view,
		cancel:            cancel,
		backgroundCtx:     bgCtx,
		store:             s.store,
		refcount:          1, // Snapshots are born referenced.
		done:              done,
		initialized:       s.initialized,
		initialErr:        s.initialErr,
		files:             s.files.clone(changedFiles),
		symbolizeHandles:  cloneWithout(s.symbolizeHandles, changedFiles, nil),
		packages:          s.packages.Clone(),
		activePackages:    s.activePackages.Clone(),
		workspacePackages: s.workspacePackages,
		shouldLoad:        s.shouldLoad.Clone(),      // not cloneWithout: shouldLoad is cleared on loads
		unloadableFiles:   s.unloadableFiles.Clone(), // not cloneWithout: typing in a file doesn't necessarily make it loadable
		importGraph:       s.importGraph,
		pkgIndex:          s.pkgIndex,
	}

	// reinit is set below when a workspace module file changed on disk; it
	// forces re-initialization and invalidation of all package metadata.
	reinit := false

	// Collect observed file handles for changed URIs from the old snapshot, if
	// they exist. Importantly, we don't call ReadFile here: consider the case
	// where a file is added on disk; we don't want to read the newly added file
	// into the old snapshot, as that will break our change detection below.
	//
	// TODO(rfindley): it may be more accurate to rely on the
	// modification type here, similarly to what we do for vendored
	// files above. If we happened not to have read a file in the
	// previous snapshot, that's not the same as it actually being
	// created.
	//
	// TODO(ms): (see the go-tools equiv for "vendored files above").
	oldFiles := make(map[protocol.DocumentURI]file.Handle)
	for uri := range changedFiles {
		if fh, ok := s.files.get(uri); ok {
			oldFiles[uri] = fh
		}
	}
	// changedOnDisk determines if the new file handle may have changed on disk.
	// It over-approximates, returning true if the new file is saved and either
	// the old file wasn't saved, or the on-disk contents changed.
	//
	// oldFH may be nil.
	changedOnDisk := func(oldFH, newFH file.Handle) bool {
		if !newFH.SameContentsOnDisk() {
			return false
		}
		if oe, ne := (oldFH != nil && fileExists(oldFH)), fileExists(newFH); !oe || !ne {
			return oe != ne
		}
		return !oldFH.SameContentsOnDisk() || oldFH.Identity() != newFH.Identity()
	}

	// Reinitialize if any workspace mod file has changed on disk.
	for uri, newFH := range changedFiles {
		if _, ok := result.view.workspaceModFiles[uri]; ok && changedOnDisk(oldFiles[uri], newFH) {
			reinit = true
		}
	}

	if reinit {
		result.initialized = false
	}

	// directPkgPaths keeps track of packages that have directly
	// changed.  Note: this is not a set, it's a map from id to
	// invalidateMetadata.
	directPkgPaths := map[ImportPath]bool{}

	// Invalidate all package metadata if the workspace module has changed.
	if reinit {
		for path := range s.meta.Packages {
			// TODO(rfindley): this seems brittle; can we just start over?
			directPkgPaths[path] = true
		}
	}

	// Compute invalidations based on file changes.
	anyImportDeleted := false      // import deletions can resolve cycles
	anyFileOpenedOrClosed := false // opened files affect workspace packages
	anyFileAdded := false          // adding a file can resolve missing dependencies

	for uri, newFH := range changedFiles {
		// The original FileHandle for this URI is cached on the snapshot.
		oldFH := oldFiles[uri] // may be nil
		_, oldOpen := oldFH.(*overlay)
		_, newOpen := newFH.(*overlay)

		anyFileOpenedOrClosed = anyFileOpenedOrClosed || (oldOpen != newOpen)
		anyFileAdded = anyFileAdded || (oldFH == nil || !fileExists(oldFH)) && fileExists(newFH)

		// If uri is a cue file, check if it has changed in a way that would
		// invalidate metadata.
		var invalidateMetadata, pkgFileChanged, importDeleted bool
		var pkgName string
		if s.FileKind(newFH) == file.CUE {
			invalidateMetadata, pkgFileChanged, importDeleted, pkgName = metadataChanges(ctx, s, oldFH, newFH)
		}
		if invalidateMetadata {
			// If this is a metadata-affecting change, perhaps a reload will succeed.
			result.unloadableFiles.Remove(uri)
			needsDiagnosis = true
		}

		invalidateMetadata = invalidateMetadata || reinit
		anyImportDeleted = anyImportDeleted || importDeleted

		pkgPaths := invalidatedPackageIDs(uri, s.meta, pkgFileChanged, pkgName)
		for path := range pkgPaths {
			directPkgPaths[path] = directPkgPaths[path] || invalidateMetadata // may insert 'false'
		}
	}

	// Deleting an import can cause list errors due to import cycles to be
	// resolved. The best we can do without parsing the list error message is to
	// hope that list errors may have been resolved by a deleted import.
	//
	// We could do better by parsing the list error message. We already do this
	// to assign a better range to the list error, but for such critical
	// functionality as metadata, it's better to be conservative until it proves
	// impractical.
	//
	// We could also do better by looking at which imports were deleted and
	// trying to find cycles they are involved in. This fails when the file goes
	// from an unparseable state to a parseable state, as we don't have a
	// starting point to compare with.
	//
	// TODO(ms): tidy comment above given we aren't using go list, and
	// figure out whether the following is sufficient.
	if anyImportDeleted {
		for path, inst := range s.meta.Packages {
			if inst.Err != nil {
				directPkgPaths[path] = true
			}
		}
	}

	// Adding a file can resolve missing dependencies from existing packages.
	//
	// We could be smart here and try to guess which packages may have been
	// fixed, but until that proves necessary, just invalidate metadata for any
	// package with missing dependencies.
	//
	// TODO(ms): this one needs more thinking about: I currently have
	// no idea what cue/load.Instances does with imports that can't be
	// resolved.
	//
	// if anyFileAdded {
	// 	for id, mp := range s.meta.Packages {
	// 		for _, impID := range mp.DepsByImpPath {
	// 			if impID == "" { // missing import
	// 				directIDs[id] = true
	// 				break
	// 			}
	// 		}
	// 	}
	// }

	// Invalidate reverse dependencies too.
	// pkgPathsToInvalidate keeps track of transitive reverse dependencies.
	// If a pkgPath is present in the map, invalidate its types.
	// If a pkgPath's value is true, invalidate its metadata too.
	pkgPathsToInvalidate := map[ImportPath]bool{}
	var addRevDeps func(ImportPath, bool)
	addRevDeps = func(path ImportPath, invalidateMetadata bool) {
		current, seen := pkgPathsToInvalidate[path]
		newInvalidateMetadata := current || invalidateMetadata

		// If we've already seen this path, and the value of invalidate
		// metadata has not changed, we can return early.
		if seen && current == newInvalidateMetadata {
			return
		}
		pkgPathsToInvalidate[path] = newInvalidateMetadata
		for _, importingPkgPath := range s.meta.ImportedBy[path] {
			addRevDeps(importingPkgPath, invalidateMetadata)
		}
	}
	for path, invalidateMetadata := range directPkgPaths {
		addRevDeps(path, invalidateMetadata)
	}

	// Invalidated package information.
	for path, invalidateMetadata := range pkgPathsToInvalidate {
		if _, ok := directPkgPaths[path]; ok || invalidateMetadata {
			if result.packages.Delete(path) {
				needsDiagnosis = true
			}
		} else {
			// Only a transitive dependency changed: keep the package entry
			// but clone it with validated=false (see entry.clone).
			if entry, hit := result.packages.Get(path); hit {
				needsDiagnosis = true
				ph := entry.clone(false)
				result.packages.Set(path, ph, nil)
			}
		}
		if result.activePackages.Delete(path) {
			needsDiagnosis = true
		}
	}

	// Compute which metadata updates are required. We only need to invalidate
	// packages directly containing the affected file, and only if it changed in
	// a relevant way.
	metadataUpdates := make(map[ImportPath]*build.Instance)
	for path, inst := range s.meta.Packages {
		invalidateMetadata := pkgPathsToInvalidate[path]

		// For metadata that has been newly invalidated, capture package paths
		// requiring reloading in the shouldLoad map.
		//
		// TODO(ms): far from clear what the values of the shouldLoad
		// map should actually be - they seem to be related to
		// packageLoad scope, which we don't support (or understand).

		if invalidateMetadata && !metadata.IsCommandLineArguments(path) {
			needsReload := []PackagePath{PackagePath(inst.Dir)}
			result.shouldLoad.Set(path, needsReload, nil)
		}

		// Check whether the metadata should be deleted.
		if invalidateMetadata {
			needsDiagnosis = true
			metadataUpdates[path] = nil
		}
	}

	// Update metadata, if necessary.
	result.meta = s.meta.Update(metadataUpdates)

	// Update workspace and active packages, if necessary.
	if result.meta != s.meta || anyFileOpenedOrClosed {
		needsDiagnosis = true
		result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta)
		result.resetActivePackagesLocked()
	} else {
		result.workspacePackages = s.workspacePackages
	}

	return result, needsDiagnosis
}
  1317  
  1318  // cloneWithout clones m then deletes from it the keys of changes.
  1319  //
  1320  // The optional didDelete variable is set to true if there were deletions.
  1321  func cloneWithout[K constraints.Ordered, V1, V2 any](m *persistent.Map[K, V1], changes map[K]V2, didDelete *bool) *persistent.Map[K, V1] {
  1322  	m2 := m.Clone()
  1323  	for k := range changes {
  1324  		if m2.Delete(k) && didDelete != nil {
  1325  			*didDelete = true
  1326  		}
  1327  	}
  1328  	return m2
  1329  }
  1330  
  1331  // cloneWith clones m then inserts the changes into it.
  1332  func cloneWith[K constraints.Ordered, V any](m *persistent.Map[K, V], changes map[K]V) *persistent.Map[K, V] {
  1333  	m2 := m.Clone()
  1334  	for k, v := range changes {
  1335  		m2.Set(k, v, nil)
  1336  	}
  1337  	return m2
  1338  }
  1339  
  1340  // deleteMostRelevantModFile deletes the mod file most likely to be the mod
  1341  // file for the changed URI, if it exists.
  1342  //
  1343  // Specifically, this is the longest mod file path in a directory containing
  1344  // changed. This might not be accurate if there is another mod file closer to
  1345  // changed that happens not to be present in the map, but that's OK: the goal
  1346  // of this function is to guarantee that IF the nearest mod file is present in
  1347  // the map, it is invalidated.
  1348  func deleteMostRelevantModFile(m *persistent.Map[protocol.DocumentURI, *memoize.Promise], changed protocol.DocumentURI) {
  1349  	var mostRelevant protocol.DocumentURI
  1350  	changedFile := changed.Path()
  1351  
  1352  	m.Range(func(modURI protocol.DocumentURI, _ *memoize.Promise) {
  1353  		if len(modURI) > len(mostRelevant) {
  1354  			if pathutil.InDir(filepath.Dir(modURI.Path()), changedFile) {
  1355  				mostRelevant = modURI
  1356  			}
  1357  		}
  1358  	})
  1359  	if mostRelevant != "" {
  1360  		m.Delete(mostRelevant)
  1361  	}
  1362  }
  1363  
// invalidatedPackageIDs returns all packages invalidated by a change to uri.
//
// If packageFileChanged is set, the file is either a new file, or has a new
// package name. In this case, all known packages in the directory will be
// invalidated.
func invalidatedPackageIDs(uri protocol.DocumentURI, graph *metadata.Graph, packageFileChanged bool, pkgName string) map[ImportPath]struct{} {
	invalidated := make(map[ImportPath]struct{})

	// The only instances the file (uri) can be a part of are instances
	// from the same dir as the file, or descendants of the dir (with
	// the same package name). We want to find the "root" of these
	// instances, i.e. the instance with the some Dir as the file.
	var rootInvalidInstance *build.Instance

	// At a minimum, we invalidate packages known to contain uri.
	for _, pkgPath := range graph.FilesToPackage[uri] {
		invalidated[pkgPath] = struct{}{}
		invalidInst := graph.Packages[pkgPath]
		// "_" is the anonymous package name, so it cannot act as a root.
		if invalidInst.PkgName != "_" && invalidInst.Dir == filepath.Dir(uri.Path()) {
			rootInvalidInstance = invalidInst
		}
	}

	// If the file didn't move to a new package (or the file has no
	// useful pkgName), we should only invalidate the packages it is
	// currently contained inside.
	if pkgName == "" || pkgName == "_" || rootInvalidInstance == nil || (!packageFileChanged && len(invalidated) > 0) {
		return invalidated
	}

	invalidIpt := cueast.ParseImportPath(rootInvalidInstance.ImportPath)

	// For all the packages we know about, add to the set of
	// invalidated packages, if they're in the same package (name) as
	// our file, and their import path is equal, or longer than our
	// root instance. This is the scenario when a file moves into a
	// package that already exists and may need to be included as an
	// ancestor.
	for pkgPath, inst := range graph.Packages {
		if _, invalid := invalidated[pkgPath]; invalid {
			continue
		}
		// Only instances in the same module as the root can contain the file.
		if inst.Module != rootInvalidInstance.Module {
			continue
		}
		// Only instances in or below the root's directory are candidates.
		if !strings.HasPrefix(inst.Dir, rootInvalidInstance.Dir) {
			continue
		}

		ipt := cueast.ParseImportPath(inst.ImportPath)

		if ipt.Qualifier == pkgName && strings.HasPrefix(ipt.Path, invalidIpt.Path+"/") {
			// It's the same pkgName (that we're moving to), and this inst is:
			// 1. in the same module,
			// 2. a descendant of the uri's directory, and
			// 3. a "descendant" of the rootInvalidInstance's package path
			// Therefore we invalidate it too. Note we deliberately
			// ignore versions and "explicit qualifier" here.
			invalidated[ImportPath(inst.ImportPath)] = struct{}{}
			// NOTE(review): this break stops after the first matching
			// descendant, but the comment above suggests every matching
			// instance should be invalidated — confirm the early exit is
			// intentional.
			break
		}
	}

	return invalidated
}
  1429  
  1430  // fileWasSaved reports whether the FileHandle passed in has been saved. It
  1431  // accomplishes this by checking to see if the original and current FileHandles
  1432  // are both overlays, and if the current FileHandle is saved while the original
  1433  // FileHandle was not saved.
  1434  func fileWasSaved(originalFH, currentFH file.Handle) bool {
  1435  	c, ok := currentFH.(*overlay)
  1436  	if !ok || c == nil {
  1437  		return true
  1438  	}
  1439  	o, ok := originalFH.(*overlay)
  1440  	if !ok || o == nil {
  1441  		return c.saved
  1442  	}
  1443  	return !o.saved && c.saved
  1444  }
  1445  
  1446  func readCueHeaders(fh file.Handle) (*cueast.File, error) {
  1447  	content, err := fh.Content()
  1448  	if err != nil {
  1449  		return nil, err
  1450  	}
  1451  	// make sure we only parse the package name and imports, and don't
  1452  	// spend time parsing the rest of the file.
  1453  	headers, err := cueimports.Read(bytes.NewReader(content))
  1454  	if err != nil {
  1455  		return nil, err
  1456  	}
  1457  	return cueparser.ParseFile(fh.URI().Path(), headers, cueparser.ImportsOnly)
  1458  }
  1459  
// metadataChanges detects features of the change from oldFH->newFH that may
// affect package metadata.
//
// It uses lockedSnapshot to access cached parse information. lockedSnapshot
// must be locked.
//
// The result parameters have the following meaning:
//   - invalidate means that package metadata for packages containing the file
//     should be invalidated.
//   - pkgFileChanged means that the file->package associates for the file have
//     changed (possibly because the file is new, or because its package name has
//     changed).
//   - importDeleted means that an import has been deleted, or we can't
//     determine if an import was deleted due to errors.
//   - pkgName is the package name parsed from the new file's header, or ""
//     when the new file is absent or unparseable.
func metadataChanges(ctx context.Context, lockedSnapshot *Snapshot, oldFH, newFH file.Handle) (invalidate, pkgFileChanged, importDeleted bool, pkgName string) {
	oldExists := oldFH != nil && fileExists(oldFH)
	newExists := fileExists(newFH)

	// Parse the new file's header first: pkgName is part of every return path.
	var newHead *cueast.File
	var newErr error
	if newExists {
		newHead, newErr = readCueHeaders(newFH)
		if newErr == nil {
			pkgName = newHead.PackageName()
		}
	}

	if !oldExists || !newExists { // existential changes
		changed := oldExists != newExists
		return changed, changed, !newExists, pkgName // we don't know if an import was deleted
	}

	// If the file hasn't changed, there's no need to reload.
	if oldFH.Identity() == newFH.Identity() {
		return false, false, false, pkgName
	}

	// Parse headers to compare package names and imports.
	oldHead, oldErr := readCueHeaders(oldFH)

	// If exactly one side failed to parse, everything may have changed;
	// if both failed, nothing observable changed.
	if oldErr != nil || newErr != nil {
		errChanged := (oldErr == nil) != (newErr == nil)
		return errChanged, errChanged, errChanged, pkgName
	}

	// If a package name has changed, the set of package imports may have changed
	// in ways we can't detect here. Assume an import has been deleted.
	if oldHead.PackageName() != pkgName {
		return true, true, true, pkgName
	}

	// Check whether package imports have changed. Only consider potentially
	// valid imports paths.
	oldImports := validImports(oldHead.Imports)
	newImports := validImports(newHead.Imports)

	// Remove common imports from oldImports; whatever remains was deleted.
	for path := range newImports {
		if _, ok := oldImports[path]; ok {
			delete(oldImports, path)
		} else {
			invalidate = true // a new, potentially valid import was added
		}
	}

	if len(oldImports) > 0 {
		invalidate = true
		importDeleted = true
	}

	// TODO(ms): in the original, there's further work done here to
	// parse more of the file and get magic comments. We skip this for
	// now until we know if it's useful for CUE.

	// Note: pkgFileChanged is never set after the early returns above, so
	// this path always reports pkgFileChanged=false (package-name changes
	// returned earlier; magic-comment handling is skipped per the TODO).
	return invalidate, pkgFileChanged, importDeleted, pkgName
}
  1535  
  1536  // validImports extracts the set of valid import paths from imports.
  1537  func validImports(imports []*cueast.ImportSpec) map[string]struct{} {
  1538  	pkgPaths := make(map[string]struct{})
  1539  	for _, imp := range imports {
  1540  		pkgPath, err := strconv.Unquote(imp.Path.Value)
  1541  		if err != nil {
  1542  			continue
  1543  		}
  1544  		// Canonicalize the path.
  1545  		pkgPath = cueast.ParseImportPath(pkgPath).Canonical().String()
  1546  		pkgPaths[pkgPath] = struct{}{}
  1547  	}
  1548  	return pkgPaths
  1549  }