golang.org/x/tools/gopls@v0.15.3/internal/cache/check.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package cache
     6  
     7  import (
     8  	"context"
     9  	"crypto/sha256"
    10  	"fmt"
    11  	"go/ast"
    12  	"go/build"
    13  	"go/parser"
    14  	"go/token"
    15  	"go/types"
    16  	"regexp"
    17  	"runtime"
    18  	"sort"
    19  	"strings"
    20  	"sync"
    21  	"sync/atomic"
    22  
    23  	"golang.org/x/mod/module"
    24  	"golang.org/x/sync/errgroup"
    25  	"golang.org/x/tools/go/ast/astutil"
    26  	"golang.org/x/tools/gopls/internal/cache/metadata"
    27  	"golang.org/x/tools/gopls/internal/cache/typerefs"
    28  	"golang.org/x/tools/gopls/internal/file"
    29  	"golang.org/x/tools/gopls/internal/filecache"
    30  	"golang.org/x/tools/gopls/internal/protocol"
    31  	"golang.org/x/tools/gopls/internal/util/bug"
    32  	"golang.org/x/tools/gopls/internal/util/safetoken"
    33  	"golang.org/x/tools/gopls/internal/util/slices"
    34  	"golang.org/x/tools/internal/analysisinternal"
    35  	"golang.org/x/tools/internal/event"
    36  	"golang.org/x/tools/internal/event/tag"
    37  	"golang.org/x/tools/internal/gcimporter"
    38  	"golang.org/x/tools/internal/packagesinternal"
    39  	"golang.org/x/tools/internal/tokeninternal"
    40  	"golang.org/x/tools/internal/typesinternal"
    41  	"golang.org/x/tools/internal/versions"
    42  )
    43  
    44  // Various optimizations that should not affect correctness.
    45  const (
    46  	preserveImportGraph = true // hold on to the import graph for open packages
    47  )
    48  
    49  type unit = struct{}
    50  
    51  // A typeCheckBatch holds data for a logical type-checking operation, which may
    52  // type-check many unrelated packages.
    53  //
    54  // It shares state such as parsed files and imports, to optimize type-checking
    55  // for packages with overlapping dependency graphs.
    56  type typeCheckBatch struct {
    57  	activePackageCache interface {
    58  		getActivePackage(id PackageID) *Package
    59  		setActivePackage(id PackageID, pkg *Package)
    60  	}
    61  	syntaxIndex map[PackageID]int // requested ID -> index in ids
    62  	pre         preTypeCheck
    63  	post        postTypeCheck
    64  	handles     map[PackageID]*packageHandle
    65  	parseCache  *parseCache
    66  	fset        *token.FileSet // describes all parsed or imported files
    67  	cpulimit    chan unit      // concurrency limiter for CPU-bound operations
    68  
    69  	mu             sync.Mutex
    70  	syntaxPackages map[PackageID]*futurePackage // results of processing a requested package; may hold (nil, nil)
    71  	importPackages map[PackageID]*futurePackage // package results to use for importing
    72  }
    73  
    74  // A futurePackage is a future result of type checking or importing a package,
    75  // to be cached in a map.
    76  //
    77  // The goroutine that creates the futurePackage is responsible for evaluating
    78  // its value, and closing the done channel.
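         //
         // For example, getImportPackage (below) installs a futurePackage with an
         // open done channel, computes the value, then closes done; any later
         // request for the same package simply waits:
         //
         //	select {
         //	case <-ctx.Done():
         //		return nil, ctx.Err()
         //	case <-f.done:
         //		return f.v.pkg, f.v.err
         //	}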
    79  type futurePackage struct {
    80  	done chan unit
    81  	v    pkgOrErr
    82  }
    83  
    84  type pkgOrErr struct {
    85  	pkg *types.Package
    86  	err error
    87  }
    88  
    89  // TypeCheck parses and type-checks the specified packages,
    90  // and returns them in the same order as the ids.
    91  // The resulting packages' types may belong to different importers,
    92  // so types from different packages are incommensurable.
    93  //
    94  // The resulting packages slice always contains len(ids) entries, though some
    95  // of them may be nil if (and only if) the resulting error is non-nil.
    96  //
    97  // An error is returned if any of the requested packages fail to type-check.
    98  // This is different from having type-checking errors: a failure to type-check
    99  // indicates context cancellation or otherwise significant failure to perform
   100  // the type-checking operation.
   101  //
    102  // In general, clients should never need type-checked syntax for an
   103  // intermediate test variant (ITV) package. Callers should apply
   104  // RemoveIntermediateTestVariants (or equivalent) before this method, or any
   105  // of the potentially type-checking methods below.
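         //
         // For example (a sketch; obtaining IDs via MetadataForFile is just one
         // possible approach):
         //
         //	mps, err := snapshot.MetadataForFile(ctx, uri)
         //	if err != nil {
         //		return err
         //	}
         //	metadata.RemoveIntermediateTestVariants(&mps)
         //	var ids []PackageID
         //	for _, mp := range mps {
         //		ids = append(ids, mp.ID)
         //	}
         //	pkgs, err := snapshot.TypeCheck(ctx, ids...)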
   106  func (s *Snapshot) TypeCheck(ctx context.Context, ids ...PackageID) ([]*Package, error) {
   107  	pkgs := make([]*Package, len(ids))
   108  
   109  	var (
   110  		needIDs []PackageID // ids to type-check
   111  		indexes []int       // original index of requested ids
   112  	)
   113  
   114  	// Check for existing active packages, as any package will do.
   115  	//
   116  	// This is also done inside forEachPackage, but doing it here avoids
   117  	// unnecessary set up for type checking (e.g. assembling the package handle
   118  	// graph).
   119  	for i, id := range ids {
   120  		if pkg := s.getActivePackage(id); pkg != nil {
   121  			pkgs[i] = pkg
   122  		} else {
   123  			needIDs = append(needIDs, id)
   124  			indexes = append(indexes, i)
   125  		}
   126  	}
   127  
   128  	post := func(i int, pkg *Package) {
   129  		pkgs[indexes[i]] = pkg
   130  	}
   131  	return pkgs, s.forEachPackage(ctx, needIDs, nil, post)
   132  }
   133  
    134  // getImportGraph returns a shared import graph to use for this snapshot, or nil.
   135  //
   136  // This is purely an optimization: holding on to more imports allows trading
   137  // memory for CPU and latency. Currently, getImportGraph returns an import
   138  // graph containing all packages imported by open packages, since these are
   139  // highly likely to be needed when packages change.
   140  //
   141  // Furthermore, since we memoize active packages, including their imports in
   142  // the shared import graph means we don't run the risk of pinning duplicate
   143  // copies of common imports, if active packages are computed in separate type
   144  // checking batches.
   145  func (s *Snapshot) getImportGraph(ctx context.Context) *importGraph {
   146  	if !preserveImportGraph {
   147  		return nil
   148  	}
   149  	s.mu.Lock()
   150  
   151  	// Evaluate the shared import graph for the snapshot. There are three major
   152  	// codepaths here:
   153  	//
   154  	//  1. importGraphDone == nil, importGraph == nil: it is this goroutine's
   155  	//     responsibility to type-check the shared import graph.
   156  	//  2. importGraphDone == nil, importGraph != nil: it is this goroutine's
   157  	//     responsibility to resolve the import graph, which may result in
   158  	//     type-checking only if the existing importGraph (carried over from the
   159  	//     preceding snapshot) is invalid.
   160  	//  3. importGraphDone != nil: some other goroutine is doing (1) or (2), wait
   161  	//     for the work to be done.
   162  	done := s.importGraphDone
   163  	if done == nil {
   164  		done = make(chan unit)
   165  		s.importGraphDone = done
   166  		release := s.Acquire() // must acquire to use the snapshot asynchronously
   167  		go func() {
   168  			defer release()
   169  			importGraph, err := s.resolveImportGraph() // may be nil
   170  			if err != nil {
   171  				if ctx.Err() == nil {
   172  					event.Error(ctx, "computing the shared import graph", err)
   173  				}
   174  				importGraph = nil
   175  			}
   176  			s.mu.Lock()
   177  			s.importGraph = importGraph
   178  			s.mu.Unlock()
   179  			close(done)
   180  		}()
   181  	}
   182  	s.mu.Unlock()
   183  
   184  	select {
   185  	case <-done:
   186  		return s.importGraph
   187  	case <-ctx.Done():
   188  		return nil
   189  	}
   190  }
   191  
   192  // resolveImportGraph evaluates the shared import graph to use for
   193  // type-checking in this snapshot. This may involve re-using the import graph
   194  // of the previous snapshot (stored in s.importGraph), or computing a fresh
   195  // import graph.
   196  //
   197  // resolveImportGraph should only be called from getImportGraph.
   198  func (s *Snapshot) resolveImportGraph() (*importGraph, error) {
   199  	ctx := s.backgroundCtx
   200  	ctx, done := event.Start(event.Detach(ctx), "cache.resolveImportGraph")
   201  	defer done()
   202  
   203  	s.mu.Lock()
   204  	lastImportGraph := s.importGraph
   205  	s.mu.Unlock()
   206  
   207  	openPackages := make(map[PackageID]bool)
   208  	for _, fh := range s.Overlays() {
   209  		// golang/go#66145: don't call MetadataForFile here. This function, which
   210  		// builds a shared import graph, is an optimization. We don't want it to
   211  		// have the side effect of triggering a load.
   212  		//
   213  		// In the past, a call to MetadataForFile here caused a bunch of
   214  		// unnecessary loads in multi-root workspaces (and as a result, spurious
   215  		// diagnostics).
   216  		g := s.MetadataGraph()
   217  		var mps []*metadata.Package
   218  		for _, id := range g.IDs[fh.URI()] {
   219  			mps = append(mps, g.Packages[id])
   220  		}
   221  		metadata.RemoveIntermediateTestVariants(&mps)
   222  		for _, mp := range mps {
   223  			openPackages[mp.ID] = true
   224  		}
   225  	}
   226  
   227  	var openPackageIDs []PackageID
   228  	for id := range openPackages {
   229  		openPackageIDs = append(openPackageIDs, id)
   230  	}
   231  
   232  	handles, err := s.getPackageHandles(ctx, openPackageIDs)
   233  	if err != nil {
   234  		return nil, err
   235  	}
   236  
   237  	// Subtlety: we erase the upward cone of open packages from the shared import
   238  	// graph, to increase reusability.
   239  	//
   240  	// This is easiest to understand via an example: suppose A imports B, and B
   241  	// imports C. Now suppose A and B are open. If we preserve the entire set of
   242  	// shared deps by open packages, deps will be {B, C}. But this means that any
   243  	// change to the open package B will invalidate the shared import graph,
   244  	// meaning we will experience no benefit from sharing when B is edited.
   245  	// Consider that this will be a common scenario, when A is foo_test and B is
   246  	// foo. Better to just preserve the shared import C.
   247  	//
   248  	// With precise pruning, we may want to truncate this search based on
   249  	// reachability.
   250  	//
   251  	// TODO(rfindley): this logic could use a unit test.
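         	// In the example above, isVolatile reports true for A and B (both open)
         	// and false for C, so only C's handle survives into the shared graph.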
   252  	volatileDeps := make(map[PackageID]bool)
   253  	var isVolatile func(*packageHandle) bool
   254  	isVolatile = func(ph *packageHandle) (volatile bool) {
   255  		if v, ok := volatileDeps[ph.mp.ID]; ok {
   256  			return v
   257  		}
   258  		defer func() {
   259  			volatileDeps[ph.mp.ID] = volatile
   260  		}()
   261  		if openPackages[ph.mp.ID] {
   262  			return true
   263  		}
   264  		for _, dep := range ph.mp.DepsByPkgPath {
   265  			if isVolatile(handles[dep]) {
   266  				return true
   267  			}
   268  		}
   269  		return false
   270  	}
   271  	for _, dep := range handles {
   272  		isVolatile(dep)
   273  	}
   274  	for id, volatile := range volatileDeps {
   275  		if volatile {
   276  			delete(handles, id)
   277  		}
   278  	}
   279  
   280  	// We reuse the last import graph if and only if none of the dependencies
   281  	// have changed. Doing better would involve analyzing dependencies to find
   282  	// subgraphs that are still valid. Not worth it, especially when in the
   283  	// common case nothing has changed.
   284  	unchanged := lastImportGraph != nil && len(handles) == len(lastImportGraph.depKeys)
   285  	var ids []PackageID
   286  	depKeys := make(map[PackageID]file.Hash)
   287  	for id, ph := range handles {
   288  		ids = append(ids, id)
   289  		depKeys[id] = ph.key
   290  		if unchanged {
   291  			prevKey, ok := lastImportGraph.depKeys[id]
   292  			unchanged = ok && prevKey == ph.key
   293  		}
   294  	}
   295  
   296  	if unchanged {
   297  		return lastImportGraph, nil
   298  	}
   299  
   300  	b, err := s.forEachPackageInternal(ctx, nil, ids, nil, nil, nil, handles)
   301  	if err != nil {
   302  		return nil, err
   303  	}
   304  
   305  	next := &importGraph{
   306  		fset:    b.fset,
   307  		depKeys: depKeys,
   308  		imports: make(map[PackageID]pkgOrErr),
   309  	}
   310  	for id, fut := range b.importPackages {
   311  		if fut.v.pkg == nil && fut.v.err == nil {
   312  			panic(fmt.Sprintf("internal error: import node %s is not evaluated", id))
   313  		}
   314  		next.imports[id] = fut.v
   315  	}
   316  	return next, nil
   317  }
   318  
   319  // An importGraph holds selected results of a type-checking pass, to be re-used
   320  // by subsequent snapshots.
   321  type importGraph struct {
   322  	fset    *token.FileSet          // fileset used for type checking imports
   323  	depKeys map[PackageID]file.Hash // hash of direct dependencies for this graph
   324  	imports map[PackageID]pkgOrErr  // results of type checking
   325  }
   326  
   327  // Package visiting functions used by forEachPackage; see the documentation of
   328  // forEachPackage for details.
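         //
         // For example, TypeCheck (above) passes a nil pre func and a post func that
         // stores each freshly type-checked package at its originally requested index:
         //
         //	post := func(i int, pkg *Package) {
         //		pkgs[indexes[i]] = pkg
         //	}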
   329  type (
   330  	preTypeCheck  = func(int, *packageHandle) bool // false => don't type check
   331  	postTypeCheck = func(int, *Package)
   332  )
   333  
   334  // forEachPackage does a pre- and post- order traversal of the packages
   335  // specified by ids using the provided pre and post functions.
   336  //
   337  // The pre func is optional. If set, pre is evaluated after the package
   338  // handle has been constructed, but before type-checking. If pre returns false,
   339  // type-checking is skipped for this package handle.
   340  //
   341  // post is called with a syntax package after type-checking completes
   342  // successfully. It is only called if pre returned true.
   343  //
   344  // Both pre and post may be called concurrently.
   345  func (s *Snapshot) forEachPackage(ctx context.Context, ids []PackageID, pre preTypeCheck, post postTypeCheck) error {
   346  	ctx, done := event.Start(ctx, "cache.forEachPackage", tag.PackageCount.Of(len(ids)))
   347  	defer done()
   348  
   349  	if len(ids) == 0 {
   350  		return nil // short cut: many call sites do not handle empty ids
   351  	}
   352  
   353  	handles, err := s.getPackageHandles(ctx, ids)
   354  	if err != nil {
   355  		return err
   356  	}
   357  
   358  	impGraph := s.getImportGraph(ctx)
   359  	_, err = s.forEachPackageInternal(ctx, impGraph, nil, ids, pre, post, handles)
   360  	return err
   361  }
   362  
   363  // forEachPackageInternal is used by both forEachPackage and loadImportGraph to
   364  // type-check a graph of packages.
   365  //
   366  // If a non-nil importGraph is provided, imports in this graph will be reused.
   367  func (s *Snapshot) forEachPackageInternal(ctx context.Context, importGraph *importGraph, importIDs, syntaxIDs []PackageID, pre preTypeCheck, post postTypeCheck, handles map[PackageID]*packageHandle) (*typeCheckBatch, error) {
   368  	b := &typeCheckBatch{
   369  		activePackageCache: s,
   370  		pre:                pre,
   371  		post:               post,
   372  		handles:            handles,
   373  		parseCache:         s.view.parseCache,
   374  		fset:               fileSetWithBase(reservedForParsing),
   375  		syntaxIndex:        make(map[PackageID]int),
   376  		cpulimit:           make(chan unit, runtime.GOMAXPROCS(0)),
   377  		syntaxPackages:     make(map[PackageID]*futurePackage),
   378  		importPackages:     make(map[PackageID]*futurePackage),
   379  	}
   380  
   381  	if importGraph != nil {
   382  		// Clone the file set every time, to ensure we do not leak files.
   383  		b.fset = tokeninternal.CloneFileSet(importGraph.fset)
   384  		// Pre-populate future cache with 'done' futures.
   385  		done := make(chan unit)
   386  		close(done)
   387  		for id, res := range importGraph.imports {
   388  			b.importPackages[id] = &futurePackage{done, res}
   389  		}
   390  	} else {
   391  		b.fset = fileSetWithBase(reservedForParsing)
   392  	}
   393  
   394  	for i, id := range syntaxIDs {
   395  		b.syntaxIndex[id] = i
   396  	}
   397  
   398  	// Start a single goroutine for each requested package.
   399  	//
   400  	// Other packages are reached recursively, and will not be evaluated if they
   401  	// are not needed.
   402  	var g errgroup.Group
   403  	for _, id := range importIDs {
   404  		id := id
   405  		g.Go(func() error {
   406  			_, err := b.getImportPackage(ctx, id)
   407  			return err
   408  		})
   409  	}
   410  	for i, id := range syntaxIDs {
   411  		i := i
   412  		id := id
   413  		g.Go(func() error {
   414  			_, err := b.handleSyntaxPackage(ctx, i, id)
   415  			return err
   416  		})
   417  	}
   418  	return b, g.Wait()
   419  }
   420  
   421  // TODO(rfindley): re-order the declarations below to read better from top-to-bottom.
   422  
   423  // getImportPackage returns the *types.Package to use for importing the
   424  // package referenced by id.
   425  //
   426  // This may be the package produced by type-checking syntax (as in the case
   427  // where id is in the set of requested IDs), a package loaded from export data,
   428  // or a package type-checked for import only.
   429  func (b *typeCheckBatch) getImportPackage(ctx context.Context, id PackageID) (pkg *types.Package, err error) {
   430  	b.mu.Lock()
   431  	f, ok := b.importPackages[id]
   432  	if ok {
   433  		b.mu.Unlock()
   434  
   435  		select {
   436  		case <-ctx.Done():
   437  			return nil, ctx.Err()
   438  		case <-f.done:
   439  			return f.v.pkg, f.v.err
   440  		}
   441  	}
   442  
   443  	f = &futurePackage{done: make(chan unit)}
   444  	b.importPackages[id] = f
   445  	b.mu.Unlock()
   446  
   447  	defer func() {
   448  		f.v = pkgOrErr{pkg, err}
   449  		close(f.done)
   450  	}()
   451  
   452  	if index, ok := b.syntaxIndex[id]; ok {
   453  		pkg, err := b.handleSyntaxPackage(ctx, index, id)
   454  		if err != nil {
   455  			return nil, err
   456  		}
   457  		if pkg != nil {
   458  			return pkg, nil
   459  		}
   460  		// type-checking was short-circuited by the pre- func.
   461  	}
   462  
   463  	// unsafe cannot be imported or type-checked.
   464  	if id == "unsafe" {
   465  		return types.Unsafe, nil
   466  	}
   467  
   468  	ph := b.handles[id]
   469  
   470  	// Do a second check for "unsafe" defensively, due to golang/go#60890.
   471  	if ph.mp.PkgPath == "unsafe" {
   472  		bug.Reportf("encountered \"unsafe\" as %s (golang/go#60890)", id)
   473  		return types.Unsafe, nil
   474  	}
   475  
   476  	data, err := filecache.Get(exportDataKind, ph.key)
   477  	if err == filecache.ErrNotFound {
   478  		// No cached export data: type-check as fast as possible.
   479  		return b.checkPackageForImport(ctx, ph)
   480  	}
   481  	if err != nil {
   482  		return nil, fmt.Errorf("failed to read cache data for %s: %v", ph.mp.ID, err)
   483  	}
   484  	return b.importPackage(ctx, ph.mp, data)
   485  }
   486  
   487  // handleSyntaxPackage handles one package from the ids slice.
   488  //
   489  // If type checking occurred while handling the package, it returns the
   490  // resulting types.Package so that it may be used for importing.
   491  //
   492  // handleSyntaxPackage returns (nil, nil) if pre returned false.
   493  func (b *typeCheckBatch) handleSyntaxPackage(ctx context.Context, i int, id PackageID) (pkg *types.Package, err error) {
   494  	b.mu.Lock()
   495  	f, ok := b.syntaxPackages[id]
   496  	if ok {
   497  		b.mu.Unlock()
   498  		<-f.done
   499  		return f.v.pkg, f.v.err
   500  	}
   501  
   502  	f = &futurePackage{done: make(chan unit)}
   503  	b.syntaxPackages[id] = f
   504  	b.mu.Unlock()
   505  	defer func() {
   506  		f.v = pkgOrErr{pkg, err}
   507  		close(f.done)
   508  	}()
   509  
   510  	ph := b.handles[id]
   511  	if b.pre != nil && !b.pre(i, ph) {
   512  		return nil, nil // skip: export data only
   513  	}
   514  
   515  	// Check for existing active packages.
   516  	//
   517  	// Since gopls can't depend on package identity, any instance of the
   518  	// requested package must be ok to return.
   519  	//
   520  	// This is an optimization to avoid redundant type-checking: following
    521  // changes to an open package, many LSP clients send several successive
   522  	// requests for package information for the modified package (semantic
   523  	// tokens, code lens, inlay hints, etc.)
   524  	if pkg := b.activePackageCache.getActivePackage(id); pkg != nil {
   525  		b.post(i, pkg)
   526  		return nil, nil // skip: not checked in this batch
   527  	}
   528  
   529  	// Wait for predecessors.
   530  	{
   531  		var g errgroup.Group
   532  		for _, depID := range ph.mp.DepsByPkgPath {
   533  			depID := depID
   534  			g.Go(func() error {
   535  				_, err := b.getImportPackage(ctx, depID)
   536  				return err
   537  			})
   538  		}
   539  		if err := g.Wait(); err != nil {
   540  			// Failure to import a package should not abort the whole operation.
   541  			// Stop only if the context was cancelled, a likely cause.
   542  			// Import errors will be reported as type diagnostics.
   543  			if ctx.Err() != nil {
   544  				return nil, ctx.Err()
   545  			}
   546  		}
   547  	}
   548  
   549  	// Wait to acquire a CPU token.
   550  	//
   551  	// Note: it is important to acquire this token only after awaiting
   552  	// predecessors, to avoid starvation.
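         	// (Otherwise, every CPU token could be held by a package that is itself
         	// blocked waiting for dependencies that cannot acquire a token.)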
   553  	select {
   554  	case <-ctx.Done():
   555  		return nil, ctx.Err()
   556  	case b.cpulimit <- unit{}:
   557  		defer func() {
   558  			<-b.cpulimit // release CPU token
   559  		}()
   560  	}
   561  
   562  	// Compute the syntax package.
   563  	p, err := b.checkPackage(ctx, ph)
   564  	if err != nil {
   565  		return nil, err
   566  	}
   567  
   568  	// Update caches.
   569  	b.activePackageCache.setActivePackage(id, p) // store active packages in memory
   570  	go storePackageResults(ctx, ph, p)           // ...and write all packages to disk
   571  
   572  	b.post(i, p)
   573  
   574  	return p.pkg.types, nil
   575  }
   576  
   577  // storePackageResults serializes and writes information derived from p to the
   578  // file cache.
   579  // The context is used only for logging; cancellation does not affect the operation.
   580  func storePackageResults(ctx context.Context, ph *packageHandle, p *Package) {
   581  	toCache := map[string][]byte{
   582  		xrefsKind:       p.pkg.xrefs(),
   583  		methodSetsKind:  p.pkg.methodsets().Encode(),
   584  		diagnosticsKind: encodeDiagnostics(p.pkg.diagnostics),
   585  	}
   586  
   587  	if p.metadata.PkgPath != "unsafe" { // unsafe cannot be exported
   588  		exportData, err := gcimporter.IExportShallow(p.pkg.fset, p.pkg.types, bug.Reportf)
   589  		if err != nil {
   590  			bug.Reportf("exporting package %v: %v", p.metadata.ID, err)
   591  		} else {
   592  			toCache[exportDataKind] = exportData
   593  		}
   594  	} else if p.metadata.ID != "unsafe" {
   595  		// golang/go#60890: we should only ever see one variant of the "unsafe"
   596  		// package.
   597  		bug.Reportf("encountered \"unsafe\" as %s (golang/go#60890)", p.metadata.ID)
   598  	}
   599  
   600  	for kind, data := range toCache {
   601  		if err := filecache.Set(kind, ph.key, data); err != nil {
   602  			event.Error(ctx, fmt.Sprintf("storing %s data for %s", kind, ph.mp.ID), err)
   603  		}
   604  	}
   605  }
   606  
    607  // importPackage loads the given package from its export data (the data
    608  // argument, which must already be populated).
   609  func (b *typeCheckBatch) importPackage(ctx context.Context, mp *metadata.Package, data []byte) (*types.Package, error) {
   610  	ctx, done := event.Start(ctx, "cache.typeCheckBatch.importPackage", tag.Package.Of(string(mp.ID)))
   611  	defer done()
   612  
   613  	impMap := b.importMap(mp.ID)
   614  
   615  	thisPackage := types.NewPackage(string(mp.PkgPath), string(mp.Name))
   616  	getPackages := func(items []gcimporter.GetPackagesItem) error {
   617  		for i, item := range items {
   618  			var id PackageID
   619  			var pkg *types.Package
   620  			if item.Path == string(mp.PkgPath) {
   621  				id = mp.ID
   622  				pkg = thisPackage
   623  
   624  				// debugging issues #60904, #64235
   625  				if pkg.Name() != item.Name {
   626  					// This would mean that mp.Name != item.Name, so the
   627  					// manifest in the export data of mp.PkgPath is
   628  					// inconsistent with mp.Name. Or perhaps there
   629  					// are duplicate PkgPath items in the manifest?
   630  					return bug.Errorf("internal error: package name is %q, want %q (id=%q, path=%q) (see issue #60904)",
   631  						pkg.Name(), item.Name, id, item.Path)
   632  				}
   633  			} else {
   634  				id = impMap[item.Path]
   635  				var err error
   636  				pkg, err = b.getImportPackage(ctx, id)
   637  				if err != nil {
   638  					return err
   639  				}
   640  
   641  				// We intentionally duplicate the bug.Errorf calls because
   642  				// telemetry tells us only the program counter, not the message.
   643  
   644  				// debugging issues #60904, #64235
   645  				if pkg.Name() != item.Name {
   646  					// This means that, while reading the manifest of the
   647  					// export data of mp.PkgPath, one of its indirect
   648  					// dependencies had a name that differs from the
   649  					// Metadata.Name
   650  					return bug.Errorf("internal error: package name is %q, want %q (id=%q, path=%q) (see issue #60904)",
   651  						pkg.Name(), item.Name, id, item.Path)
   652  				}
   653  			}
   654  			items[i].Pkg = pkg
   655  
   656  		}
   657  		return nil
   658  	}
   659  
   660  	// Importing is potentially expensive, and might not encounter cancellations
   661  	// via dependencies (e.g. if they have already been evaluated).
   662  	if ctx.Err() != nil {
   663  		return nil, ctx.Err()
   664  	}
   665  
   666  	imported, err := gcimporter.IImportShallow(b.fset, getPackages, data, string(mp.PkgPath), bug.Reportf)
   667  	if err != nil {
   668  		return nil, fmt.Errorf("import failed for %q: %v", mp.ID, err)
   669  	}
   670  	return imported, nil
   671  }
   672  
   673  // checkPackageForImport type checks, but skips function bodies and does not
   674  // record syntax information.
   675  func (b *typeCheckBatch) checkPackageForImport(ctx context.Context, ph *packageHandle) (*types.Package, error) {
   676  	ctx, done := event.Start(ctx, "cache.typeCheckBatch.checkPackageForImport", tag.Package.Of(string(ph.mp.ID)))
   677  	defer done()
   678  
   679  	onError := func(e error) {
   680  		// Ignore errors for exporting.
   681  	}
   682  	cfg := b.typesConfig(ctx, ph.localInputs, onError)
   683  	cfg.IgnoreFuncBodies = true
   684  
   685  	// Parse the compiled go files, bypassing the parse cache as packages checked
   686  	// for import are unlikely to get cache hits. Additionally, we can optimize
   687  	// parsing slightly by not passing parser.ParseComments.
   688  	pgfs := make([]*ParsedGoFile, len(ph.localInputs.compiledGoFiles))
   689  	{
   690  		var group errgroup.Group
   691  		// Set an arbitrary concurrency limit; we want some parallelism but don't
   692  		// need GOMAXPROCS, as there is already a lot of concurrency among calls to
   693  		// checkPackageForImport.
   694  		//
   695  		// TODO(rfindley): is there a better way to limit parallelism here? We could
   696  		// have a global limit on the type-check batch, but would have to be very
   697  		// careful to avoid starvation.
   698  		group.SetLimit(4)
   699  		for i, fh := range ph.localInputs.compiledGoFiles {
   700  			i, fh := i, fh
   701  			group.Go(func() error {
   702  				pgf, err := parseGoImpl(ctx, b.fset, fh, parser.SkipObjectResolution, false)
   703  				pgfs[i] = pgf
   704  				return err
   705  			})
   706  		}
   707  		if err := group.Wait(); err != nil {
   708  			return nil, err // cancelled, or catastrophic error (e.g. missing file)
   709  		}
   710  	}
   711  	pkg := types.NewPackage(string(ph.localInputs.pkgPath), string(ph.localInputs.name))
   712  	check := types.NewChecker(cfg, b.fset, pkg, nil)
   713  
   714  	files := make([]*ast.File, len(pgfs))
   715  	for i, pgf := range pgfs {
   716  		files[i] = pgf.File
   717  	}
   718  
   719  	// Type checking is expensive, and we may not have encountered cancellations
   720  	// via parsing (e.g. if we got nothing but cache hits for parsed files).
   721  	if ctx.Err() != nil {
   722  		return nil, ctx.Err()
   723  	}
   724  
   725  	_ = check.Files(files) // ignore errors
   726  
   727  	// If the context was cancelled, we may have returned a ton of transient
   728  	// errors to the type checker. Swallow them.
   729  	if ctx.Err() != nil {
   730  		return nil, ctx.Err()
   731  	}
   732  
   733  	// Asynchronously record export data.
   734  	go func() {
   735  		exportData, err := gcimporter.IExportShallow(b.fset, pkg, bug.Reportf)
   736  		if err != nil {
   737  			bug.Reportf("exporting package %v: %v", ph.mp.ID, err)
   738  			return
   739  		}
   740  		if err := filecache.Set(exportDataKind, ph.key, exportData); err != nil {
   741  			event.Error(ctx, fmt.Sprintf("storing export data for %s", ph.mp.ID), err)
   742  		}
   743  	}()
   744  	return pkg, nil
   745  }
   746  
   747  // importMap returns the map of package path -> package ID relative to the
   748  // specified ID.
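         // The map covers the transitive dependencies of id, but does not include id
         // itself.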
   749  func (b *typeCheckBatch) importMap(id PackageID) map[string]PackageID {
   750  	impMap := make(map[string]PackageID)
   751  	var populateDeps func(*metadata.Package)
   752  	populateDeps = func(parent *metadata.Package) {
   753  		for _, id := range parent.DepsByPkgPath {
   754  			mp := b.handles[id].mp
   755  			if prevID, ok := impMap[string(mp.PkgPath)]; ok {
   756  				// debugging #63822
   757  				if prevID != mp.ID {
   758  					bug.Reportf("inconsistent view of dependencies")
   759  				}
   760  				continue
   761  			}
   762  			impMap[string(mp.PkgPath)] = mp.ID
   763  			populateDeps(mp)
   764  		}
   765  	}
   766  	mp := b.handles[id].mp
   767  	populateDeps(mp)
   768  	return impMap
   769  }
   770  
   771  // A packageHandle holds inputs required to compute a Package, including
   772  // metadata, derived diagnostics, files, and settings. Additionally,
   773  // packageHandles manage a key for these inputs, to use in looking up
   774  // precomputed results.
   775  //
   776  // packageHandles may be invalid following an invalidation via snapshot.clone,
   777  // but the handles returned by getPackageHandles will always be valid.
   778  //
   779  // packageHandles are critical for implementing "precise pruning" in gopls:
   780  // packageHandle.key is a hash of a precise set of inputs, such as package
   781  // files and "reachable" syntax, that may affect type checking.
   782  //
   783  // packageHandles also keep track of state that allows gopls to compute, and
   784  // then quickly recompute, these keys. This state is split into two categories:
   785  //   - local state, which depends only on the package's local files and metadata
   786  //   - other state, which includes data derived from dependencies.
   787  //
   788  // Dividing the data in this way allows gopls to minimize invalidation when a
   789  // package is modified. For example, any change to a package file fully
   790  // invalidates the package handle. On the other hand, if that change was not
   791  // metadata-affecting it may be the case that packages indirectly depending on
   792  // the modified package are unaffected by the change. For that reason, we have
   793  // two types of invalidation, corresponding to the two types of data above:
   794  //   - deletion of the handle, which occurs when the package itself changes
   795  //   - clearing of the validated field, which marks the package as possibly
   796  //     invalid.
   797  //
   798  // With the second type of invalidation, packageHandles are re-evaluated from the
   799  // bottom up. If this process encounters a packageHandle whose deps have not
   800  // changed (as detected by the depkeys field), then the packageHandle in
   801  // question must also not have changed, and we need not re-evaluate its key.
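         //
         // For example (informally): if C imports B and B imports A, and an edit to A
         // does not change any syntax reachable through the exported symbols of C's
         // dependencies, then C's recomputed key is unchanged and cached results for C
         // remain valid.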
   802  type packageHandle struct {
   803  	mp *metadata.Package
   804  
   805  	// loadDiagnostics memoizes the result of processing error messages from
   806  	// go/packages (i.e. `go list`).
   807  	//
   808  	// These are derived from metadata using a snapshot. Since they depend on
   809  	// file contents (for translating positions), they should theoretically be
   810  	// invalidated by file changes, but historically haven't been. In practice
   811  	// they are rare and indicate a fundamental error that needs to be corrected
   812  	// before development can continue, so it may not be worth significant
   813  	// engineering effort to implement accurate invalidation here.
   814  	//
   815  	// TODO(rfindley): loadDiagnostics are out of place here, as they don't
   816  	// directly relate to type checking. We should perhaps move the caching of
   817  	// load diagnostics to an entirely separate component, so that Packages need
   818  	// only be concerned with parsing and type checking.
   819  	// (Nevertheless, since the lifetime of load diagnostics matches that of the
   820  	// Metadata, it is convenient to memoize them here.)
   821  	loadDiagnostics []*Diagnostic
   822  
   823  	// Local data:
   824  
    825  	// localInputs holds all local type-checking inputs, excluding
   826  	// dependencies.
   827  	localInputs typeCheckInputs
   828  	// localKey is a hash of localInputs.
   829  	localKey file.Hash
   830  	// refs is the result of syntactic dependency analysis produced by the
   831  	// typerefs package.
   832  	refs map[string][]typerefs.Symbol
   833  
   834  	// Data derived from dependencies:
   835  
   836  	// validated indicates whether the current packageHandle is known to have a
   837  	// valid key. Invalidated package handles are stored for packages whose
   838  	// type information may have changed.
   839  	validated bool
   840  	// depKeys records the key of each dependency that was used to calculate the
    841  	// package's key. If the handle becomes invalid, we must re-check that each still
   842  	// matches.
   843  	depKeys map[PackageID]file.Hash
   844  	// key is the hashed key for the package.
   845  	//
    846  	// It includes all bits of the transitive closure of the
    847  	// dependencies' sources.
   848  	key file.Hash
   849  }
   850  
   851  // clone returns a copy of the receiver with the validated bit set to the
   852  // provided value.
   853  func (ph *packageHandle) clone(validated bool) *packageHandle {
   854  	copy := *ph
   855  	copy.validated = validated
   856  	return &copy
   857  }
   858  
   859  // getPackageHandles gets package handles for all given ids and their
   860  // dependencies, recursively.
   861  func (s *Snapshot) getPackageHandles(ctx context.Context, ids []PackageID) (map[PackageID]*packageHandle, error) {
   862  	// perform a two-pass traversal.
   863  	//
   864  	// On the first pass, build up a bidirectional graph of handle nodes, and collect leaves.
   865  	// Then build package handles from bottom up.
   866  
   867  	s.mu.Lock() // guard s.meta and s.packages below
   868  	b := &packageHandleBuilder{
   869  		s:              s,
   870  		transitiveRefs: make(map[typerefs.IndexID]*partialRefs),
   871  		nodes:          make(map[typerefs.IndexID]*handleNode),
   872  	}
   873  
   874  	var leaves []*handleNode
   875  	var makeNode func(*handleNode, PackageID) *handleNode
   876  	makeNode = func(from *handleNode, id PackageID) *handleNode {
   877  		idxID := b.s.pkgIndex.IndexID(id)
   878  		n, ok := b.nodes[idxID]
   879  		if !ok {
   880  			mp := s.meta.Packages[id]
   881  			if mp == nil {
   882  				panic(fmt.Sprintf("nil metadata for %q", id))
   883  			}
   884  			n = &handleNode{
   885  				mp:              mp,
   886  				idxID:           idxID,
   887  				unfinishedSuccs: int32(len(mp.DepsByPkgPath)),
   888  			}
   889  			if entry, hit := b.s.packages.Get(mp.ID); hit {
   890  				n.ph = entry
   891  			}
   892  			if n.unfinishedSuccs == 0 {
   893  				leaves = append(leaves, n)
   894  			} else {
   895  				n.succs = make(map[PackageID]*handleNode, n.unfinishedSuccs)
   896  			}
   897  			b.nodes[idxID] = n
   898  			for _, depID := range mp.DepsByPkgPath {
   899  				n.succs[depID] = makeNode(n, depID)
   900  			}
   901  		}
   902  		// Add edge from predecessor.
   903  		if from != nil {
   904  			n.preds = append(n.preds, from)
   905  		}
   906  		return n
   907  	}
   908  	for _, id := range ids {
   909  		makeNode(nil, id)
   910  	}
   911  	s.mu.Unlock()
   912  
   913  	g, ctx := errgroup.WithContext(ctx)
   914  
   915  	// files are preloaded, so building package handles is CPU-bound.
   916  	//
   917  	// Note that we can't use g.SetLimit, as that could result in starvation:
   918  	// g.Go blocks until a slot is available, and so all existing goroutines
   919  	// could be blocked trying to enqueue a predecessor.
   920  	limiter := make(chan unit, runtime.GOMAXPROCS(0))
   921  
   922  	var enqueue func(*handleNode)
   923  	enqueue = func(n *handleNode) {
   924  		g.Go(func() error {
   925  			limiter <- unit{}
   926  			defer func() { <-limiter }()
   927  
   928  			if ctx.Err() != nil {
   929  				return ctx.Err()
   930  			}
   931  
   932  			b.buildPackageHandle(ctx, n)
   933  
   934  			for _, pred := range n.preds {
   935  				if atomic.AddInt32(&pred.unfinishedSuccs, -1) == 0 {
   936  					enqueue(pred)
   937  				}
   938  			}
   939  
   940  			return n.err
   941  		})
   942  	}
   943  	for _, leaf := range leaves {
   944  		enqueue(leaf)
   945  	}
   946  
   947  	if err := g.Wait(); err != nil {
   948  		return nil, err
   949  	}
   950  
   951  	// Copy handles into the result map.
   952  	handles := make(map[PackageID]*packageHandle, len(b.nodes))
   953  	for _, v := range b.nodes {
   954  		assert(v.ph != nil, "nil handle")
   955  		handles[v.mp.ID] = v.ph
   956  	}
   957  
   958  	return handles, nil
   959  }
   960  
   961  // A packageHandleBuilder computes a batch of packageHandles concurrently,
   962  // sharing computed transitive reachability sets used to compute package keys.
   963  type packageHandleBuilder struct {
   964  	s *Snapshot
   965  
   966  	// nodes are assembled synchronously.
   967  	nodes map[typerefs.IndexID]*handleNode
   968  
   969  	// transitiveRefs is incrementally evaluated as package handles are built.
   970  	transitiveRefsMu sync.Mutex
   971  	transitiveRefs   map[typerefs.IndexID]*partialRefs // see getTransitiveRefs
   972  }
   973  
   974  // A handleNode represents a to-be-computed packageHandle within a graph of
   975  // predecessors and successors.
   976  //
   977  // It is used to implement a bottom-up construction of packageHandles.
   978  type handleNode struct {
   979  	mp              *metadata.Package
   980  	idxID           typerefs.IndexID
   981  	ph              *packageHandle
   982  	err             error
   983  	preds           []*handleNode
   984  	succs           map[PackageID]*handleNode
   985  	unfinishedSuccs int32
   986  }
   987  
   988  // partialRefs maps names declared by a given package to their set of
   989  // transitive references.
   990  //
   991  // If complete is set, refs is known to be complete for the package in
   992  // question. Otherwise, it may only map a subset of all names declared by the
   993  // package.
   994  type partialRefs struct {
   995  	refs     map[string]*typerefs.PackageSet
   996  	complete bool
   997  }
   998  
   999  // getTransitiveRefs gets or computes the set of transitively reachable
   1000  // packages for each exported name in the package specified by pkgID.
  1001  //
  1002  // The operation may fail if building a predecessor failed. If and only if this
  1003  // occurs, the result will be nil.
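         //
         // getTransitiveRefs may be called concurrently; results are memoized in
         // b.transitiveRefs, guarded by b.transitiveRefsMu.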
  1004  func (b *packageHandleBuilder) getTransitiveRefs(pkgID PackageID) map[string]*typerefs.PackageSet {
  1005  	b.transitiveRefsMu.Lock()
  1006  	defer b.transitiveRefsMu.Unlock()
  1007  
  1008  	idxID := b.s.pkgIndex.IndexID(pkgID)
  1009  	trefs, ok := b.transitiveRefs[idxID]
  1010  	if !ok {
  1011  		trefs = &partialRefs{
  1012  			refs: make(map[string]*typerefs.PackageSet),
  1013  		}
  1014  		b.transitiveRefs[idxID] = trefs
  1015  	}
  1016  
  1017  	if !trefs.complete {
  1018  		trefs.complete = true
  1019  		ph := b.nodes[idxID].ph
  1020  		for name := range ph.refs {
  1021  			if ('A' <= name[0] && name[0] <= 'Z') || token.IsExported(name) {
  1022  				if _, ok := trefs.refs[name]; !ok {
  1023  					pkgs := b.s.pkgIndex.NewSet()
  1024  					for _, sym := range ph.refs[name] {
  1025  						pkgs.Add(sym.Package)
  1026  						otherSet := b.getOneTransitiveRefLocked(sym)
  1027  						pkgs.Union(otherSet)
  1028  					}
  1029  					trefs.refs[name] = pkgs
  1030  				}
  1031  			}
  1032  		}
  1033  	}
  1034  
  1035  	return trefs.refs
  1036  }
  1037  
   1038  // getOneTransitiveRefLocked computes the full set of packages transitively
  1039  // reachable through the given sym reference.
  1040  //
  1041  // It may return nil if the reference is invalid (i.e. the referenced name does
  1042  // not exist).
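         //
         // The caller must hold b.transitiveRefsMu (hence the "Locked" suffix).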
  1043  func (b *packageHandleBuilder) getOneTransitiveRefLocked(sym typerefs.Symbol) *typerefs.PackageSet {
  1044  	assert(token.IsExported(sym.Name), "expected exported symbol")
  1045  
  1046  	trefs := b.transitiveRefs[sym.Package]
  1047  	if trefs == nil {
  1048  		trefs = &partialRefs{
  1049  			refs:     make(map[string]*typerefs.PackageSet),
  1050  			complete: false,
  1051  		}
  1052  		b.transitiveRefs[sym.Package] = trefs
  1053  	}
  1054  
  1055  	pkgs, ok := trefs.refs[sym.Name]
  1056  	if ok && pkgs == nil {
  1057  		// See below, where refs is set to nil before recursing.
  1058  		bug.Reportf("cycle detected to %q in reference graph", sym.Name)
  1059  	}
  1060  
  1061  	// Note that if (!ok && trefs.complete), the name does not exist in the
  1062  	// referenced package, and we should not write to trefs as that may introduce
  1063  	// a race.
  1064  	if !ok && !trefs.complete {
  1065  		n := b.nodes[sym.Package]
  1066  		if n == nil {
  1067  			// We should always have IndexID in our node set, because symbol references
  1068  			// should only be recorded for packages that actually exist in the import graph.
  1069  			//
  1070  			// However, it is not easy to prove this (typerefs are serialized and
  1071  			// deserialized), so make this code temporarily defensive while we are on a
  1072  			// point release.
  1073  			//
  1074  			// TODO(rfindley): in the future, we should turn this into an assertion.
  1075  			bug.Reportf("missing reference to package %s", b.s.pkgIndex.PackageID(sym.Package))
  1076  			return nil
  1077  		}
  1078  
  1079  		// Break cycles. This is perhaps overly defensive as cycles should not
  1080  		// exist at this point: metadata cycles should have been broken at load
  1081  		// time, and intra-package reference cycles should have been contracted by
  1082  		// the typerefs algorithm.
  1083  		//
  1084  		// See the "cycle detected" bug report above.
  1085  		trefs.refs[sym.Name] = nil
  1086  
  1087  		pkgs := b.s.pkgIndex.NewSet()
  1088  		for _, sym2 := range n.ph.refs[sym.Name] {
  1089  			pkgs.Add(sym2.Package)
  1090  			otherSet := b.getOneTransitiveRefLocked(sym2)
  1091  			pkgs.Union(otherSet)
  1092  		}
  1093  		trefs.refs[sym.Name] = pkgs
  1094  	}
  1095  
  1096  	return pkgs
  1097  }
  1098  
  1099  // buildPackageHandle gets or builds a package handle for the given id, storing
  1100  // its result in the snapshot.packages map.
  1101  //
  1102  // buildPackageHandle must only be called from getPackageHandles.
  1103  func (b *packageHandleBuilder) buildPackageHandle(ctx context.Context, n *handleNode) {
  1104  	var prevPH *packageHandle
  1105  	if n.ph != nil {
  1106  		// Existing package handle: if it is valid, return it. Otherwise, create a
  1107  		// copy to update.
  1108  		if n.ph.validated {
  1109  			return
  1110  		}
  1111  		prevPH = n.ph
  1112  		// Either prevPH is still valid, or we will update the key and depKeys of
  1113  		// this copy. In either case, the result will be valid.
  1114  		n.ph = prevPH.clone(true)
  1115  	} else {
  1116  		// No package handle: read and analyze the package syntax.
  1117  		inputs, err := b.s.typeCheckInputs(ctx, n.mp)
  1118  		if err != nil {
  1119  			n.err = err
  1120  			return
  1121  		}
  1122  		refs, err := b.s.typerefs(ctx, n.mp, inputs.compiledGoFiles)
  1123  		if err != nil {
  1124  			n.err = err
  1125  			return
  1126  		}
  1127  		n.ph = &packageHandle{
  1128  			mp:              n.mp,
  1129  			loadDiagnostics: computeLoadDiagnostics(ctx, b.s, n.mp),
  1130  			localInputs:     inputs,
  1131  			localKey:        localPackageKey(inputs),
  1132  			refs:            refs,
  1133  			validated:       true,
  1134  		}
  1135  	}
  1136  
  1137  	// ph either did not exist, or was invalid. We must re-evaluate deps and key.
  1138  	if err := b.evaluatePackageHandle(prevPH, n); err != nil {
  1139  		n.err = err
  1140  		return
  1141  	}
  1142  
  1143  	assert(n.ph.validated, "unvalidated handle")
  1144  
  1145  	// Ensure the result (or an equivalent) is recorded in the snapshot.
  1146  	b.s.mu.Lock()
  1147  	defer b.s.mu.Unlock()
  1148  
  1149  	// Check that the metadata has not changed
  1150  	// (which should invalidate this handle).
  1151  	//
  1152  	// TODO(rfindley): eventually promote this to an assert.
  1153  	// TODO(rfindley): move this to after building the package handle graph?
  1154  	if b.s.meta.Packages[n.mp.ID] != n.mp {
  1155  		bug.Reportf("stale metadata for %s", n.mp.ID)
  1156  	}
  1157  
  1158  	// Check the packages map again in case another goroutine got there first.
  1159  	if alt, ok := b.s.packages.Get(n.mp.ID); ok && alt.validated {
  1160  		if alt.mp != n.mp {
  1161  			bug.Reportf("existing package handle does not match for %s", n.mp.ID)
  1162  		}
  1163  		n.ph = alt
  1164  	} else {
  1165  		b.s.packages.Set(n.mp.ID, n.ph, nil)
  1166  	}
  1167  }
  1168  
  1169  // evaluatePackageHandle validates and/or computes the key of ph, setting key,
  1170  // depKeys, and the validated flag on ph.
  1171  //
  1172  // It uses prevPH to avoid recomputing keys that can't have changed, since
  1173  // their depKeys did not change.
  1174  //
  1175  // See the documentation for packageHandle for more details about packageHandle
  1176  // state, and see the documentation for the typerefs package for more details
  1177  // about precise reachability analysis.
  1178  func (b *packageHandleBuilder) evaluatePackageHandle(prevPH *packageHandle, n *handleNode) error {
  1179  	// Opt: if no dep keys have changed, we need not re-evaluate the key.
  1180  	if prevPH != nil {
  1181  		depsChanged := false
  1182  		assert(len(prevPH.depKeys) == len(n.succs), "mismatching dep count")
  1183  		for id, succ := range n.succs {
  1184  			oldKey, ok := prevPH.depKeys[id]
  1185  			assert(ok, "missing dep")
  1186  			if oldKey != succ.ph.key {
  1187  				depsChanged = true
  1188  				break
  1189  			}
  1190  		}
  1191  		if !depsChanged {
  1192  			return nil // key cannot have changed
  1193  		}
  1194  	}
  1195  
  1196  	// Deps have changed, so we must re-evaluate the key.
  1197  	n.ph.depKeys = make(map[PackageID]file.Hash)
  1198  
  1199  	// See the typerefs package: the reachable set of packages is defined to be
  1200  	// the set of packages containing syntax that is reachable through the
  1201  	// exported symbols in the dependencies of n.ph.
  1202  	reachable := b.s.pkgIndex.NewSet()
  1203  	for depID, succ := range n.succs {
  1204  		n.ph.depKeys[depID] = succ.ph.key
  1205  		reachable.Add(succ.idxID)
  1206  		trefs := b.getTransitiveRefs(succ.mp.ID)
  1207  		if trefs == nil {
  1208  			// A predecessor failed to build due to e.g. context cancellation.
  1209  			return fmt.Errorf("missing transitive refs for %s", succ.mp.ID)
  1210  		}
  1211  		for _, set := range trefs {
  1212  			reachable.Union(set)
  1213  		}
  1214  	}
  1215  
  1216  	// Collect reachable handles.
  1217  	var reachableHandles []*packageHandle
  1218  	// In the presence of context cancellation, any package may be missing.
  1219  	// We need all dependencies to produce a valid key.
  1220  	missingReachablePackage := false
  1221  	reachable.Elems(func(id typerefs.IndexID) {
  1222  		dh := b.nodes[id]
  1223  		if dh == nil {
  1224  			missingReachablePackage = true
  1225  		} else {
  1226  			assert(dh.ph.validated, "unvalidated dependency")
  1227  			reachableHandles = append(reachableHandles, dh.ph)
  1228  		}
  1229  	})
  1230  	if missingReachablePackage {
  1231  		return fmt.Errorf("missing reachable package")
  1232  	}
  1233  	// Sort for stability.
  1234  	sort.Slice(reachableHandles, func(i, j int) bool {
  1235  		return reachableHandles[i].mp.ID < reachableHandles[j].mp.ID
  1236  	})
  1237  
  1238  	// Key is the hash of the local key, and the local key of all reachable
  1239  	// packages.
  1240  	depHasher := sha256.New()
  1241  	depHasher.Write(n.ph.localKey[:])
  1242  	for _, rph := range reachableHandles {
  1243  		depHasher.Write(rph.localKey[:])
  1244  	}
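         	// Sum appends to the zero-length slice of the fixed-size key array, writing
         	// the digest directly into n.ph.key.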
  1245  	depHasher.Sum(n.ph.key[:0])
  1246  
  1247  	return nil
  1248  }
  1249  
   1250  // typerefs returns typerefs for the package described by mp and cgfs, after
  1251  // either computing it or loading it from the file cache.
  1252  func (s *Snapshot) typerefs(ctx context.Context, mp *metadata.Package, cgfs []file.Handle) (map[string][]typerefs.Symbol, error) {
  1253  	imports := make(map[ImportPath]*metadata.Package)
  1254  	for impPath, id := range mp.DepsByImpPath {
  1255  		if id != "" {
  1256  			imports[impPath] = s.Metadata(id)
  1257  		}
  1258  	}
  1259  
  1260  	data, err := s.typerefData(ctx, mp.ID, imports, cgfs)
  1261  	if err != nil {
  1262  		return nil, err
  1263  	}
  1264  	classes := typerefs.Decode(s.pkgIndex, data)
  1265  	refs := make(map[string][]typerefs.Symbol)
  1266  	for _, class := range classes {
  1267  		for _, decl := range class.Decls {
  1268  			refs[decl] = class.Refs
  1269  		}
  1270  	}
  1271  	return refs, nil
  1272  }
  1273  
  1274  // typerefData retrieves encoded typeref data from the filecache, or computes it on
  1275  // a cache miss.
  1276  func (s *Snapshot) typerefData(ctx context.Context, id PackageID, imports map[ImportPath]*metadata.Package, cgfs []file.Handle) ([]byte, error) {
  1277  	key := typerefsKey(id, imports, cgfs)
  1278  	if data, err := filecache.Get(typerefsKind, key); err == nil {
  1279  		return data, nil
  1280  	} else if err != filecache.ErrNotFound {
  1281  		bug.Reportf("internal error reading typerefs data: %v", err)
  1282  	}
  1283  
  1284  	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), ParseFull&^parser.ParseComments, true, cgfs...)
  1285  	if err != nil {
  1286  		return nil, err
  1287  	}
  1288  	data := typerefs.Encode(pgfs, imports)
  1289  
  1290  	// Store the resulting data in the cache.
  1291  	go func() {
  1292  		if err := filecache.Set(typerefsKind, key, data); err != nil {
  1293  			event.Error(ctx, fmt.Sprintf("storing typerefs data for %s", id), err)
  1294  		}
  1295  	}()
  1296  
  1297  	return data, nil
  1298  }
  1299  
  1300  // typerefsKey produces a key for the reference information produced by the
  1301  // typerefs package.
  1302  func typerefsKey(id PackageID, imports map[ImportPath]*metadata.Package, compiledGoFiles []file.Handle) file.Hash {
  1303  	hasher := sha256.New()
  1304  
  1305  	fmt.Fprintf(hasher, "typerefs: %s\n", id)
  1306  
  1307  	importPaths := make([]string, 0, len(imports))
  1308  	for impPath := range imports {
  1309  		importPaths = append(importPaths, string(impPath))
  1310  	}
  1311  	sort.Strings(importPaths)
  1312  	for _, importPath := range importPaths {
  1313  		imp := imports[ImportPath(importPath)]
  1314  		// TODO(rfindley): strength reduce the typerefs.Export API to guarantee
  1315  		// that it only depends on these attributes of dependencies.
  1316  		fmt.Fprintf(hasher, "import %s %s %s", importPath, imp.ID, imp.Name)
  1317  	}
  1318  
  1319  	fmt.Fprintf(hasher, "compiledGoFiles: %d\n", len(compiledGoFiles))
  1320  	for _, fh := range compiledGoFiles {
  1321  		fmt.Fprintln(hasher, fh.Identity())
  1322  	}
  1323  
  1324  	var hash [sha256.Size]byte
  1325  	hasher.Sum(hash[:0])
  1326  	return hash
  1327  }
  1328  
   1329  // typeCheckInputs contains the inputs of a call to checkPackage, which
  1330  // type-checks a package.
  1331  //
  1332  // Part of the purpose of this type is to keep type checking in-sync with the
  1333  // package handle key, by explicitly identifying the inputs to type checking.
  1334  type typeCheckInputs struct {
  1335  	id PackageID
  1336  
  1337  	// Used for type checking:
  1338  	pkgPath                  PackagePath
  1339  	name                     PackageName
  1340  	goFiles, compiledGoFiles []file.Handle
  1341  	sizes                    types.Sizes
  1342  	depsByImpPath            map[ImportPath]PackageID
  1343  	goVersion                string // packages.Module.GoVersion, e.g. "1.18"
  1344  
  1345  	// Used for type check diagnostics:
  1346  	// TODO(rfindley): consider storing less data in gobDiagnostics, and
  1347  	// interpreting each diagnostic in the context of a fixed set of options.
  1348  	// Then these fields need not be part of the type checking inputs.
  1349  	relatedInformation bool
  1350  	linkTarget         string
  1351  	moduleMode         bool
  1352  }
  1353  
  1354  func (s *Snapshot) typeCheckInputs(ctx context.Context, mp *metadata.Package) (typeCheckInputs, error) {
  1355  	// Read both lists of files of this package.
  1356  	//
  1357  	// Parallelism is not necessary here as the files will have already been
  1358  	// pre-read at load time.
  1359  	//
  1360  	// goFiles aren't presented to the type checker--nor
  1361  	// are they included in the key, unsoundly--but their
  1362  	// syntax trees are available from (*pkg).File(URI).
  1363  	// TODO(adonovan): consider parsing them on demand?
  1364  	// The need should be rare.
  1365  	goFiles, err := readFiles(ctx, s, mp.GoFiles)
  1366  	if err != nil {
  1367  		return typeCheckInputs{}, err
  1368  	}
  1369  	compiledGoFiles, err := readFiles(ctx, s, mp.CompiledGoFiles)
  1370  	if err != nil {
  1371  		return typeCheckInputs{}, err
  1372  	}
  1373  
  1374  	goVersion := ""
  1375  	if mp.Module != nil && mp.Module.GoVersion != "" {
  1376  		goVersion = mp.Module.GoVersion
  1377  	}
  1378  
  1379  	return typeCheckInputs{
  1380  		id:              mp.ID,
  1381  		pkgPath:         mp.PkgPath,
  1382  		name:            mp.Name,
  1383  		goFiles:         goFiles,
  1384  		compiledGoFiles: compiledGoFiles,
  1385  		sizes:           mp.TypesSizes,
  1386  		depsByImpPath:   mp.DepsByImpPath,
  1387  		goVersion:       goVersion,
  1388  
  1389  		relatedInformation: s.Options().RelatedInformationSupported,
  1390  		linkTarget:         s.Options().LinkTarget,
  1391  		moduleMode:         s.view.moduleMode(),
  1392  	}, nil
  1393  }
  1394  
   1395  // readFiles reads the content of each file URI from the source
  1396  // (e.g. snapshot or cache).
  1397  func readFiles(ctx context.Context, fs file.Source, uris []protocol.DocumentURI) (_ []file.Handle, err error) {
  1398  	fhs := make([]file.Handle, len(uris))
  1399  	for i, uri := range uris {
  1400  		fhs[i], err = fs.ReadFile(ctx, uri)
  1401  		if err != nil {
  1402  			return nil, err
  1403  		}
  1404  	}
  1405  	return fhs, nil
  1406  }
  1407  
  1408  // localPackageKey returns a key for local inputs into type-checking, excluding
  1409  // dependency information: files, metadata, and configuration.
  1410  func localPackageKey(inputs typeCheckInputs) file.Hash {
  1411  	hasher := sha256.New()
  1412  
  1413  	// In principle, a key must be the hash of an
  1414  	// unambiguous encoding of all the relevant data.
  1415  	// If it's ambiguous, we risk collisions.
  1416  
  1417  	// package identifiers
  1418  	fmt.Fprintf(hasher, "package: %s %s %s\n", inputs.id, inputs.name, inputs.pkgPath)
  1419  
  1420  	// module Go version
  1421  	fmt.Fprintf(hasher, "go %s\n", inputs.goVersion)
  1422  
  1423  	// import map
  1424  	importPaths := make([]string, 0, len(inputs.depsByImpPath))
  1425  	for impPath := range inputs.depsByImpPath {
  1426  		importPaths = append(importPaths, string(impPath))
  1427  	}
  1428  	sort.Strings(importPaths)
  1429  	for _, impPath := range importPaths {
  1430  		fmt.Fprintf(hasher, "import %s %s", impPath, string(inputs.depsByImpPath[ImportPath(impPath)]))
  1431  	}
  1432  
  1433  	// file names and contents
  1434  	fmt.Fprintf(hasher, "compiledGoFiles: %d\n", len(inputs.compiledGoFiles))
  1435  	for _, fh := range inputs.compiledGoFiles {
  1436  		fmt.Fprintln(hasher, fh.Identity())
  1437  	}
  1438  	fmt.Fprintf(hasher, "goFiles: %d\n", len(inputs.goFiles))
  1439  	for _, fh := range inputs.goFiles {
  1440  		fmt.Fprintln(hasher, fh.Identity())
  1441  	}
  1442  
  1443  	// types sizes
  1444  	wordSize := inputs.sizes.Sizeof(types.Typ[types.Int])
  1445  	maxAlign := inputs.sizes.Alignof(types.NewPointer(types.Typ[types.Int64]))
  1446  	fmt.Fprintf(hasher, "sizes: %d %d\n", wordSize, maxAlign)
  1447  
  1448  	fmt.Fprintf(hasher, "relatedInformation: %t\n", inputs.relatedInformation)
  1449  	fmt.Fprintf(hasher, "linkTarget: %s\n", inputs.linkTarget)
  1450  	fmt.Fprintf(hasher, "moduleMode: %t\n", inputs.moduleMode)
  1451  
  1452  	var hash [sha256.Size]byte
  1453  	hasher.Sum(hash[:0])
  1454  	return hash
  1455  }
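
// The "unambiguous encoding" caveat above is easy to trip over: if
// variable-length fields are written without counts or delimiters, two
// different inputs can serialize to the same byte stream and therefore
// produce the same key. A minimal standalone sketch (not part of gopls;
// the function name is hypothetical):
func exampleAmbiguousKeyEncoding() {
	hash := func(parts []string) [sha256.Size]byte {
		h := sha256.New()
		for _, p := range parts {
			fmt.Fprint(h, p) // no delimiter: the encoding is ambiguous
		}
		var sum [sha256.Size]byte
		h.Sum(sum[:0])
		return sum
	}
	a := hash([]string{"ab", "c"})
	b := hash([]string{"a", "bc"})
	fmt.Println(a == b) // true: distinct inputs collide
}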
  1456  
  1457  // checkPackage type checks the parsed source files in compiledGoFiles.
  1458  // (The resulting pkg also holds the parsed but not type-checked goFiles.)
  1459  // Type-checked dependencies are obtained from the batch via its importer.
  1460  func (b *typeCheckBatch) checkPackage(ctx context.Context, ph *packageHandle) (*Package, error) {
  1461  	inputs := ph.localInputs
  1462  	ctx, done := event.Start(ctx, "cache.typeCheckBatch.checkPackage", tag.Package.Of(string(inputs.id)))
  1463  	defer done()
  1464  
  1465  	pkg := &syntaxPackage{
  1466  		id:    inputs.id,
  1467  		fset:  b.fset, // must match parse call below
  1468  		types: types.NewPackage(string(inputs.pkgPath), string(inputs.name)),
  1469  		typesInfo: &types.Info{
  1470  			Types:      make(map[ast.Expr]types.TypeAndValue),
  1471  			Defs:       make(map[*ast.Ident]types.Object),
  1472  			Uses:       make(map[*ast.Ident]types.Object),
  1473  			Implicits:  make(map[ast.Node]types.Object),
  1474  			Instances:  make(map[*ast.Ident]types.Instance),
  1475  			Selections: make(map[*ast.SelectorExpr]*types.Selection),
  1476  			Scopes:     make(map[ast.Node]*types.Scope),
  1477  		},
  1478  	}
  1479  	versions.InitFileVersions(pkg.typesInfo)
  1480  
  1481  	// Collect parsed files for the type check pass, capturing parse errors from
  1482  	// compiled files.
  1483  	var err error
  1484  	pkg.goFiles, err = b.parseCache.parseFiles(ctx, b.fset, ParseFull, false, inputs.goFiles...)
  1485  	if err != nil {
  1486  		return nil, err
  1487  	}
  1488  	pkg.compiledGoFiles, err = b.parseCache.parseFiles(ctx, b.fset, ParseFull, false, inputs.compiledGoFiles...)
  1489  	if err != nil {
  1490  		return nil, err
  1491  	}
  1492  	for _, pgf := range pkg.compiledGoFiles {
  1493  		if pgf.ParseErr != nil {
  1494  			pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr)
  1495  		}
  1496  	}
  1497  
  1498  	// Use the default type information for the unsafe package.
  1499  	if inputs.pkgPath == "unsafe" {
  1500  		// Don't type check Unsafe: it's unnecessary, and doing so exposes a data
  1501  		// race on Unsafe.completed.
  1502  		pkg.types = types.Unsafe
  1503  	} else {
  1504  
  1505  		if len(pkg.compiledGoFiles) == 0 {
  1506  			// No files most likely means go/packages failed.
  1507  			//
  1508  			// TODO(rfindley): in the past, we would capture go list errors in this
  1509  			// case, to present go list errors to the user. However we had no tests for
  1510  			// this behavior. It is unclear if anything better can be done here.
  1511  			return nil, fmt.Errorf("no parsed files for package %s", inputs.pkgPath)
  1512  		}
  1513  
  1514  		onError := func(e error) {
  1515  			pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
  1516  		}
  1517  		cfg := b.typesConfig(ctx, inputs, onError)
  1518  		check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)
  1519  
  1520  		var files []*ast.File
  1521  		for _, cgf := range pkg.compiledGoFiles {
  1522  			files = append(files, cgf.File)
  1523  		}
  1524  
  1525  		// Type checking is expensive, and we may not have encountered cancellations
  1526  		// via parsing (e.g. if we got nothing but cache hits for parsed files).
  1527  		if ctx.Err() != nil {
  1528  			return nil, ctx.Err()
  1529  		}
  1530  
  1531  		// Type checking errors are handled via the config, so ignore them here.
  1532  		_ = check.Files(files) // 50us-15ms, depending on size of package
  1533  
  1534  		// If the context was cancelled, we may have returned a ton of transient
  1535  		// errors to the type checker. Swallow them.
  1536  		if ctx.Err() != nil {
  1537  			return nil, ctx.Err()
  1538  		}
  1539  
  1540  		// Collect imports by package path for the DependencyTypes API.
  1541  		pkg.importMap = make(map[PackagePath]*types.Package)
  1542  		var collectDeps func(*types.Package)
  1543  		collectDeps = func(p *types.Package) {
  1544  			pkgPath := PackagePath(p.Path())
  1545  			if _, ok := pkg.importMap[pkgPath]; ok {
  1546  				return
  1547  			}
  1548  			pkg.importMap[pkgPath] = p
  1549  			for _, imp := range p.Imports() {
  1550  				collectDeps(imp)
  1551  			}
  1552  		}
  1553  		collectDeps(pkg.types)
  1554  
  1555  		// Work around golang/go#61561: interface instances aren't concurrency-safe
  1556  		// as they are not completed by the type checker.
  1557  		for _, inst := range pkg.typesInfo.Instances {
  1558  			if iface, _ := inst.Type.Underlying().(*types.Interface); iface != nil {
  1559  				iface.Complete()
  1560  			}
  1561  		}
  1562  	}
  1563  
  1564  	// Our heuristic for whether to show type checking errors is:
  1565  	//  + If there is a parse error _in the current file_, suppress type
  1566  	//    errors in that file.
  1567  	//  + Otherwise, show type errors even in the presence of parse errors in
  1568  	//    other package files. go/types attempts to suppress follow-on errors
  1569  	//    due to bad syntax, so on balance type checking errors still provide
  1570  	//    a decent signal/noise ratio as long as the file in question parses.
  1571  
  1572  	// Track URIs with parse errors so that we can suppress type errors for these
  1573  	// files.
  1574  	unparseable := map[protocol.DocumentURI]bool{}
  1575  	for _, e := range pkg.parseErrors {
  1576  		diags, err := parseErrorDiagnostics(pkg, e)
  1577  		if err != nil {
  1578  			event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(string(inputs.id)))
  1579  			continue
  1580  		}
  1581  		for _, diag := range diags {
  1582  			unparseable[diag.URI] = true
  1583  			pkg.diagnostics = append(pkg.diagnostics, diag)
  1584  		}
  1585  	}
  1586  
  1587  	diags := typeErrorsToDiagnostics(pkg, pkg.typeErrors, inputs.linkTarget, inputs.moduleMode, inputs.relatedInformation)
  1588  	for _, diag := range diags {
  1589  		// If the file didn't parse cleanly, it is highly likely that type
  1590  		// checking errors will be confusing or redundant. But otherwise, type
  1591  		// checking usually provides a good enough signal to include.
  1592  		if !unparseable[diag.URI] {
  1593  			pkg.diagnostics = append(pkg.diagnostics, diag)
  1594  		}
  1595  	}
  1596  
  1597  	return &Package{ph.mp, ph.loadDiagnostics, pkg}, nil
  1598  }
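
// The golang/go#61561 workaround above calls (*types.Interface).Complete,
// which the type checker does not do for instantiated interfaces;
// completing them up front is what makes them safe for concurrent use.
// A self-contained sketch of the call (not part of gopls; the function
// name is hypothetical):
func exampleCompleteInterface() {
	iface := types.NewInterfaceType(nil, nil) // an empty interface type
	iface.Complete()                          // compute the type set up front
	fmt.Println(iface.Empty())                // true
}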
  1599  
  1600  // e.g. "go1" or "go1.2" or "go1.2.3"
  1601  var goVersionRx = regexp.MustCompile(`^go[1-9][0-9]*(?:\.(0|[1-9][0-9]*)){0,2}$`)
  1602  
  1603  func (b *typeCheckBatch) typesConfig(ctx context.Context, inputs typeCheckInputs, onError func(e error)) *types.Config {
  1604  	cfg := &types.Config{
  1605  		Sizes: inputs.sizes,
  1606  		Error: onError,
  1607  		Importer: importerFunc(func(path string) (*types.Package, error) {
  1608  			// While all of the import errors could be reported
  1609  			// based on the metadata before we start type checking,
  1610  			// reporting them via types.Importer places the errors
  1611  			// at the correct source location.
  1612  			id, ok := inputs.depsByImpPath[ImportPath(path)]
  1613  			if !ok {
  1614  				// If the import declaration is broken,
  1615  				// go list may fail to report metadata about it.
  1616  				// See TestFixImportDecl for an example.
  1617  				return nil, fmt.Errorf("missing metadata for import of %q", path)
  1618  			}
  1619  			depPH := b.handles[id]
  1620  			if depPH == nil {
  1621  				// e.g. missing metadata for dependencies in buildPackageHandle
  1622  				return nil, missingPkgError(inputs.id, path, inputs.moduleMode)
  1623  			}
  1624  			if !metadata.IsValidImport(inputs.pkgPath, depPH.mp.PkgPath) {
  1625  				return nil, fmt.Errorf("invalid use of internal package %q", path)
  1626  			}
  1627  			return b.getImportPackage(ctx, id)
  1628  		}),
  1629  	}
  1630  
  1631  	if inputs.goVersion != "" {
  1632  		goVersion := "go" + inputs.goVersion
  1633  		if validGoVersion(goVersion) {
  1634  			typesinternal.SetGoVersion(cfg, goVersion)
  1635  		}
  1636  	}
  1637  
  1638  	// We want to type check cgo code if go/types supports it.
  1639  	// We passed typecheckCgo to go/packages when we Loaded.
  1640  	typesinternal.SetUsesCgo(cfg)
  1641  	return cfg
  1642  }
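
// A compact, self-contained illustration (not part of gopls; names are
// hypothetical) of the pattern used above: a types.Config whose Error
// callback accumulates types.Error values so that checking continues
// past the first failure.
func exampleErrorCallback() {
	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "p.go", "package p\nvar x int = \"s\"\nvar y bool = 1\n", 0)
	var typeErrs []types.Error
	cfg := &types.Config{
		Error: func(err error) { typeErrs = append(typeErrs, err.(types.Error)) },
	}
	pkg := types.NewPackage("p", "p")
	_ = types.NewChecker(cfg, fset, pkg, &types.Info{}).Files([]*ast.File{f})
	fmt.Println(len(typeErrs)) // 2: one error per bad assignment
}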
  1643  
  1644  // validGoVersion reports whether goVersion is a valid Go version for go/types.
  1645  // types.NewChecker panics if GoVersion is invalid.
  1646  //
  1647  // Note that, prior to go1.21, go/types required exactly two components in the
  1648  // version number. For example, go/types would panic with the Go version
  1649  // go1.21.1. validGoVersion handles this case when built with go1.20 or earlier.
  1650  func validGoVersion(goVersion string) bool {
  1651  	if !goVersionRx.MatchString(goVersion) {
  1652  		return false // malformed version string
  1653  	}
  1654  
  1655  	if relVer := releaseVersion(); relVer != "" && versions.Compare(relVer, goVersion) < 0 {
  1656  		return false // 'go list' is too new for go/types
  1657  	}
  1658  
  1659  	// TODO(rfindley): remove once we no longer support building gopls with Go
  1660  	// 1.20 or earlier.
  1661  	if !slices.Contains(build.Default.ReleaseTags, "go1.21") && strings.Count(goVersion, ".") >= 2 {
  1662  		return false // unsupported patch version
  1663  	}
  1664  
  1665  	return true
  1666  }
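
// For reference, a sketch (not part of gopls; the function name is
// hypothetical) of the inputs goVersionRx accepts. Whether a well-formed
// patch version such as "go1.21.1" is ultimately usable also depends on
// the checks above (the release used to build gopls and, before go1.21,
// the two-component restriction).
func exampleGoVersionMatching() {
	for _, v := range []string{"go1", "go1.18", "go1.21.1", "1.18", "go1.x"} {
		fmt.Printf("%s well-formed=%t\n", v, goVersionRx.MatchString(v))
	}
	// go1, go1.18, and go1.21.1 are well-formed; "1.18" and "go1.x" are not.
}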
  1667  
  1668  // releaseVersion reports the Go language version used to compile gopls, or ""
  1669  // if it cannot be determined.
  1670  func releaseVersion() string {
  1671  	if len(build.Default.ReleaseTags) > 0 {
  1672  		v := build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1]
  1673  		var dummy int
  1674  		if _, err := fmt.Sscanf(v, "go1.%d", &dummy); err == nil {
  1675  			return v
  1676  		}
  1677  	}
  1678  	return ""
  1679  }
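
// releaseVersion relies on build.Default.ReleaseTags listing release tags
// up to and including the toolchain's own version ("go1.1", ..., "go1.21"
// for a Go 1.21 toolchain), so its last element names the compiling
// release. A trivial sketch of its use (not part of gopls):
//
//	if v := releaseVersion(); v != "" {
//		// v is e.g. "go1.21"; compare it against a module's go version.
//	}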
  1680  
  1681  // depsErrors creates diagnostics for each metadata error (e.g. import cycle).
  1682  // These may be attached to import declarations in the transitive source files
  1683  // of pkg, or to 'requires' declarations in the package's go.mod file.
  1684  //
  1685  // TODO(rfindley): move this to load.go
  1686  func depsErrors(ctx context.Context, snapshot *Snapshot, mp *metadata.Package) ([]*Diagnostic, error) {
  1687  	// Select packages that can't be found and were imported by non-workspace packages.
  1688  	// Workspace packages already show their own errors.
  1689  	var relevantErrors []*packagesinternal.PackageError
  1690  	for _, depsError := range mp.DepsErrors {
  1691  		// Up to Go 1.15, the missing package was included in the stack, which
  1692  		// was presumably a bug. We want the next one up.
  1693  		directImporterIdx := len(depsError.ImportStack) - 1
  1694  		if directImporterIdx < 0 {
  1695  			continue
  1696  		}
  1697  
  1698  		directImporter := depsError.ImportStack[directImporterIdx]
  1699  		if snapshot.isWorkspacePackage(PackageID(directImporter)) {
  1700  			continue
  1701  		}
  1702  		relevantErrors = append(relevantErrors, depsError)
  1703  	}
  1704  
  1705  	// Don't build the import index for nothing.
  1706  	if len(relevantErrors) == 0 {
  1707  		return nil, nil
  1708  	}
  1709  
  1710  	// Subsequent checks require Go files.
  1711  	if len(mp.CompiledGoFiles) == 0 {
  1712  		return nil, nil
  1713  	}
  1714  
  1715  	// Build an index of all imports in the package.
  1716  	type fileImport struct {
  1717  		cgf *ParsedGoFile
  1718  		imp *ast.ImportSpec
  1719  	}
  1720  	allImports := map[string][]fileImport{}
  1721  	for _, uri := range mp.CompiledGoFiles {
  1722  		pgf, err := parseGoURI(ctx, snapshot, uri, ParseHeader)
  1723  		if err != nil {
  1724  			return nil, err
  1725  		}
  1726  		fset := tokeninternal.FileSetFor(pgf.Tok)
  1727  		// TODO(adonovan): modify Imports() to accept a single token.File (cgf.Tok).
  1728  		for _, group := range astutil.Imports(fset, pgf.File) {
  1729  			for _, imp := range group {
  1730  				if imp.Path == nil {
  1731  					continue
  1732  				}
  1733  				path := strings.Trim(imp.Path.Value, `"`)
  1734  				allImports[path] = append(allImports[path], fileImport{pgf, imp})
  1735  			}
  1736  		}
  1737  	}
  1738  
  1739  	// Apply a diagnostic to any import involved in the error, stopping once
  1740  	// we reach the workspace.
  1741  	var errors []*Diagnostic
  1742  	for _, depErr := range relevantErrors {
  1743  		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
  1744  			item := depErr.ImportStack[i]
  1745  			if snapshot.isWorkspacePackage(PackageID(item)) {
  1746  				break
  1747  			}
  1748  
  1749  			for _, imp := range allImports[item] {
  1750  				rng, err := imp.cgf.NodeRange(imp.imp)
  1751  				if err != nil {
  1752  					return nil, err
  1753  				}
  1754  				diag := &Diagnostic{
  1755  					URI:            imp.cgf.URI,
  1756  					Range:          rng,
  1757  					Severity:       protocol.SeverityError,
  1758  					Source:         TypeError,
  1759  					Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
  1760  					SuggestedFixes: goGetQuickFixes(mp.Module != nil, imp.cgf.URI, item),
  1761  				}
  1762  				if !bundleQuickFixes(diag) {
  1763  					bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
  1764  				}
  1765  				errors = append(errors, diag)
  1766  			}
  1767  		}
  1768  	}
  1769  
  1770  	modFile, err := nearestModFile(ctx, mp.CompiledGoFiles[0], snapshot)
  1771  	if err != nil {
  1772  		return nil, err
  1773  	}
  1774  	pm, err := parseModURI(ctx, snapshot, modFile)
  1775  	if err != nil {
  1776  		return nil, err
  1777  	}
  1778  
  1779  	// Add a diagnostic to the module that contained the lowest-level import of
  1780  	// the missing package.
  1781  	for _, depErr := range relevantErrors {
  1782  		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
  1783  			item := depErr.ImportStack[i]
  1784  			mp := snapshot.Metadata(PackageID(item))
  1785  			if mp == nil || mp.Module == nil {
  1786  				continue
  1787  			}
  1788  			modVer := module.Version{Path: mp.Module.Path, Version: mp.Module.Version}
  1789  			reference := findModuleReference(pm.File, modVer)
  1790  			if reference == nil {
  1791  				continue
  1792  			}
  1793  			rng, err := pm.Mapper.OffsetRange(reference.Start.Byte, reference.End.Byte)
  1794  			if err != nil {
  1795  				return nil, err
  1796  			}
  1797  			diag := &Diagnostic{
  1798  				URI:            pm.URI,
  1799  				Range:          rng,
  1800  				Severity:       protocol.SeverityError,
  1801  				Source:         TypeError,
  1802  				Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
  1803  				SuggestedFixes: goGetQuickFixes(true, pm.URI, item),
  1804  			}
  1805  			if !bundleQuickFixes(diag) {
  1806  				bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
  1807  			}
  1808  			errors = append(errors, diag)
  1809  			break
  1810  		}
  1811  	}
  1812  	return errors, nil
  1813  }
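
// The import index built above hinges on astutil.Imports, which returns
// import specs grouped by paragraph as they appear in the source. A
// self-contained sketch (not part of gopls; file content and names are
// hypothetical):
func exampleImportIndex() {
	fset := token.NewFileSet()
	src := "package x\n\nimport (\n\t\"fmt\"\n\n\t\"strings\"\n)\n"
	f, _ := parser.ParseFile(fset, "x.go", src, parser.ImportsOnly)
	index := map[string][]*ast.ImportSpec{}
	for _, group := range astutil.Imports(fset, f) {
		for _, imp := range group {
			path := strings.Trim(imp.Path.Value, `"`)
			index[path] = append(index[path], imp)
		}
	}
	fmt.Println(len(index), len(astutil.Imports(fset, f))) // 2 imports in 2 groups
}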
  1814  
  1815  // missingPkgError returns an error message for a missing package; the message
  1816  // varies based on the user's workspace mode.
  1817  func missingPkgError(from PackageID, pkgPath string, moduleMode bool) error {
  1818  	// TODO(rfindley): improve this error. Previous versions of this error had
  1819  	// access to the full snapshot, and could provide more information (such as
  1820  	// the initialization error).
  1821  	if moduleMode {
  1822  		if metadata.IsCommandLineArguments(from) {
  1823  			return fmt.Errorf("current file is not included in a workspace module")
  1824  		} else {
  1825  			// Previously, we would present the initialization error here.
  1826  			return fmt.Errorf("no required module provides package %q", pkgPath)
  1827  		}
  1828  	} else {
  1829  		// Previously, we would list the directories in GOROOT and GOPATH here.
  1830  		return fmt.Errorf("cannot find package %q in GOROOT or GOPATH", pkgPath)
  1831  	}
  1832  }
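
// A quick illustration (not part of gopls; the IDs and paths are
// hypothetical) of the three message shapes produced above:
func exampleMissingPkgMessages() {
	fmt.Println(missingPkgError("command-line-arguments", "example.com/x", true))
	fmt.Println(missingPkgError("pkg", "example.com/x", true))
	fmt.Println(missingPkgError("pkg", "example.com/x", false))
	// current file is not included in a workspace module
	// no required module provides package "example.com/x"
	// cannot find package "example.com/x" in GOROOT or GOPATH
}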
  1833  
  1834  // typeErrorsToDiagnostics translates a slice of types.Errors into a slice of
  1835  // Diagnostics.
  1836  //
  1837  // In addition to simply mapping data such as position information and error
  1838  // codes, this function interprets related go/types "continuation" errors as
  1839  // protocol.DiagnosticRelatedInformation. Continuation errors are go/types
  1840  // errors whose messages start with "\t". By convention, these errors relate
  1841  // to the previous error in the errs slice (such as if they were printed in
  1842  // sequence to a terminal).
  1843  //
  1844  // The linkTarget, moduleMode, and supportsRelatedInformation parameters affect
  1845  // the construction of protocol objects (see the code for details).
  1846  func typeErrorsToDiagnostics(pkg *syntaxPackage, errs []types.Error, linkTarget string, moduleMode, supportsRelatedInformation bool) []*Diagnostic {
  1847  	var result []*Diagnostic
  1848  
  1849  	// batch records diagnostics for a set of related types.Errors.
  1850  	batch := func(related []types.Error) {
  1851  		var diags []*Diagnostic
  1852  		for i, e := range related {
  1853  			code, start, end, ok := typesinternal.ReadGo116ErrorData(e)
  1854  			if !ok || !start.IsValid() || !end.IsValid() {
  1855  				start, end = e.Pos, e.Pos
  1856  				code = 0
  1857  			}
  1858  			if !start.IsValid() {
  1859  				// Type checker errors may be missing position information if they
  1860  				// relate to synthetic syntax, such as if the file were fixed. In that
  1861  				// case, we should have a parse error anyway, so skipping the type
  1862  				// checker error is likely benign.
  1863  				//
  1864  				// TODO(golang/go#64335): we should eventually verify that all type
  1865  				// checked syntax has valid positions, and promote this skip to a bug
  1866  				// report.
  1867  				continue
  1868  			}
  1869  			posn := safetoken.StartPosition(e.Fset, start)
  1870  			if !posn.IsValid() {
  1871  				// All valid positions produced by the type checker should be described
  1872  				// by its fileset.
  1873  				//
  1874  				// Note: in golang/go#64488, we observed an error that was positioned
  1875  				// over fixed syntax, which overflowed its file. So it's definitely
  1876  				// possible that we get here (it's hard to reason about fixing up the
  1877  				// AST). Nevertheless, it's a bug.
  1878  				bug.Reportf("internal error: type checker error %q outside its Fset", e)
  1879  				continue
  1880  			}
  1881  			pgf, err := pkg.File(protocol.URIFromPath(posn.Filename))
  1882  			if err != nil {
  1883  				// Sometimes type-checker errors refer to positions in other packages,
  1884  				// such as when a declaration duplicates a dot-imported name.
  1885  				//
  1886  				// In these cases, we don't want to report an error in the other
  1887  				// package (the message would be rather confusing), but we do want to
  1888  				// report an error in the current package (golang/go#59005).
  1889  				if i == 0 {
  1890  					bug.Reportf("internal error: could not locate file for primary type checker error %v: %v", e, err)
  1891  				}
  1892  				continue
  1893  			}
  1894  			if !end.IsValid() || end == start {
  1895  				// Expand the end position to a more meaningful span.
  1896  				end = analysisinternal.TypeErrorEndPos(e.Fset, pgf.Src, start)
  1897  			}
  1898  			rng, err := pgf.Mapper.PosRange(pgf.Tok, start, end)
  1899  			if err != nil {
  1900  				bug.Reportf("internal error: could not compute pos to range for %v: %v", e, err)
  1901  				continue
  1902  			}
  1903  			msg := related[0].Msg
  1904  			if i > 0 {
  1905  				if supportsRelatedInformation {
  1906  					msg += " (see details)"
  1907  				} else {
  1908  					msg += fmt.Sprintf(" (this error: %v)", e.Msg)
  1909  				}
  1910  			}
  1911  			diag := &Diagnostic{
  1912  				URI:      pgf.URI,
  1913  				Range:    rng,
  1914  				Severity: protocol.SeverityError,
  1915  				Source:   TypeError,
  1916  				Message:  msg,
  1917  			}
  1918  			if code != 0 {
  1919  				diag.Code = code.String()
  1920  				diag.CodeHref = typesCodeHref(linkTarget, code)
  1921  			}
  1922  			if code == typesinternal.UnusedVar || code == typesinternal.UnusedImport {
  1923  				diag.Tags = append(diag.Tags, protocol.Unnecessary)
  1924  			}
  1925  			if match := importErrorRe.FindStringSubmatch(e.Msg); match != nil {
  1926  				diag.SuggestedFixes = append(diag.SuggestedFixes, goGetQuickFixes(moduleMode, pgf.URI, match[1])...)
  1927  			}
  1928  			if match := unsupportedFeatureRe.FindStringSubmatch(e.Msg); match != nil {
  1929  				diag.SuggestedFixes = append(diag.SuggestedFixes, editGoDirectiveQuickFix(moduleMode, pgf.URI, match[1])...)
  1930  			}
  1931  
  1932  			// Link up related information. For the primary error, all related errors
  1933  			// are treated as related information. For secondary errors, only the
  1934  			// primary is related.
  1935  			//
  1936  			// This is because go/types assumes that errors are read top-down, such as
  1937  			// in the cycle error "A refers to...". The structure of the secondary
  1938  			// error set likely only makes sense for the primary error.
  1939  			//
  1940  			// NOTE: len(diags) == 0 if the primary diagnostic has invalid positions.
  1941  			// See also golang/go#66731.
  1942  			if i > 0 && len(diags) > 0 {
  1943  				primary := diags[0]
  1944  				primary.Related = append(primary.Related, protocol.DiagnosticRelatedInformation{
  1945  					Location: protocol.Location{URI: diag.URI, Range: diag.Range},
  1946  					Message:  related[i].Msg, // use the unmodified secondary error for related errors.
  1947  				})
  1948  				diag.Related = []protocol.DiagnosticRelatedInformation{{
  1949  					Location: protocol.Location{URI: primary.URI, Range: primary.Range},
  1950  				}}
  1951  			}
  1952  			diags = append(diags, diag)
  1953  		}
  1954  		result = append(result, diags...)
  1955  	}
  1956  
  1957  	// Process batches of related errors.
  1958  	for len(errs) > 0 {
  1959  		related := []types.Error{errs[0]}
  1960  		for i := 1; i < len(errs); i++ {
  1961  			spl := errs[i]
  1962  			if len(spl.Msg) == 0 || spl.Msg[0] != '\t' {
  1963  				break
  1964  			}
  1965  			spl.Msg = spl.Msg[len("\t"):]
  1966  			related = append(related, spl)
  1967  		}
  1968  		batch(related)
  1969  		errs = errs[len(related):]
  1970  	}
  1971  
  1972  	return result
  1973  }
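
// To make the batching loop above concrete: go/types reports a cycle (or
// similar multi-part error) as a primary error followed by continuation
// errors whose messages begin with "\t". A standalone sketch of the
// grouping (not part of gopls; the messages are hypothetical):
func exampleErrorBatching() {
	errs := []types.Error{
		{Msg: "invalid cycle in declaration of T"},
		{Msg: "\tT refers to U"},
		{Msg: "\tU refers to T"},
		{Msg: "undefined: x"}, // a new primary error starts a new batch
	}
	var batches [][]types.Error
	for len(errs) > 0 {
		related := []types.Error{errs[0]}
		for _, e := range errs[1:] {
			if !strings.HasPrefix(e.Msg, "\t") {
				break
			}
			related = append(related, e)
		}
		batches = append(batches, related)
		errs = errs[len(related):]
	}
	fmt.Println(len(batches[0]), len(batches[1])) // 3 1
}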
  1974  
  1975  // An importFunc is an implementation of the single-method
  1976  // types.Importer interface based on a function value.
  1977  type importerFunc func(path string) (*types.Package, error)
  1978  
  1979  func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
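
// importerFunc is the usual function-adapter idiom (compare
// http.HandlerFunc): a function value satisfies a single-method
// interface. A minimal sketch of the pattern (not part of gopls; the
// stub importer is hypothetical):
func exampleImporterFunc() {
	var imp types.Importer = importerFunc(func(path string) (*types.Package, error) {
		return types.NewPackage(path, "stub"), nil // stand-in for a real lookup
	})
	pkg, _ := imp.Import("example.com/stub")
	fmt.Println(pkg.Path()) // example.com/stub
}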