cuelang.org/go@v0.10.1/internal/golangorgx/gopls/cache/check.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package cache
     6  
     7  import (
     8  	"context"
     9  	"crypto/sha256"
    10  	"fmt"
    11  	"go/ast"
    12  	"go/parser"
    13  	"go/token"
    14  	"go/types"
    15  	"regexp"
    16  	"runtime"
    17  	"sort"
    18  	"strings"
    19  	"sync"
    20  	"sync/atomic"
    21  
    22  	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
    23  	"cuelang.org/go/internal/golangorgx/gopls/cache/typerefs"
    24  	"cuelang.org/go/internal/golangorgx/gopls/file"
    25  	"cuelang.org/go/internal/golangorgx/gopls/filecache"
    26  	"cuelang.org/go/internal/golangorgx/gopls/protocol"
    27  	"cuelang.org/go/internal/golangorgx/gopls/util/bug"
    28  	"cuelang.org/go/internal/golangorgx/gopls/util/safetoken"
    29  	"cuelang.org/go/internal/golangorgx/tools/analysisinternal"
    30  	"cuelang.org/go/internal/golangorgx/tools/event"
    31  	"cuelang.org/go/internal/golangorgx/tools/event/tag"
    32  	"cuelang.org/go/internal/golangorgx/tools/gcimporter"
    33  	"cuelang.org/go/internal/golangorgx/tools/packagesinternal"
    34  	"cuelang.org/go/internal/golangorgx/tools/tokeninternal"
    35  	"cuelang.org/go/internal/golangorgx/tools/typesinternal"
    36  	"cuelang.org/go/internal/golangorgx/tools/versions"
    37  	"golang.org/x/mod/module"
    38  	"golang.org/x/sync/errgroup"
    39  	"golang.org/x/tools/go/ast/astutil"
    40  )
    41  
    42  // Various optimizations that should not affect correctness.
    43  const (
    44  	preserveImportGraph = true // hold on to the import graph for open packages
    45  )
    46  
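// unit is a zero-size placeholder used for signalling channels and
// semaphore tokens (done channels, cpulimit, and the like).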
    47  type unit = struct{}
    48  
    49  // A typeCheckBatch holds data for a logical type-checking operation, which may
    50  // type-check many unrelated packages.
    51  //
    52  // It shares state such as parsed files and imports, to optimize type-checking
    53  // for packages with overlapping dependency graphs.
    54  type typeCheckBatch struct {
    55  	activePackageCache interface {
    56  		getActivePackage(id PackageID) *Package
    57  		setActivePackage(id PackageID, pkg *Package)
    58  	}
    59  	syntaxIndex map[PackageID]int // requested ID -> index in ids
    60  	pre         preTypeCheck
    61  	post        postTypeCheck
    62  	handles     map[PackageID]*packageHandle
    63  	parseCache  *parseCache
    64  	fset        *token.FileSet // describes all parsed or imported files
    65  	cpulimit    chan unit      // concurrency limiter for CPU-bound operations
    66  
    67  	mu             sync.Mutex
    68  	syntaxPackages map[PackageID]*futurePackage // results of processing a requested package; may hold (nil, nil)
    69  	importPackages map[PackageID]*futurePackage // package results to use for importing
    70  }
    71  
    72  // A futurePackage is a future result of type checking or importing a package,
    73  // to be cached in a map.
    74  //
    75  // The goroutine that creates the futurePackage is responsible for evaluating
    76  // its value, and closing the done channel.
    77  type futurePackage struct {
    78  	done chan unit
    79  	v    pkgOrErr
    80  }
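
// A consumer of a futurePackage waits for the creating goroutine to close
// done and then reads v; for example (mirroring getImportPackage below):
//
//	select {
//	case <-ctx.Done():
//		return nil, ctx.Err()
//	case <-f.done:
//		return f.v.pkg, f.v.err
//	}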
    81  
    82  type pkgOrErr struct {
    83  	pkg *types.Package
    84  	err error
    85  }
    86  
    87  // TypeCheck parses and type-checks the specified packages,
    88  // and returns them in the same order as the ids.
    89  // The resulting packages' types may belong to different importers,
    90  // so types from different packages are incommensurable.
    91  //
    92  // The resulting packages slice always contains len(ids) entries, though some
    93  // of them may be nil if (and only if) the resulting error is non-nil.
    94  //
    95  // An error is returned if any of the requested packages fail to type-check.
    96  // This is different from having type-checking errors: a failure to type-check
    97  // indicates context cancellation or otherwise significant failure to perform
    98  // the type-checking operation.
    99  //
    100  // In general, clients should never need type-checked syntax for an
   101  // intermediate test variant (ITV) package. Callers should apply
   102  // RemoveIntermediateTestVariants (or equivalent) before this method, or any
   103  // of the potentially type-checking methods below.
   104  func (s *Snapshot) TypeCheck(ctx context.Context, ids ...PackageID) ([]*Package, error) {
   105  	pkgs := make([]*Package, len(ids))
   106  
   107  	var (
   108  		needIDs []PackageID // ids to type-check
   109  		indexes []int       // original index of requested ids
   110  	)
   111  
   112  	// Check for existing active packages, as any package will do.
   113  	//
   114  	// This is also done inside forEachPackage, but doing it here avoids
   115  	// unnecessary set up for type checking (e.g. assembling the package handle
    116  	// unnecessary setup for type checking (e.g. assembling the package handle
   117  	for i, id := range ids {
   118  		if pkg := s.getActivePackage(id); pkg != nil {
   119  			pkgs[i] = pkg
   120  		} else {
   121  			needIDs = append(needIDs, id)
   122  			indexes = append(indexes, i)
   123  		}
   124  	}
   125  
   126  	post := func(i int, pkg *Package) {
   127  		pkgs[indexes[i]] = pkg
   128  	}
   129  	return pkgs, s.forEachPackage(ctx, needIDs, nil, post)
   130  }
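
// An illustrative sketch of a typical call (caller-side code; mps and the
// surrounding function are assumed, not part of this file): intermediate
// test variants are filtered out before requesting type-checked syntax, and
// the i'th result corresponds to the i'th id.
//
//	metadata.RemoveIntermediateTestVariants(&mps)
//	var ids []PackageID
//	for _, mp := range mps {
//		ids = append(ids, mp.ID)
//	}
//	pkgs, err := snapshot.TypeCheck(ctx, ids...)
//	if err != nil {
//		return err // e.g. context cancellation
//	}
//	_ = pkgs // pkgs[i] corresponds to ids[i]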
   131  
    132  // getImportGraph returns a shared import graph to use for this snapshot, or nil.
   133  //
   134  // This is purely an optimization: holding on to more imports allows trading
   135  // memory for CPU and latency. Currently, getImportGraph returns an import
   136  // graph containing all packages imported by open packages, since these are
   137  // highly likely to be needed when packages change.
   138  //
   139  // Furthermore, since we memoize active packages, including their imports in
   140  // the shared import graph means we don't run the risk of pinning duplicate
   141  // copies of common imports, if active packages are computed in separate type
   142  // checking batches.
   143  func (s *Snapshot) getImportGraph(ctx context.Context) *importGraph {
   144  	if !preserveImportGraph {
   145  		return nil
   146  	}
   147  	s.mu.Lock()
   148  
   149  	// Evaluate the shared import graph for the snapshot. There are three major
   150  	// codepaths here:
   151  	//
   152  	//  1. importGraphDone == nil, importGraph == nil: it is this goroutine's
   153  	//     responsibility to type-check the shared import graph.
   154  	//  2. importGraphDone == nil, importGraph != nil: it is this goroutine's
   155  	//     responsibility to resolve the import graph, which may result in
   156  	//     type-checking only if the existing importGraph (carried over from the
   157  	//     preceding snapshot) is invalid.
   158  	//  3. importGraphDone != nil: some other goroutine is doing (1) or (2), wait
   159  	//     for the work to be done.
   160  	done := s.importGraphDone
   161  	if done == nil {
   162  		done = make(chan unit)
   163  		s.importGraphDone = done
   164  		release := s.Acquire() // must acquire to use the snapshot asynchronously
   165  		go func() {
   166  			defer release()
   167  			importGraph, err := s.resolveImportGraph() // may be nil
   168  			if err != nil {
   169  				if ctx.Err() == nil {
   170  					event.Error(ctx, "computing the shared import graph", err)
   171  				}
   172  				importGraph = nil
   173  			}
   174  			s.mu.Lock()
   175  			s.importGraph = importGraph
   176  			s.mu.Unlock()
   177  			close(done)
   178  		}()
   179  	}
   180  	s.mu.Unlock()
   181  
   182  	select {
   183  	case <-done:
   184  		return s.importGraph
   185  	case <-ctx.Done():
   186  		return nil
   187  	}
   188  }
   189  
   190  // resolveImportGraph evaluates the shared import graph to use for
   191  // type-checking in this snapshot. This may involve re-using the import graph
   192  // of the previous snapshot (stored in s.importGraph), or computing a fresh
   193  // import graph.
   194  //
   195  // resolveImportGraph should only be called from getImportGraph.
   196  func (s *Snapshot) resolveImportGraph() (*importGraph, error) {
   197  	ctx := s.backgroundCtx
   198  	ctx, done := event.Start(event.Detach(ctx), "cache.resolveImportGraph")
   199  	defer done()
   200  
   201  	s.mu.Lock()
   202  	lastImportGraph := s.importGraph
   203  	s.mu.Unlock()
   204  
   205  	openPackages := make(map[PackageID]bool)
   206  	for _, fh := range s.Overlays() {
   207  		mps, err := s.MetadataForFile(ctx, fh.URI())
   208  		if err != nil {
   209  			return nil, err
   210  		}
   211  		metadata.RemoveIntermediateTestVariants(&mps)
   212  		for _, mp := range mps {
   213  			openPackages[mp.ID] = true
   214  		}
   215  	}
   216  
   217  	var openPackageIDs []PackageID
   218  	for id := range openPackages {
   219  		openPackageIDs = append(openPackageIDs, id)
   220  	}
   221  
   222  	handles, err := s.getPackageHandles(ctx, openPackageIDs)
   223  	if err != nil {
   224  		return nil, err
   225  	}
   226  
   227  	// Subtlety: we erase the upward cone of open packages from the shared import
   228  	// graph, to increase reusability.
   229  	//
   230  	// This is easiest to understand via an example: suppose A imports B, and B
   231  	// imports C. Now suppose A and B are open. If we preserve the entire set of
    232  	// deps shared by open packages, deps will be {B, C}. But this means that any
   233  	// change to the open package B will invalidate the shared import graph,
   234  	// meaning we will experience no benefit from sharing when B is edited.
   235  	// Consider that this will be a common scenario, when A is foo_test and B is
   236  	// foo. Better to just preserve the shared import C.
   237  	//
   238  	// With precise pruning, we may want to truncate this search based on
   239  	// reachability.
   240  	//
   241  	// TODO(rfindley): this logic could use a unit test.
   242  	volatileDeps := make(map[PackageID]bool)
   243  	var isVolatile func(*packageHandle) bool
   244  	isVolatile = func(ph *packageHandle) (volatile bool) {
   245  		if v, ok := volatileDeps[ph.mp.ID]; ok {
   246  			return v
   247  		}
   248  		defer func() {
   249  			volatileDeps[ph.mp.ID] = volatile
   250  		}()
   251  		if openPackages[ph.mp.ID] {
   252  			return true
   253  		}
   254  		for _, dep := range ph.mp.DepsByPkgPath {
   255  			if isVolatile(handles[dep]) {
   256  				return true
   257  			}
   258  		}
   259  		return false
   260  	}
   261  	for _, dep := range handles {
   262  		isVolatile(dep)
   263  	}
   264  	for id, volatile := range volatileDeps {
   265  		if volatile {
   266  			delete(handles, id)
   267  		}
   268  	}
   269  
   270  	// We reuse the last import graph if and only if none of the dependencies
   271  	// have changed. Doing better would involve analyzing dependencies to find
   272  	// subgraphs that are still valid. Not worth it, especially when in the
   273  	// common case nothing has changed.
   274  	unchanged := lastImportGraph != nil && len(handles) == len(lastImportGraph.depKeys)
   275  	var ids []PackageID
   276  	depKeys := make(map[PackageID]file.Hash)
   277  	for id, ph := range handles {
   278  		ids = append(ids, id)
   279  		depKeys[id] = ph.key
   280  		if unchanged {
   281  			prevKey, ok := lastImportGraph.depKeys[id]
   282  			unchanged = ok && prevKey == ph.key
   283  		}
   284  	}
   285  
   286  	if unchanged {
   287  		return lastImportGraph, nil
   288  	}
   289  
   290  	b, err := s.forEachPackageInternal(ctx, nil, ids, nil, nil, nil, handles)
   291  	if err != nil {
   292  		return nil, err
   293  	}
   294  
   295  	next := &importGraph{
   296  		fset:    b.fset,
   297  		depKeys: depKeys,
   298  		imports: make(map[PackageID]pkgOrErr),
   299  	}
   300  	for id, fut := range b.importPackages {
   301  		if fut.v.pkg == nil && fut.v.err == nil {
   302  			panic(fmt.Sprintf("internal error: import node %s is not evaluated", id))
   303  		}
   304  		next.imports[id] = fut.v
   305  	}
   306  	return next, nil
   307  }
   308  
   309  // An importGraph holds selected results of a type-checking pass, to be re-used
   310  // by subsequent snapshots.
   311  type importGraph struct {
   312  	fset    *token.FileSet          // fileset used for type checking imports
   313  	depKeys map[PackageID]file.Hash // hash of direct dependencies for this graph
   314  	imports map[PackageID]pkgOrErr  // results of type checking
   315  }
   316  
   317  // Package visiting functions used by forEachPackage; see the documentation of
   318  // forEachPackage for details.
   319  type (
   320  	preTypeCheck  = func(int, *packageHandle) bool // false => don't type check
   321  	postTypeCheck = func(int, *Package)
   322  )
   323  
    324  // forEachPackage does a pre- and post-order traversal of the packages
   325  // specified by ids using the provided pre and post functions.
   326  //
   327  // The pre func is optional. If set, pre is evaluated after the package
   328  // handle has been constructed, but before type-checking. If pre returns false,
   329  // type-checking is skipped for this package handle.
   330  //
   331  // post is called with a syntax package after type-checking completes
   332  // successfully. It is only called if pre returned true.
   333  //
   334  // Both pre and post may be called concurrently.
   335  func (s *Snapshot) forEachPackage(ctx context.Context, ids []PackageID, pre preTypeCheck, post postTypeCheck) error {
   336  	ctx, done := event.Start(ctx, "cache.forEachPackage", tag.PackageCount.Of(len(ids)))
   337  	defer done()
   338  
   339  	if len(ids) == 0 {
    340  		return nil // shortcut: many call sites do not handle empty ids
   341  	}
   342  
   343  	handles, err := s.getPackageHandles(ctx, ids)
   344  	if err != nil {
   345  		return err
   346  	}
   347  
   348  	impGraph := s.getImportGraph(ctx)
   349  	_, err = s.forEachPackageInternal(ctx, impGraph, nil, ids, pre, post, handles)
   350  	return err
   351  }
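
// An illustrative pre/post pair (hypothetical caller; cached and results are
// assumed): pre consults a cache and returns false to skip type-checking a
// package, while post records each package that was type-checked.
//
//	pre := func(i int, ph *packageHandle) bool {
//		_, hit := cached[ph.mp.ID] // hypothetical cache lookup
//		return !hit                // false => skip type-checking
//	}
//	post := func(i int, pkg *Package) {
//		results[i] = pkg
//	}
//	err := s.forEachPackage(ctx, ids, pre, post)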
   352  
    353  // forEachPackageInternal is used by both forEachPackage and resolveImportGraph to
   354  // type-check a graph of packages.
   355  //
   356  // If a non-nil importGraph is provided, imports in this graph will be reused.
   357  func (s *Snapshot) forEachPackageInternal(ctx context.Context, importGraph *importGraph, importIDs, syntaxIDs []PackageID, pre preTypeCheck, post postTypeCheck, handles map[PackageID]*packageHandle) (*typeCheckBatch, error) {
   358  	b := &typeCheckBatch{
   359  		activePackageCache: s,
   360  		pre:                pre,
   361  		post:               post,
   362  		handles:            handles,
   363  		parseCache:         s.view.parseCache,
   364  		fset:               fileSetWithBase(reservedForParsing),
   365  		syntaxIndex:        make(map[PackageID]int),
   366  		cpulimit:           make(chan unit, runtime.GOMAXPROCS(0)),
   367  		syntaxPackages:     make(map[PackageID]*futurePackage),
   368  		importPackages:     make(map[PackageID]*futurePackage),
   369  	}
   370  
   371  	if importGraph != nil {
   372  		// Clone the file set every time, to ensure we do not leak files.
   373  		b.fset = tokeninternal.CloneFileSet(importGraph.fset)
   374  		// Pre-populate future cache with 'done' futures.
   375  		done := make(chan unit)
   376  		close(done)
   377  		for id, res := range importGraph.imports {
   378  			b.importPackages[id] = &futurePackage{done, res}
   379  		}
   380  	} else {
   381  		b.fset = fileSetWithBase(reservedForParsing)
   382  	}
   383  
   384  	for i, id := range syntaxIDs {
   385  		b.syntaxIndex[id] = i
   386  	}
   387  
   388  	// Start a single goroutine for each requested package.
   389  	//
   390  	// Other packages are reached recursively, and will not be evaluated if they
   391  	// are not needed.
   392  	var g errgroup.Group
   393  	for _, id := range importIDs {
   394  		id := id
   395  		g.Go(func() error {
   396  			_, err := b.getImportPackage(ctx, id)
   397  			return err
   398  		})
   399  	}
   400  	for i, id := range syntaxIDs {
   401  		i := i
   402  		id := id
   403  		g.Go(func() error {
   404  			_, err := b.handleSyntaxPackage(ctx, i, id)
   405  			return err
   406  		})
   407  	}
   408  	return b, g.Wait()
   409  }
   410  
   411  // TODO(rfindley): re-order the declarations below to read better from top-to-bottom.
   412  
   413  // getImportPackage returns the *types.Package to use for importing the
   414  // package referenced by id.
   415  //
   416  // This may be the package produced by type-checking syntax (as in the case
   417  // where id is in the set of requested IDs), a package loaded from export data,
   418  // or a package type-checked for import only.
   419  func (b *typeCheckBatch) getImportPackage(ctx context.Context, id PackageID) (pkg *types.Package, err error) {
   420  	b.mu.Lock()
   421  	f, ok := b.importPackages[id]
   422  	if ok {
   423  		b.mu.Unlock()
   424  
   425  		select {
   426  		case <-ctx.Done():
   427  			return nil, ctx.Err()
   428  		case <-f.done:
   429  			return f.v.pkg, f.v.err
   430  		}
   431  	}
   432  
   433  	f = &futurePackage{done: make(chan unit)}
   434  	b.importPackages[id] = f
   435  	b.mu.Unlock()
   436  
   437  	defer func() {
   438  		f.v = pkgOrErr{pkg, err}
   439  		close(f.done)
   440  	}()
   441  
   442  	if index, ok := b.syntaxIndex[id]; ok {
   443  		pkg, err := b.handleSyntaxPackage(ctx, index, id)
   444  		if err != nil {
   445  			return nil, err
   446  		}
   447  		if pkg != nil {
   448  			return pkg, nil
   449  		}
   450  		// type-checking was short-circuited by the pre- func.
   451  	}
   452  
   453  	// unsafe cannot be imported or type-checked.
   454  	if id == "unsafe" {
   455  		return types.Unsafe, nil
   456  	}
   457  
   458  	ph := b.handles[id]
   459  
   460  	// Do a second check for "unsafe" defensively, due to golang/go#60890.
   461  	if ph.mp.PkgPath == "unsafe" {
   462  		bug.Reportf("encountered \"unsafe\" as %s (golang/go#60890)", id)
   463  		return types.Unsafe, nil
   464  	}
   465  
   466  	data, err := filecache.Get(exportDataKind, ph.key)
   467  	if err == filecache.ErrNotFound {
   468  		// No cached export data: type-check as fast as possible.
   469  		return b.checkPackageForImport(ctx, ph)
   470  	}
   471  	if err != nil {
   472  		return nil, fmt.Errorf("failed to read cache data for %s: %v", ph.mp.ID, err)
   473  	}
   474  	return b.importPackage(ctx, ph.mp, data)
   475  }
   476  
   477  // handleSyntaxPackage handles one package from the ids slice.
   478  //
   479  // If type checking occurred while handling the package, it returns the
   480  // resulting types.Package so that it may be used for importing.
   481  //
   482  // handleSyntaxPackage returns (nil, nil) if pre returned false.
   483  func (b *typeCheckBatch) handleSyntaxPackage(ctx context.Context, i int, id PackageID) (pkg *types.Package, err error) {
   484  	b.mu.Lock()
   485  	f, ok := b.syntaxPackages[id]
   486  	if ok {
   487  		b.mu.Unlock()
   488  		<-f.done
   489  		return f.v.pkg, f.v.err
   490  	}
   491  
   492  	f = &futurePackage{done: make(chan unit)}
   493  	b.syntaxPackages[id] = f
   494  	b.mu.Unlock()
   495  	defer func() {
   496  		f.v = pkgOrErr{pkg, err}
   497  		close(f.done)
   498  	}()
   499  
   500  	ph := b.handles[id]
   501  	if b.pre != nil && !b.pre(i, ph) {
   502  		return nil, nil // skip: export data only
   503  	}
   504  
   505  	// Check for existing active packages.
   506  	//
   507  	// Since gopls can't depend on package identity, any instance of the
   508  	// requested package must be ok to return.
   509  	//
   510  	// This is an optimization to avoid redundant type-checking: following
    511  	// changes to an open package, many LSP clients send several successive
   512  	// requests for package information for the modified package (semantic
   513  	// tokens, code lens, inlay hints, etc.)
   514  	if pkg := b.activePackageCache.getActivePackage(id); pkg != nil {
   515  		b.post(i, pkg)
   516  		return nil, nil // skip: not checked in this batch
   517  	}
   518  
   519  	// Wait for predecessors.
   520  	{
   521  		var g errgroup.Group
   522  		for _, depID := range ph.mp.DepsByPkgPath {
   523  			depID := depID
   524  			g.Go(func() error {
   525  				_, err := b.getImportPackage(ctx, depID)
   526  				return err
   527  			})
   528  		}
   529  		if err := g.Wait(); err != nil {
   530  			// Failure to import a package should not abort the whole operation.
   531  			// Stop only if the context was cancelled, a likely cause.
   532  			// Import errors will be reported as type diagnostics.
   533  			if ctx.Err() != nil {
   534  				return nil, ctx.Err()
   535  			}
   536  		}
   537  	}
   538  
   539  	// Wait to acquire a CPU token.
   540  	//
   541  	// Note: it is important to acquire this token only after awaiting
   542  	// predecessors, to avoid starvation.
   543  	select {
   544  	case <-ctx.Done():
   545  		return nil, ctx.Err()
   546  	case b.cpulimit <- unit{}:
   547  		defer func() {
   548  			<-b.cpulimit // release CPU token
   549  		}()
   550  	}
   551  
   552  	// Compute the syntax package.
   553  	p, err := b.checkPackage(ctx, ph)
   554  	if err != nil {
   555  		return nil, err
   556  	}
   557  
   558  	// Update caches.
   559  	b.activePackageCache.setActivePackage(id, p) // store active packages in memory
   560  	go storePackageResults(ctx, ph, p)           // ...and write all packages to disk
   561  
   562  	b.post(i, p)
   563  
   564  	return p.pkg.types, nil
   565  }
   566  
   567  // storePackageResults serializes and writes information derived from p to the
   568  // file cache.
   569  // The context is used only for logging; cancellation does not affect the operation.
   570  func storePackageResults(ctx context.Context, ph *packageHandle, p *Package) {
   571  	toCache := map[string][]byte{
   572  		xrefsKind:       p.pkg.xrefs(),
   573  		methodSetsKind:  p.pkg.methodsets().Encode(),
   574  		diagnosticsKind: encodeDiagnostics(p.pkg.diagnostics),
   575  	}
   576  
   577  	if p.metadata.PkgPath != "unsafe" { // unsafe cannot be exported
   578  		exportData, err := gcimporter.IExportShallow(p.pkg.fset, p.pkg.types, bug.Reportf)
   579  		if err != nil {
   580  			bug.Reportf("exporting package %v: %v", p.metadata.ID, err)
   581  		} else {
   582  			toCache[exportDataKind] = exportData
   583  		}
   584  	} else if p.metadata.ID != "unsafe" {
   585  		// golang/go#60890: we should only ever see one variant of the "unsafe"
   586  		// package.
   587  		bug.Reportf("encountered \"unsafe\" as %s (golang/go#60890)", p.metadata.ID)
   588  	}
   589  
   590  	for kind, data := range toCache {
   591  		if err := filecache.Set(kind, ph.key, data); err != nil {
   592  			event.Error(ctx, fmt.Sprintf("storing %s data for %s", kind, ph.mp.ID), err)
   593  		}
   594  	}
   595  }
   596  
    597  // importPackage loads the given package from its export data (the data
    598  // argument, which must already be populated).
   599  func (b *typeCheckBatch) importPackage(ctx context.Context, mp *metadata.Package, data []byte) (*types.Package, error) {
   600  	ctx, done := event.Start(ctx, "cache.typeCheckBatch.importPackage", tag.Package.Of(string(mp.ID)))
   601  	defer done()
   602  
   603  	impMap := b.importMap(mp.ID)
   604  
   605  	thisPackage := types.NewPackage(string(mp.PkgPath), string(mp.Name))
   606  	getPackages := func(items []gcimporter.GetPackagesItem) error {
   607  		for i, item := range items {
   608  			var id PackageID
   609  			var pkg *types.Package
   610  			if item.Path == string(mp.PkgPath) {
   611  				id = mp.ID
   612  				pkg = thisPackage
   613  
   614  				// debugging issues #60904, #64235
   615  				if pkg.Name() != item.Name {
   616  					// This would mean that mp.Name != item.Name, so the
   617  					// manifest in the export data of mp.PkgPath is
   618  					// inconsistent with mp.Name. Or perhaps there
   619  					// are duplicate PkgPath items in the manifest?
   620  					return bug.Errorf("internal error: package name is %q, want %q (id=%q, path=%q) (see issue #60904)",
   621  						pkg.Name(), item.Name, id, item.Path)
   622  				}
   623  			} else {
   624  				id = impMap[item.Path]
   625  				var err error
   626  				pkg, err = b.getImportPackage(ctx, id)
   627  				if err != nil {
   628  					return err
   629  				}
   630  
   631  				// We intentionally duplicate the bug.Errorf calls because
   632  				// telemetry tells us only the program counter, not the message.
   633  
   634  				// debugging issues #60904, #64235
   635  				if pkg.Name() != item.Name {
   636  					// This means that, while reading the manifest of the
   637  					// export data of mp.PkgPath, one of its indirect
   638  					// dependencies had a name that differs from the
   639  					// Metadata.Name
   640  					return bug.Errorf("internal error: package name is %q, want %q (id=%q, path=%q) (see issue #60904)",
   641  						pkg.Name(), item.Name, id, item.Path)
   642  				}
   643  			}
   644  			items[i].Pkg = pkg
   645  
   646  		}
   647  		return nil
   648  	}
   649  
   650  	// Importing is potentially expensive, and might not encounter cancellations
   651  	// via dependencies (e.g. if they have already been evaluated).
   652  	if ctx.Err() != nil {
   653  		return nil, ctx.Err()
   654  	}
   655  
   656  	imported, err := gcimporter.IImportShallow(b.fset, getPackages, data, string(mp.PkgPath), bug.Reportf)
   657  	if err != nil {
   658  		return nil, fmt.Errorf("import failed for %q: %v", mp.ID, err)
   659  	}
   660  	return imported, nil
   661  }
   662  
   663  // checkPackageForImport type checks, but skips function bodies and does not
   664  // record syntax information.
   665  func (b *typeCheckBatch) checkPackageForImport(ctx context.Context, ph *packageHandle) (*types.Package, error) {
   666  	ctx, done := event.Start(ctx, "cache.typeCheckBatch.checkPackageForImport", tag.Package.Of(string(ph.mp.ID)))
   667  	defer done()
   668  
   669  	onError := func(e error) {
   670  		// Ignore errors for exporting.
   671  	}
   672  	cfg := b.typesConfig(ctx, ph.localInputs, onError)
   673  	cfg.IgnoreFuncBodies = true
   674  
   675  	// Parse the compiled go files, bypassing the parse cache as packages checked
   676  	// for import are unlikely to get cache hits. Additionally, we can optimize
   677  	// parsing slightly by not passing parser.ParseComments.
   678  	pgfs := make([]*ParsedGoFile, len(ph.localInputs.compiledGoFiles))
   679  	{
   680  		var group errgroup.Group
   681  		// Set an arbitrary concurrency limit; we want some parallelism but don't
   682  		// need GOMAXPROCS, as there is already a lot of concurrency among calls to
   683  		// checkPackageForImport.
   684  		//
   685  		// TODO(rfindley): is there a better way to limit parallelism here? We could
   686  		// have a global limit on the type-check batch, but would have to be very
   687  		// careful to avoid starvation.
   688  		group.SetLimit(4)
   689  		for i, fh := range ph.localInputs.compiledGoFiles {
   690  			i, fh := i, fh
   691  			group.Go(func() error {
   692  				pgf, err := parseGoImpl(ctx, b.fset, fh, parser.SkipObjectResolution, false)
   693  				pgfs[i] = pgf
   694  				return err
   695  			})
   696  		}
   697  		if err := group.Wait(); err != nil {
   698  			return nil, err // cancelled, or catastrophic error (e.g. missing file)
   699  		}
   700  	}
   701  	pkg := types.NewPackage(string(ph.localInputs.pkgPath), string(ph.localInputs.name))
   702  	check := types.NewChecker(cfg, b.fset, pkg, nil)
   703  
   704  	files := make([]*ast.File, len(pgfs))
   705  	for i, pgf := range pgfs {
   706  		files[i] = pgf.File
   707  	}
   708  
   709  	// Type checking is expensive, and we may not have encountered cancellations
   710  	// via parsing (e.g. if we got nothing but cache hits for parsed files).
   711  	if ctx.Err() != nil {
   712  		return nil, ctx.Err()
   713  	}
   714  
   715  	_ = check.Files(files) // ignore errors
   716  
   717  	// If the context was cancelled, we may have returned a ton of transient
   718  	// errors to the type checker. Swallow them.
   719  	if ctx.Err() != nil {
   720  		return nil, ctx.Err()
   721  	}
   722  
   723  	// Asynchronously record export data.
   724  	go func() {
   725  		exportData, err := gcimporter.IExportShallow(b.fset, pkg, bug.Reportf)
   726  		if err != nil {
   727  			bug.Reportf("exporting package %v: %v", ph.mp.ID, err)
   728  			return
   729  		}
   730  		if err := filecache.Set(exportDataKind, ph.key, exportData); err != nil {
   731  			event.Error(ctx, fmt.Sprintf("storing export data for %s", ph.mp.ID), err)
   732  		}
   733  	}()
   734  	return pkg, nil
   735  }
   736  
   737  // importMap returns the map of package path -> package ID relative to the
   738  // specified ID.
   739  func (b *typeCheckBatch) importMap(id PackageID) map[string]PackageID {
   740  	impMap := make(map[string]PackageID)
   741  	var populateDeps func(*metadata.Package)
   742  	populateDeps = func(parent *metadata.Package) {
   743  		for _, id := range parent.DepsByPkgPath {
   744  			mp := b.handles[id].mp
   745  			if prevID, ok := impMap[string(mp.PkgPath)]; ok {
   746  				// debugging #63822
   747  				if prevID != mp.ID {
   748  					bug.Reportf("inconsistent view of dependencies")
   749  				}
   750  				continue
   751  			}
   752  			impMap[string(mp.PkgPath)] = mp.ID
   753  			populateDeps(mp)
   754  		}
   755  	}
   756  	mp := b.handles[id].mp
   757  	populateDeps(mp)
   758  	return impMap
   759  }
   760  
   761  // A packageHandle holds inputs required to compute a Package, including
   762  // metadata, derived diagnostics, files, and settings. Additionally,
   763  // packageHandles manage a key for these inputs, to use in looking up
   764  // precomputed results.
   765  //
   766  // packageHandles may be invalid following an invalidation via snapshot.clone,
   767  // but the handles returned by getPackageHandles will always be valid.
   768  //
   769  // packageHandles are critical for implementing "precise pruning" in gopls:
   770  // packageHandle.key is a hash of a precise set of inputs, such as package
   771  // files and "reachable" syntax, that may affect type checking.
   772  //
   773  // packageHandles also keep track of state that allows gopls to compute, and
   774  // then quickly recompute, these keys. This state is split into two categories:
   775  //   - local state, which depends only on the package's local files and metadata
   776  //   - other state, which includes data derived from dependencies.
   777  //
   778  // Dividing the data in this way allows gopls to minimize invalidation when a
   779  // package is modified. For example, any change to a package file fully
   780  // invalidates the package handle. On the other hand, if that change was not
    781  // metadata-affecting, it may be the case that packages indirectly depending on
   782  // the modified package are unaffected by the change. For that reason, we have
   783  // two types of invalidation, corresponding to the two types of data above:
   784  //   - deletion of the handle, which occurs when the package itself changes
   785  //   - clearing of the validated field, which marks the package as possibly
   786  //     invalid.
   787  //
   788  // With the second type of invalidation, packageHandles are re-evaluated from the
   789  // bottom up. If this process encounters a packageHandle whose deps have not
    790  // changed (as detected by the depKeys field), then the packageHandle in
   791  // question must also not have changed, and we need not re-evaluate its key.
   792  type packageHandle struct {
   793  	mp *metadata.Package
   794  
   795  	// loadDiagnostics memoizes the result of processing error messages from
   796  	// go/packages (i.e. `go list`).
   797  	//
   798  	// These are derived from metadata using a snapshot. Since they depend on
   799  	// file contents (for translating positions), they should theoretically be
   800  	// invalidated by file changes, but historically haven't been. In practice
   801  	// they are rare and indicate a fundamental error that needs to be corrected
   802  	// before development can continue, so it may not be worth significant
   803  	// engineering effort to implement accurate invalidation here.
   804  	//
   805  	// TODO(rfindley): loadDiagnostics are out of place here, as they don't
   806  	// directly relate to type checking. We should perhaps move the caching of
   807  	// load diagnostics to an entirely separate component, so that Packages need
   808  	// only be concerned with parsing and type checking.
   809  	// (Nevertheless, since the lifetime of load diagnostics matches that of the
   810  	// Metadata, it is convenient to memoize them here.)
   811  	loadDiagnostics []*Diagnostic
   812  
   813  	// Local data:
   814  
    815  	// localInputs holds all local type-checking inputs, excluding
   816  	// dependencies.
   817  	localInputs typeCheckInputs
   818  	// localKey is a hash of localInputs.
   819  	localKey file.Hash
   820  	// refs is the result of syntactic dependency analysis produced by the
   821  	// typerefs package.
   822  	refs map[string][]typerefs.Symbol
   823  
   824  	// Data derived from dependencies:
   825  
   826  	// validated indicates whether the current packageHandle is known to have a
   827  	// valid key. Invalidated package handles are stored for packages whose
   828  	// type information may have changed.
   829  	validated bool
   830  	// depKeys records the key of each dependency that was used to calculate the
   831  	// key above. If the handle becomes invalid, we must re-check that each still
   832  	// matches.
   833  	depKeys map[PackageID]file.Hash
   834  	// key is the hashed key for the package.
   835  	//
    836  	// It includes all bits of the transitive closure of the
    837  	// dependencies' sources.
   838  	key file.Hash
   839  }
   840  
   841  // clone returns a copy of the receiver with the validated bit set to the
   842  // provided value.
   843  func (ph *packageHandle) clone(validated bool) *packageHandle {
   844  	copy := *ph
   845  	copy.validated = validated
   846  	return &copy
   847  }
   848  
   849  // getPackageHandles gets package handles for all given ids and their
   850  // dependencies, recursively.
   851  func (s *Snapshot) getPackageHandles(ctx context.Context, ids []PackageID) (map[PackageID]*packageHandle, error) {
    852  	// Perform a two-pass traversal.
   853  	//
   854  	// On the first pass, build up a bidirectional graph of handle nodes, and collect leaves.
   855  	// Then build package handles from bottom up.
   856  
   857  	s.mu.Lock() // guard s.meta and s.packages below
   858  	b := &packageHandleBuilder{
   859  		s:              s,
   860  		transitiveRefs: make(map[typerefs.IndexID]*partialRefs),
   861  		nodes:          make(map[typerefs.IndexID]*handleNode),
   862  	}
   863  
   864  	var leaves []*handleNode
   865  	var makeNode func(*handleNode, PackageID) *handleNode
   866  	makeNode = func(from *handleNode, id PackageID) *handleNode {
   867  		idxID := b.s.pkgIndex.IndexID(id)
   868  		n, ok := b.nodes[idxID]
   869  		if !ok {
   870  			mp := s.meta.Packages[id]
   871  			if mp == nil {
   872  				panic(fmt.Sprintf("nil metadata for %q", id))
   873  			}
   874  			n = &handleNode{
   875  				mp:              mp,
   876  				idxID:           idxID,
   877  				unfinishedSuccs: int32(len(mp.DepsByPkgPath)),
   878  			}
   879  			if entry, hit := b.s.packages.Get(mp.ID); hit {
   880  				n.ph = entry
   881  			}
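			// A package with no dependencies has no unfinished successors: it is a
			// leaf, from which the bottom-up construction below starts.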
   882  			if n.unfinishedSuccs == 0 {
   883  				leaves = append(leaves, n)
   884  			} else {
   885  				n.succs = make(map[PackageID]*handleNode, n.unfinishedSuccs)
   886  			}
   887  			b.nodes[idxID] = n
   888  			for _, depID := range mp.DepsByPkgPath {
   889  				n.succs[depID] = makeNode(n, depID)
   890  			}
   891  		}
   892  		// Add edge from predecessor.
   893  		if from != nil {
   894  			n.preds = append(n.preds, from)
   895  		}
   896  		return n
   897  	}
   898  	for _, id := range ids {
   899  		makeNode(nil, id)
   900  	}
   901  	s.mu.Unlock()
   902  
   903  	g, ctx := errgroup.WithContext(ctx)
   904  
    905  	// Files are preloaded, so building package handles is CPU-bound.
   906  	//
   907  	// Note that we can't use g.SetLimit, as that could result in starvation:
   908  	// g.Go blocks until a slot is available, and so all existing goroutines
   909  	// could be blocked trying to enqueue a predecessor.
   910  	limiter := make(chan unit, runtime.GOMAXPROCS(0))
   911  
   912  	var enqueue func(*handleNode)
   913  	enqueue = func(n *handleNode) {
   914  		g.Go(func() error {
   915  			limiter <- unit{}
   916  			defer func() { <-limiter }()
   917  
   918  			if ctx.Err() != nil {
   919  				return ctx.Err()
   920  			}
   921  
   922  			b.buildPackageHandle(ctx, n)
   923  
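			// Decrement each predecessor's count of unfinished successors; the
			// goroutine that completes a predecessor's last successor enqueues
			// that predecessor, driving the traversal bottom-up.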
   924  			for _, pred := range n.preds {
   925  				if atomic.AddInt32(&pred.unfinishedSuccs, -1) == 0 {
   926  					enqueue(pred)
   927  				}
   928  			}
   929  
   930  			return n.err
   931  		})
   932  	}
   933  	for _, leaf := range leaves {
   934  		enqueue(leaf)
   935  	}
   936  
   937  	if err := g.Wait(); err != nil {
   938  		return nil, err
   939  	}
   940  
   941  	// Copy handles into the result map.
   942  	handles := make(map[PackageID]*packageHandle, len(b.nodes))
   943  	for _, v := range b.nodes {
   944  		assert(v.ph != nil, "nil handle")
   945  		handles[v.mp.ID] = v.ph
   946  	}
   947  
   948  	return handles, nil
   949  }
   950  
   951  // A packageHandleBuilder computes a batch of packageHandles concurrently,
   952  // sharing computed transitive reachability sets used to compute package keys.
   953  type packageHandleBuilder struct {
   954  	s *Snapshot
   955  
   956  	// nodes are assembled synchronously.
   957  	nodes map[typerefs.IndexID]*handleNode
   958  
   959  	// transitiveRefs is incrementally evaluated as package handles are built.
   960  	transitiveRefsMu sync.Mutex
   961  	transitiveRefs   map[typerefs.IndexID]*partialRefs // see getTransitiveRefs
   962  }
   963  
   964  // A handleNode represents a to-be-computed packageHandle within a graph of
   965  // predecessors and successors.
   966  //
   967  // It is used to implement a bottom-up construction of packageHandles.
   968  type handleNode struct {
   969  	mp              *metadata.Package
   970  	idxID           typerefs.IndexID
   971  	ph              *packageHandle
   972  	err             error
   973  	preds           []*handleNode
   974  	succs           map[PackageID]*handleNode
   975  	unfinishedSuccs int32
   976  }
   977  
   978  // partialRefs maps names declared by a given package to their set of
   979  // transitive references.
   980  //
   981  // If complete is set, refs is known to be complete for the package in
   982  // question. Otherwise, it may only map a subset of all names declared by the
   983  // package.
   984  type partialRefs struct {
   985  	refs     map[string]*typerefs.PackageSet
   986  	complete bool
   987  }
   988  
   989  // getTransitiveRefs gets or computes the set of transitively reachable
   990  // packages for each exported name in the package specified by id.
   991  //
   992  // The operation may fail if building a predecessor failed. If and only if this
   993  // occurs, the result will be nil.
   994  func (b *packageHandleBuilder) getTransitiveRefs(pkgID PackageID) map[string]*typerefs.PackageSet {
   995  	b.transitiveRefsMu.Lock()
   996  	defer b.transitiveRefsMu.Unlock()
   997  
   998  	idxID := b.s.pkgIndex.IndexID(pkgID)
   999  	trefs, ok := b.transitiveRefs[idxID]
  1000  	if !ok {
  1001  		trefs = &partialRefs{
  1002  			refs: make(map[string]*typerefs.PackageSet),
  1003  		}
  1004  		b.transitiveRefs[idxID] = trefs
  1005  	}
  1006  
  1007  	if !trefs.complete {
  1008  		trefs.complete = true
  1009  		ph := b.nodes[idxID].ph
  1010  		for name := range ph.refs {
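			// Only exported names are visible to dependent packages, so only
			// they contribute to the transitive reachability sets.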
  1011  			if ('A' <= name[0] && name[0] <= 'Z') || token.IsExported(name) {
  1012  				if _, ok := trefs.refs[name]; !ok {
  1013  					pkgs := b.s.pkgIndex.NewSet()
  1014  					for _, sym := range ph.refs[name] {
  1015  						pkgs.Add(sym.Package)
  1016  						otherSet := b.getOneTransitiveRefLocked(sym)
  1017  						pkgs.Union(otherSet)
  1018  					}
  1019  					trefs.refs[name] = pkgs
  1020  				}
  1021  			}
  1022  		}
  1023  	}
  1024  
  1025  	return trefs.refs
  1026  }
  1027  
   1028  // getOneTransitiveRefLocked computes the full set of packages transitively
  1029  // reachable through the given sym reference.
  1030  //
  1031  // It may return nil if the reference is invalid (i.e. the referenced name does
  1032  // not exist).
  1033  func (b *packageHandleBuilder) getOneTransitiveRefLocked(sym typerefs.Symbol) *typerefs.PackageSet {
  1034  	assert(token.IsExported(sym.Name), "expected exported symbol")
  1035  
  1036  	trefs := b.transitiveRefs[sym.Package]
  1037  	if trefs == nil {
  1038  		trefs = &partialRefs{
  1039  			refs:     make(map[string]*typerefs.PackageSet),
  1040  			complete: false,
  1041  		}
  1042  		b.transitiveRefs[sym.Package] = trefs
  1043  	}
  1044  
  1045  	pkgs, ok := trefs.refs[sym.Name]
  1046  	if ok && pkgs == nil {
  1047  		// See below, where refs is set to nil before recursing.
  1048  		bug.Reportf("cycle detected to %q in reference graph", sym.Name)
  1049  	}
  1050  
  1051  	// Note that if (!ok && trefs.complete), the name does not exist in the
  1052  	// referenced package, and we should not write to trefs as that may introduce
  1053  	// a race.
  1054  	if !ok && !trefs.complete {
  1055  		n := b.nodes[sym.Package]
  1056  		if n == nil {
   1057  			// We should always have this IndexID in our node set, because symbol references
  1058  			// should only be recorded for packages that actually exist in the import graph.
  1059  			//
  1060  			// However, it is not easy to prove this (typerefs are serialized and
  1061  			// deserialized), so make this code temporarily defensive while we are on a
  1062  			// point release.
  1063  			//
  1064  			// TODO(rfindley): in the future, we should turn this into an assertion.
  1065  			bug.Reportf("missing reference to package %s", b.s.pkgIndex.PackageID(sym.Package))
  1066  			return nil
  1067  		}
  1068  
  1069  		// Break cycles. This is perhaps overly defensive as cycles should not
  1070  		// exist at this point: metadata cycles should have been broken at load
  1071  		// time, and intra-package reference cycles should have been contracted by
  1072  		// the typerefs algorithm.
  1073  		//
  1074  		// See the "cycle detected" bug report above.
  1075  		trefs.refs[sym.Name] = nil
  1076  
  1077  		pkgs := b.s.pkgIndex.NewSet()
  1078  		for _, sym2 := range n.ph.refs[sym.Name] {
  1079  			pkgs.Add(sym2.Package)
  1080  			otherSet := b.getOneTransitiveRefLocked(sym2)
  1081  			pkgs.Union(otherSet)
  1082  		}
  1083  		trefs.refs[sym.Name] = pkgs
  1084  	}
  1085  
  1086  	return pkgs
  1087  }
  1088  
  1089  // buildPackageHandle gets or builds a package handle for the given id, storing
  1090  // its result in the snapshot.packages map.
  1091  //
  1092  // buildPackageHandle must only be called from getPackageHandles.
  1093  func (b *packageHandleBuilder) buildPackageHandle(ctx context.Context, n *handleNode) {
  1094  	var prevPH *packageHandle
  1095  	if n.ph != nil {
  1096  		// Existing package handle: if it is valid, return it. Otherwise, create a
  1097  		// copy to update.
  1098  		if n.ph.validated {
  1099  			return
  1100  		}
  1101  		prevPH = n.ph
  1102  		// Either prevPH is still valid, or we will update the key and depKeys of
  1103  		// this copy. In either case, the result will be valid.
  1104  		n.ph = prevPH.clone(true)
  1105  	} else {
  1106  		// No package handle: read and analyze the package syntax.
  1107  		inputs, err := b.s.typeCheckInputs(ctx, n.mp)
  1108  		if err != nil {
  1109  			n.err = err
  1110  			return
  1111  		}
  1112  		refs, err := b.s.typerefs(ctx, n.mp, inputs.compiledGoFiles)
  1113  		if err != nil {
  1114  			n.err = err
  1115  			return
  1116  		}
  1117  		n.ph = &packageHandle{
  1118  			mp:              n.mp,
  1119  			loadDiagnostics: computeLoadDiagnostics(ctx, b.s, n.mp),
  1120  			localInputs:     inputs,
  1121  			localKey:        localPackageKey(inputs),
  1122  			refs:            refs,
  1123  			validated:       true,
  1124  		}
  1125  	}
  1126  
  1127  	// ph either did not exist, or was invalid. We must re-evaluate deps and key.
  1128  	if err := b.evaluatePackageHandle(prevPH, n); err != nil {
  1129  		n.err = err
  1130  		return
  1131  	}
  1132  
  1133  	assert(n.ph.validated, "unvalidated handle")
  1134  
  1135  	// Ensure the result (or an equivalent) is recorded in the snapshot.
  1136  	b.s.mu.Lock()
  1137  	defer b.s.mu.Unlock()
  1138  
  1139  	// Check that the metadata has not changed
  1140  	// (which should invalidate this handle).
  1141  	//
  1142  	// TODO(rfindley): eventually promote this to an assert.
  1143  	// TODO(rfindley): move this to after building the package handle graph?
  1144  	if b.s.meta.Packages[n.mp.ID] != n.mp {
  1145  		bug.Reportf("stale metadata for %s", n.mp.ID)
  1146  	}
  1147  
  1148  	// Check the packages map again in case another goroutine got there first.
  1149  	if alt, ok := b.s.packages.Get(n.mp.ID); ok && alt.validated {
  1150  		if alt.mp != n.mp {
  1151  			bug.Reportf("existing package handle does not match for %s", n.mp.ID)
  1152  		}
  1153  		n.ph = alt
  1154  	} else {
  1155  		b.s.packages.Set(n.mp.ID, n.ph, nil)
  1156  	}
  1157  }
  1158  
  1159  // evaluatePackageHandle validates and/or computes the key of ph, setting key,
  1160  // depKeys, and the validated flag on ph.
  1161  //
  1162  // It uses prevPH to avoid recomputing keys that can't have changed, since
  1163  // their depKeys did not change.
  1164  //
  1165  // See the documentation for packageHandle for more details about packageHandle
  1166  // state, and see the documentation for the typerefs package for more details
  1167  // about precise reachability analysis.
  1168  func (b *packageHandleBuilder) evaluatePackageHandle(prevPH *packageHandle, n *handleNode) error {
  1169  	// Opt: if no dep keys have changed, we need not re-evaluate the key.
  1170  	if prevPH != nil {
  1171  		depsChanged := false
  1172  		assert(len(prevPH.depKeys) == len(n.succs), "mismatching dep count")
  1173  		for id, succ := range n.succs {
  1174  			oldKey, ok := prevPH.depKeys[id]
  1175  			assert(ok, "missing dep")
  1176  			if oldKey != succ.ph.key {
  1177  				depsChanged = true
  1178  				break
  1179  			}
  1180  		}
  1181  		if !depsChanged {
  1182  			return nil // key cannot have changed
  1183  		}
  1184  	}
  1185  
  1186  	// Deps have changed, so we must re-evaluate the key.
  1187  	n.ph.depKeys = make(map[PackageID]file.Hash)
  1188  
  1189  	// See the typerefs package: the reachable set of packages is defined to be
  1190  	// the set of packages containing syntax that is reachable through the
  1191  	// exported symbols in the dependencies of n.ph.
  1192  	reachable := b.s.pkgIndex.NewSet()
  1193  	for depID, succ := range n.succs {
  1194  		n.ph.depKeys[depID] = succ.ph.key
  1195  		reachable.Add(succ.idxID)
  1196  		trefs := b.getTransitiveRefs(succ.mp.ID)
  1197  		if trefs == nil {
  1198  			// A predecessor failed to build due to e.g. context cancellation.
  1199  			return fmt.Errorf("missing transitive refs for %s", succ.mp.ID)
  1200  		}
  1201  		for _, set := range trefs {
  1202  			reachable.Union(set)
  1203  		}
  1204  	}
  1205  
  1206  	// Collect reachable handles.
  1207  	var reachableHandles []*packageHandle
  1208  	// In the presence of context cancellation, any package may be missing.
  1209  	// We need all dependencies to produce a valid key.
  1210  	missingReachablePackage := false
  1211  	reachable.Elems(func(id typerefs.IndexID) {
  1212  		dh := b.nodes[id]
  1213  		if dh == nil {
  1214  			missingReachablePackage = true
  1215  		} else {
  1216  			assert(dh.ph.validated, "unvalidated dependency")
  1217  			reachableHandles = append(reachableHandles, dh.ph)
  1218  		}
  1219  	})
  1220  	if missingReachablePackage {
  1221  		return fmt.Errorf("missing reachable package")
  1222  	}
  1223  	// Sort for stability.
  1224  	sort.Slice(reachableHandles, func(i, j int) bool {
  1225  		return reachableHandles[i].mp.ID < reachableHandles[j].mp.ID
  1226  	})
  1227  
  1228  	// Key is the hash of the local key, and the local key of all reachable
  1229  	// packages.
  1230  	depHasher := sha256.New()
  1231  	depHasher.Write(n.ph.localKey[:])
  1232  	for _, rph := range reachableHandles {
  1233  		depHasher.Write(rph.localKey[:])
  1234  	}
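	// Sum appends to the slice it is given; passing n.ph.key[:0] writes the
	// digest directly into the key array.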
  1235  	depHasher.Sum(n.ph.key[:0])
  1236  
  1237  	return nil
  1238  }
  1239  
   1240  // typerefs returns typerefs for the package described by mp and cgfs, after
  1241  // either computing it or loading it from the file cache.
  1242  func (s *Snapshot) typerefs(ctx context.Context, mp *metadata.Package, cgfs []file.Handle) (map[string][]typerefs.Symbol, error) {
  1243  	imports := make(map[ImportPath]*metadata.Package)
  1244  	for impPath, id := range mp.DepsByImpPath {
  1245  		if id != "" {
  1246  			imports[impPath] = s.Metadata(id)
  1247  		}
  1248  	}
  1249  
  1250  	data, err := s.typerefData(ctx, mp.ID, imports, cgfs)
  1251  	if err != nil {
  1252  		return nil, err
  1253  	}
  1254  	classes := typerefs.Decode(s.pkgIndex, data)
  1255  	refs := make(map[string][]typerefs.Symbol)
  1256  	for _, class := range classes {
  1257  		for _, decl := range class.Decls {
  1258  			refs[decl] = class.Refs
  1259  		}
  1260  	}
  1261  	return refs, nil
  1262  }
  1263  
  1264  // typerefData retrieves encoded typeref data from the filecache, or computes it on
  1265  // a cache miss.
  1266  func (s *Snapshot) typerefData(ctx context.Context, id PackageID, imports map[ImportPath]*metadata.Package, cgfs []file.Handle) ([]byte, error) {
  1267  	key := typerefsKey(id, imports, cgfs)
  1268  	if data, err := filecache.Get(typerefsKind, key); err == nil {
  1269  		return data, nil
  1270  	} else if err != filecache.ErrNotFound {
  1271  		bug.Reportf("internal error reading typerefs data: %v", err)
  1272  	}
  1273  
  1274  	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), ParseFull&^parser.ParseComments, true, cgfs...)
  1275  	if err != nil {
  1276  		return nil, err
  1277  	}
  1278  	data := typerefs.Encode(pgfs, imports)
  1279  
  1280  	// Store the resulting data in the cache.
  1281  	go func() {
  1282  		if err := filecache.Set(typerefsKind, key, data); err != nil {
  1283  			event.Error(ctx, fmt.Sprintf("storing typerefs data for %s", id), err)
  1284  		}
  1285  	}()
  1286  
  1287  	return data, nil
  1288  }
  1289  
  1290  // typerefsKey produces a key for the reference information produced by the
  1291  // typerefs package.
  1292  func typerefsKey(id PackageID, imports map[ImportPath]*metadata.Package, compiledGoFiles []file.Handle) file.Hash {
  1293  	hasher := sha256.New()
  1294  
  1295  	fmt.Fprintf(hasher, "typerefs: %s\n", id)
  1296  
  1297  	importPaths := make([]string, 0, len(imports))
  1298  	for impPath := range imports {
  1299  		importPaths = append(importPaths, string(impPath))
  1300  	}
  1301  	sort.Strings(importPaths)
  1302  	for _, importPath := range importPaths {
  1303  		imp := imports[ImportPath(importPath)]
  1304  		// TODO(rfindley): strength reduce the typerefs.Export API to guarantee
  1305  		// that it only depends on these attributes of dependencies.
  1306  		fmt.Fprintf(hasher, "import %s %s %s", importPath, imp.ID, imp.Name)
  1307  	}
  1308  
  1309  	fmt.Fprintf(hasher, "compiledGoFiles: %d\n", len(compiledGoFiles))
  1310  	for _, fh := range compiledGoFiles {
  1311  		fmt.Fprintln(hasher, fh.Identity())
  1312  	}
  1313  
  1314  	var hash [sha256.Size]byte
  1315  	hasher.Sum(hash[:0])
  1316  	return hash
  1317  }
  1318  
  1319  // typeCheckInputs contains the inputs of a call to typeCheckImpl, which
  1320  // type-checks a package.
  1321  //
  1322  // Part of the purpose of this type is to keep type checking in-sync with the
  1323  // package handle key, by explicitly identifying the inputs to type checking.
  1324  type typeCheckInputs struct {
  1325  	id PackageID
  1326  
  1327  	// Used for type checking:
  1328  	pkgPath                  PackagePath
  1329  	name                     PackageName
  1330  	goFiles, compiledGoFiles []file.Handle
  1331  	sizes                    types.Sizes
  1332  	depsByImpPath            map[ImportPath]PackageID
  1333  	goVersion                string // packages.Module.GoVersion, e.g. "1.18"
  1334  
  1335  	// Used for type check diagnostics:
  1336  	// TODO(rfindley): consider storing less data in gobDiagnostics, and
  1337  	// interpreting each diagnostic in the context of a fixed set of options.
  1338  	// Then these fields need not be part of the type checking inputs.
  1339  	relatedInformation bool
  1340  	linkTarget         string
  1341  	moduleMode         bool
  1342  }
  1343  
  1344  func (s *Snapshot) typeCheckInputs(ctx context.Context, mp *metadata.Package) (typeCheckInputs, error) {
  1345  	// Read both lists of files of this package.
  1346  	//
  1347  	// Parallelism is not necessary here as the files will have already been
  1348  	// pre-read at load time.
  1349  	//
  1350  	// goFiles aren't presented to the type checker--nor
  1351  	// are they included in the key, unsoundly--but their
  1352  	// syntax trees are available from (*pkg).File(URI).
  1353  	// TODO(adonovan): consider parsing them on demand?
  1354  	// The need should be rare.
  1355  	goFiles, err := readFiles(ctx, s, mp.GoFiles)
  1356  	if err != nil {
  1357  		return typeCheckInputs{}, err
  1358  	}
  1359  	compiledGoFiles, err := readFiles(ctx, s, mp.CompiledGoFiles)
  1360  	if err != nil {
  1361  		return typeCheckInputs{}, err
  1362  	}
  1363  
  1364  	goVersion := ""
  1365  	if mp.Module != nil && mp.Module.GoVersion != "" {
  1366  		goVersion = mp.Module.GoVersion
  1367  	}
  1368  
  1369  	return typeCheckInputs{
  1370  		id:              mp.ID,
  1371  		pkgPath:         mp.PkgPath,
  1372  		name:            mp.Name,
  1373  		goFiles:         goFiles,
  1374  		compiledGoFiles: compiledGoFiles,
  1375  		sizes:           mp.TypesSizes,
  1376  		depsByImpPath:   mp.DepsByImpPath,
  1377  		goVersion:       goVersion,
  1378  
  1379  		relatedInformation: s.Options().RelatedInformationSupported,
  1380  		linkTarget:         s.Options().LinkTarget,
  1381  		moduleMode:         s.view.moduleMode(),
  1382  	}, nil
  1383  }
  1384  
   1385  // readFiles reads the content of each file URI from the source
  1386  // (e.g. snapshot or cache).
  1387  func readFiles(ctx context.Context, fs file.Source, uris []protocol.DocumentURI) (_ []file.Handle, err error) {
  1388  	fhs := make([]file.Handle, len(uris))
  1389  	for i, uri := range uris {
  1390  		fhs[i], err = fs.ReadFile(ctx, uri)
  1391  		if err != nil {
  1392  			return nil, err
  1393  		}
  1394  	}
  1395  	return fhs, nil
  1396  }
  1397  
  1398  // localPackageKey returns a key for local inputs into type-checking, excluding
  1399  // dependency information: files, metadata, and configuration.
  1400  func localPackageKey(inputs typeCheckInputs) file.Hash {
  1401  	hasher := sha256.New()
  1402  
  1403  	// In principle, a key must be the hash of an
  1404  	// unambiguous encoding of all the relevant data.
  1405  	// If it's ambiguous, we risk collisions.
  1406  
  1407  	// package identifiers
  1408  	fmt.Fprintf(hasher, "package: %s %s %s\n", inputs.id, inputs.name, inputs.pkgPath)
  1409  
  1410  	// module Go version
  1411  	fmt.Fprintf(hasher, "go %s\n", inputs.goVersion)
  1412  
  1413  	// import map
  1414  	importPaths := make([]string, 0, len(inputs.depsByImpPath))
  1415  	for impPath := range inputs.depsByImpPath {
  1416  		importPaths = append(importPaths, string(impPath))
  1417  	}
  1418  	sort.Strings(importPaths)
  1419  	for _, impPath := range importPaths {
  1420  		fmt.Fprintf(hasher, "import %s %s\n", impPath, string(inputs.depsByImpPath[ImportPath(impPath)]))
  1421  	}
  1422  
  1423  	// file names and contents
  1424  	fmt.Fprintf(hasher, "compiledGoFiles: %d\n", len(inputs.compiledGoFiles))
  1425  	for _, fh := range inputs.compiledGoFiles {
  1426  		fmt.Fprintln(hasher, fh.Identity())
  1427  	}
  1428  	fmt.Fprintf(hasher, "goFiles: %d\n", len(inputs.goFiles))
  1429  	for _, fh := range inputs.goFiles {
  1430  		fmt.Fprintln(hasher, fh.Identity())
  1431  	}
  1432  
  1433  	// types sizes
  1434  	wordSize := inputs.sizes.Sizeof(types.Typ[types.Int])
  1435  	maxAlign := inputs.sizes.Alignof(types.NewPointer(types.Typ[types.Int64]))
  1436  	fmt.Fprintf(hasher, "sizes: %d %d\n", wordSize, maxAlign)
  1437  
  1438  	fmt.Fprintf(hasher, "relatedInformation: %t\n", inputs.relatedInformation)
  1439  	fmt.Fprintf(hasher, "linkTarget: %s\n", inputs.linkTarget)
  1440  	fmt.Fprintf(hasher, "moduleMode: %t\n", inputs.moduleMode)
  1441  
  1442  	var hash [sha256.Size]byte
  1443  	hasher.Sum(hash[:0])
  1444  	return hash
  1445  }
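
        // For illustration (hypothetical values), the stream hashed by
        // localPackageKey for a tiny package might look like:
        //
        //	package: example.com/p p example.com/p
        //	go 1.21
        //	import fmt fmt
        //	compiledGoFiles: 1
        //	<identity of p.go>
        //	goFiles: 0
        //	sizes: 8 8
        //	relatedInformation: true
        //	linkTarget: pkg.go.dev
        //	moduleMode: true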
  1446  
  1447  // checkPackage type checks the parsed source files in compiledGoFiles.
  1448  // (The resulting pkg also holds the parsed but not type-checked goFiles.)
  1449  // Direct dependencies are imported on demand via the batch's importer (see typesConfig).
  1450  func (b *typeCheckBatch) checkPackage(ctx context.Context, ph *packageHandle) (*Package, error) {
  1451  	inputs := ph.localInputs
  1452  	ctx, done := event.Start(ctx, "cache.typeCheckBatch.checkPackage", tag.Package.Of(string(inputs.id)))
  1453  	defer done()
  1454  
  1455  	pkg := &syntaxPackage{
  1456  		id:    inputs.id,
  1457  		fset:  b.fset, // must match parse call below
  1458  		types: types.NewPackage(string(inputs.pkgPath), string(inputs.name)),
  1459  		typesInfo: &types.Info{
  1460  			Types:      make(map[ast.Expr]types.TypeAndValue),
  1461  			Defs:       make(map[*ast.Ident]types.Object),
  1462  			Uses:       make(map[*ast.Ident]types.Object),
  1463  			Implicits:  make(map[ast.Node]types.Object),
  1464  			Instances:  make(map[*ast.Ident]types.Instance),
  1465  			Selections: make(map[*ast.SelectorExpr]*types.Selection),
  1466  			Scopes:     make(map[ast.Node]*types.Scope),
  1467  		},
  1468  	}
  1469  	versions.InitFileVersions(pkg.typesInfo)
  1470  
  1471  	// Collect parsed files from the type check pass, capturing parse errors from
  1472  	// compiled files.
  1473  	var err error
  1474  	pkg.goFiles, err = b.parseCache.parseFiles(ctx, b.fset, ParseFull, false, inputs.goFiles...)
  1475  	if err != nil {
  1476  		return nil, err
  1477  	}
  1478  	pkg.compiledGoFiles, err = b.parseCache.parseFiles(ctx, b.fset, ParseFull, false, inputs.compiledGoFiles...)
  1479  	if err != nil {
  1480  		return nil, err
  1481  	}
  1482  	for _, pgf := range pkg.compiledGoFiles {
  1483  		if pgf.ParseErr != nil {
  1484  			pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr)
  1485  		}
  1486  	}
  1487  
  1488  	// Use the default type information for the unsafe package.
  1489  	if inputs.pkgPath == "unsafe" {
  1490  		// Don't type check Unsafe: it's unnecessary, and doing so exposes a data
  1491  		// race to Unsafe.completed.
  1492  		pkg.types = types.Unsafe
  1493  	} else {
  1494  
  1495  		if len(pkg.compiledGoFiles) == 0 {
  1496  			// No files most likely means go/packages failed.
  1497  			//
  1498  			// TODO(rfindley): in the past, we would capture go list errors in this
  1499  			// case, to present go list errors to the user. However we had no tests for
  1500  			// this behavior. It is unclear if anything better can be done here.
  1501  			return nil, fmt.Errorf("no parsed files for package %s", inputs.pkgPath)
  1502  		}
  1503  
  1504  		onError := func(e error) {
  1505  			pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
  1506  		}
  1507  		cfg := b.typesConfig(ctx, inputs, onError)
  1508  		check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)
  1509  
  1510  		var files []*ast.File
  1511  		for _, cgf := range pkg.compiledGoFiles {
  1512  			files = append(files, cgf.File)
  1513  		}
  1514  
  1515  		// Type checking is expensive, and we may not have encountered cancellations
  1516  		// via parsing (e.g. if we got nothing but cache hits for parsed files).
  1517  		if ctx.Err() != nil {
  1518  			return nil, ctx.Err()
  1519  		}
  1520  
  1521  		// Type checking errors are handled via the config, so ignore them here.
  1522  		_ = check.Files(files) // 50us-15ms, depending on size of package
  1523  
  1524  		// If the context was cancelled, we may have returned a ton of transient
  1525  		// errors to the type checker. Swallow them.
  1526  		if ctx.Err() != nil {
  1527  			return nil, ctx.Err()
  1528  		}
  1529  
  1530  		// Collect imports by package path for the DependencyTypes API.
  1531  		pkg.importMap = make(map[PackagePath]*types.Package)
  1532  		var collectDeps func(*types.Package)
  1533  		collectDeps = func(p *types.Package) {
  1534  			pkgPath := PackagePath(p.Path())
  1535  			if _, ok := pkg.importMap[pkgPath]; ok {
  1536  				return
  1537  			}
  1538  			pkg.importMap[pkgPath] = p
  1539  			for _, imp := range p.Imports() {
  1540  				collectDeps(imp)
  1541  			}
  1542  		}
  1543  		collectDeps(pkg.types)
  1544  
  1545  		// Work around golang/go#61561: interface instances aren't concurrency-safe
  1546  		// as they are not completed by the type checker.
  1547  		for _, inst := range pkg.typesInfo.Instances {
  1548  			if iface, _ := inst.Type.Underlying().(*types.Interface); iface != nil {
  1549  				iface.Complete()
  1550  			}
  1551  		}
  1552  	}
  1553  
  1554  	// Our heuristic for whether to show type checking errors is:
  1555  	//  + If there is a parse error _in the current file_, suppress type
  1556  	//    errors in that file.
  1557  	//  + Otherwise, show type errors even in the presence of parse errors in
  1558  	//    other package files. go/types attempts to suppress follow-on errors
  1559  	//    due to bad syntax, so on balance type checking errors still provide
  1560  	//    a decent signal/noise ratio as long as the file in question parses.
  1561  
  1562  	// Track URIs with parse errors so that we can suppress type errors for these
  1563  	// files.
  1564  	unparseable := map[protocol.DocumentURI]bool{}
  1565  	for _, e := range pkg.parseErrors {
  1566  		diags, err := parseErrorDiagnostics(pkg, e)
  1567  		if err != nil {
  1568  			event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(string(inputs.id)))
  1569  			continue
  1570  		}
  1571  		for _, diag := range diags {
  1572  			unparseable[diag.URI] = true
  1573  			pkg.diagnostics = append(pkg.diagnostics, diag)
  1574  		}
  1575  	}
  1576  
  1577  	diags := typeErrorsToDiagnostics(pkg, pkg.typeErrors, inputs.linkTarget, inputs.moduleMode, inputs.relatedInformation)
  1578  	for _, diag := range diags {
  1579  		// If the file didn't parse cleanly, it is highly likely that type
  1580  		// checking errors will be confusing or redundant. But otherwise, type
  1581  		// checking usually provides a good enough signal to include.
  1582  		if !unparseable[diag.URI] {
  1583  			pkg.diagnostics = append(pkg.diagnostics, diag)
  1584  		}
  1585  	}
  1586  
  1587  	return &Package{ph.mp, ph.loadDiagnostics, pkg}, nil
  1588  }
  1589  
  1590  // e.g. "go1" or "go1.2" or "go1.2.3"
  1591  var goVersionRx = regexp.MustCompile(`^go[1-9][0-9]*(?:\.(0|[1-9][0-9]*)){0,2}$`)
  1592  
  1593  func (b *typeCheckBatch) typesConfig(ctx context.Context, inputs typeCheckInputs, onError func(e error)) *types.Config {
  1594  	cfg := &types.Config{
  1595  		Sizes: inputs.sizes,
  1596  		Error: onError,
  1597  		Importer: importerFunc(func(path string) (*types.Package, error) {
  1598  			// While all of the import errors could be reported
  1599  			// based on the metadata before we start type checking,
  1600  			// reporting them via types.Importer places the errors
  1601  			// at the correct source location.
  1602  			id, ok := inputs.depsByImpPath[ImportPath(path)]
  1603  			if !ok {
  1604  				// If the import declaration is broken,
  1605  				// go list may fail to report metadata about it.
  1606  				// See TestFixImportDecl for an example.
  1607  				return nil, fmt.Errorf("missing metadata for import of %q", path)
  1608  			}
  1609  			depPH := b.handles[id]
  1610  			if depPH == nil {
  1611  				// e.g. missing metadata for dependencies in buildPackageHandle
  1612  				return nil, missingPkgError(inputs.id, path, inputs.moduleMode)
  1613  			}
  1614  			if !metadata.IsValidImport(inputs.pkgPath, depPH.mp.PkgPath) {
  1615  				return nil, fmt.Errorf("invalid use of internal package %q", path)
  1616  			}
  1617  			return b.getImportPackage(ctx, id)
  1618  		}),
  1619  	}
  1620  
  1621  	if inputs.goVersion != "" {
  1622  		goVersion := "go" + inputs.goVersion
  1623  		// types.NewChecker panics if GoVersion is invalid. An unparsable mod
  1624  		// file should probably stop us before we get here, but double check
  1625  		// just in case.
  1626  		if goVersionRx.MatchString(goVersion) {
  1627  			typesinternal.SetGoVersion(cfg, goVersion)
  1628  		}
  1629  	}
  1630  
  1631  	// We want to type check cgo code if go/types supports it.
  1632  	// We passed typecheckCgo to go/packages when we Loaded.
  1633  	typesinternal.SetUsesCgo(cfg)
  1634  	return cfg
  1635  }
  1636  
  1637  // depsErrors creates diagnostics for each metadata error (e.g. import cycle).
  1638  // These may be attached to import declarations in the transitive source files
  1639  // of pkg, or to 'require' directives in the package's go.mod file.
  1640  //
  1641  // TODO(rfindley): move this to load.go
  1642  func depsErrors(ctx context.Context, snapshot *Snapshot, mp *metadata.Package) ([]*Diagnostic, error) {
  1643  	// Select packages that can't be found, and were imported in non-workspace packages.
  1644  	// Workspace packages already show their own errors.
  1645  	var relevantErrors []*packagesinternal.PackageError
  1646  	for _, depsError := range mp.DepsErrors {
  1647  		// Up to Go 1.15, the missing package was included in the stack, which
  1648  		// was presumably a bug. We want the next one up.
  1649  		directImporterIdx := len(depsError.ImportStack) - 1
  1650  		if directImporterIdx < 0 {
  1651  			continue
  1652  		}
  1653  
  1654  		directImporter := depsError.ImportStack[directImporterIdx]
  1655  		if snapshot.isWorkspacePackage(PackageID(directImporter)) {
  1656  			continue
  1657  		}
  1658  		relevantErrors = append(relevantErrors, depsError)
  1659  	}
  1660  
  1661  	// Don't build the import index for nothing.
  1662  	if len(relevantErrors) == 0 {
  1663  		return nil, nil
  1664  	}
  1665  
  1666  	// Subsequent checks require Go files.
  1667  	if len(mp.CompiledGoFiles) == 0 {
  1668  		return nil, nil
  1669  	}
  1670  
  1671  	// Build an index of all imports in the package.
  1672  	type fileImport struct {
  1673  		cgf *ParsedGoFile
  1674  		imp *ast.ImportSpec
  1675  	}
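        	// allImports maps each unquoted import path to every ImportSpec of
        	// that path across the package's compiled Go files.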
  1676  	allImports := map[string][]fileImport{}
  1677  	for _, uri := range mp.CompiledGoFiles {
  1678  		pgf, err := parseGoURI(ctx, snapshot, uri, ParseHeader)
  1679  		if err != nil {
  1680  			return nil, err
  1681  		}
  1682  		fset := tokeninternal.FileSetFor(pgf.Tok)
  1683  		// TODO(adonovan): modify Imports() to accept a single token.File (cgf.Tok).
  1684  		for _, group := range astutil.Imports(fset, pgf.File) {
  1685  			for _, imp := range group {
  1686  				if imp.Path == nil {
  1687  					continue
  1688  				}
  1689  				path := strings.Trim(imp.Path.Value, `"`)
  1690  				allImports[path] = append(allImports[path], fileImport{pgf, imp})
  1691  			}
  1692  		}
  1693  	}
  1694  
  1695  	// Apply a diagnostic to any import involved in the error, stopping once
  1696  	// we reach the workspace.
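        	// For example (hypothetical stack), given ImportStack
        	// ["example.com/app", "example.com/dep", "missing.example/pkg"], the
        	// walk below starts at the missing package and moves toward the
        	// workspace, diagnosing each import of a stack item until a workspace
        	// package is reached.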
  1697  	var errors []*Diagnostic
  1698  	for _, depErr := range relevantErrors {
  1699  		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
  1700  			item := depErr.ImportStack[i]
  1701  			if snapshot.isWorkspacePackage(PackageID(item)) {
  1702  				break
  1703  			}
  1704  
  1705  			for _, imp := range allImports[item] {
  1706  				rng, err := imp.cgf.NodeRange(imp.imp)
  1707  				if err != nil {
  1708  					return nil, err
  1709  				}
  1710  				diag := &Diagnostic{
  1711  					URI:            imp.cgf.URI,
  1712  					Range:          rng,
  1713  					Severity:       protocol.SeverityError,
  1714  					Source:         TypeError,
  1715  					Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
  1716  					SuggestedFixes: goGetQuickFixes(mp.Module != nil, imp.cgf.URI, item),
  1717  				}
  1718  				if !bundleQuickFixes(diag) {
  1719  					bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
  1720  				}
  1721  				errors = append(errors, diag)
  1722  			}
  1723  		}
  1724  	}
  1725  
  1726  	modFile, err := nearestModFile(ctx, mp.CompiledGoFiles[0], snapshot)
  1727  	if err != nil {
  1728  		return nil, err
  1729  	}
  1730  	pm, err := parseModURI(ctx, snapshot, modFile)
  1731  	if err != nil {
  1732  		return nil, err
  1733  	}
  1734  
  1735  	// Add a diagnostic to the module that contained the lowest-level import of
  1736  	// the missing package.
  1737  	for _, depErr := range relevantErrors {
  1738  		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
  1739  			item := depErr.ImportStack[i]
  1740  			mp := snapshot.Metadata(PackageID(item))
  1741  			if mp == nil || mp.Module == nil {
  1742  				continue
  1743  			}
  1744  			modVer := module.Version{Path: mp.Module.Path, Version: mp.Module.Version}
  1745  			reference := findModuleReference(pm.File, modVer)
  1746  			if reference == nil {
  1747  				continue
  1748  			}
  1749  			rng, err := pm.Mapper.OffsetRange(reference.Start.Byte, reference.End.Byte)
  1750  			if err != nil {
  1751  				return nil, err
  1752  			}
  1753  			diag := &Diagnostic{
  1754  				URI:            pm.URI,
  1755  				Range:          rng,
  1756  				Severity:       protocol.SeverityError,
  1757  				Source:         TypeError,
  1758  				Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
  1759  				SuggestedFixes: goGetQuickFixes(true, pm.URI, item),
  1760  			}
  1761  			if !bundleQuickFixes(diag) {
  1762  				bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
  1763  			}
  1764  			errors = append(errors, diag)
  1765  			break
  1766  		}
  1767  	}
  1768  	return errors, nil
  1769  }
  1770  
  1771  // missingPkgError returns an error message for a missing package that varies
  1772  // based on the user's workspace mode.
  1773  func missingPkgError(from PackageID, pkgPath string, moduleMode bool) error {
  1774  	// TODO(rfindley): improve this error. Previous versions of this error had
  1775  	// access to the full snapshot, and could provide more information (such as
  1776  	// the initialization error).
  1777  	if moduleMode {
  1778  		if metadata.IsCommandLineArguments(from) {
  1779  			return fmt.Errorf("current file is not included in a workspace module")
  1780  		} else {
  1781  			// Previously, we would present the initialization error here.
  1782  			return fmt.Errorf("no required module provides package %q", pkgPath)
  1783  		}
  1784  	} else {
  1785  		// Previously, we would list the directories in GOROOT and GOPATH here.
  1786  		return fmt.Errorf("cannot find package %q in GOROOT or GOPATH", pkgPath)
  1787  	}
  1788  }
  1789  
  1790  // typeErrorsToDiagnostics translates a slice of types.Errors into a slice of
  1791  // Diagnostics.
  1792  //
  1793  // In addition to simply mapping data such as position information and error
  1794  // codes, this function interprets related go/types "continuation" errors as
  1795  // protocol.DiagnosticRelatedInformation. Continuation errors are go/types
  1796  // errors whose message starts with "\t". By convention, these errors relate
  1797  // to the previous error in the errs slice (as if they had been printed in
  1798  // sequence to a terminal).
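        //
        // For instance (illustrative), a redeclaration produces a primary error
        // such as "x redeclared in this block" followed by a continuation error
        // "\tother declaration of x"; the continuation becomes related
        // information attached to the primary diagnostic.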
  1799  //
  1800  // The linkTarget, moduleMode, and supportsRelatedInformation parameters affect
  1801  // the construction of protocol objects (see the code for details).
  1802  func typeErrorsToDiagnostics(pkg *syntaxPackage, errs []types.Error, linkTarget string, moduleMode, supportsRelatedInformation bool) []*Diagnostic {
  1803  	var result []*Diagnostic
  1804  
  1805  	// batch records diagnostics for a set of related types.Errors.
  1806  	batch := func(related []types.Error) {
  1807  		var diags []*Diagnostic
  1808  		for i, e := range related {
  1809  			code, start, end, ok := typesinternal.ReadGo116ErrorData(e)
  1810  			if !ok || !start.IsValid() || !end.IsValid() {
  1811  				start, end = e.Pos, e.Pos
  1812  				code = 0
  1813  			}
  1814  			if !start.IsValid() {
  1815  				// Type checker errors may be missing position information if they
  1816  				// relate to synthetic syntax, such as if the file were fixed. In that
  1817  				// case, we should have a parse error anyway, so skipping the type
  1818  				// checker error is likely benign.
  1819  				//
  1820  				// TODO(golang/go#64335): we should eventually verify that all type
  1821  				// checked syntax has valid positions, and promote this skip to a bug
  1822  				// report.
  1823  				continue
  1824  			}
  1825  			posn := safetoken.StartPosition(e.Fset, start)
  1826  			if !posn.IsValid() {
  1827  			// All valid positions produced by the type checker should be described
  1828  			// by its FileSet.
  1829  				//
  1830  				// Note: in golang/go#64488, we observed an error that was positioned
  1831  				// over fixed syntax, which overflowed its file. So it's definitely
  1832  				// possible that we get here (it's hard to reason about fixing up the
  1833  				// AST). Nevertheless, it's a bug.
  1834  				bug.Reportf("internal error: type checker error %q outside its Fset", e)
  1835  				continue
  1836  			}
  1837  			pgf, err := pkg.File(protocol.URIFromPath(posn.Filename))
  1838  			if err != nil {
  1839  				// Sometimes type-checker errors refer to positions in other packages,
  1840  				// such as when a declaration duplicates a dot-imported name.
  1841  				//
  1842  				// In these cases, we don't want to report an error in the other
  1843  				// package (the message would be rather confusing), but we do want to
  1844  				// report an error in the current package (golang/go#59005).
  1845  				if i == 0 {
  1846  					bug.Reportf("internal error: could not locate file for primary type checker error %v: %v", e, err)
  1847  				}
  1848  				continue
  1849  			}
  1850  			if !end.IsValid() || end == start {
  1851  				// Expand the end position to a more meaningful span.
  1852  				end = analysisinternal.TypeErrorEndPos(e.Fset, pgf.Src, start)
  1853  			}
  1854  			rng, err := pgf.Mapper.PosRange(pgf.Tok, start, end)
  1855  			if err != nil {
  1856  				bug.Reportf("internal error: could not compute pos to range for %v: %v", e, err)
  1857  				continue
  1858  			}
  1859  			msg := related[0].Msg
  1860  			if i > 0 {
  1861  				if supportsRelatedInformation {
  1862  					msg += " (see details)"
  1863  				} else {
  1864  					msg += fmt.Sprintf(" (this error: %v)", e.Msg)
  1865  				}
  1866  			}
  1867  			diag := &Diagnostic{
  1868  				URI:      pgf.URI,
  1869  				Range:    rng,
  1870  				Severity: protocol.SeverityError,
  1871  				Source:   TypeError,
  1872  				Message:  msg,
  1873  			}
  1874  			if code != 0 {
  1875  				diag.Code = code.String()
  1876  				diag.CodeHref = typesCodeHref(linkTarget, code)
  1877  			}
  1878  			if code == typesinternal.UnusedVar || code == typesinternal.UnusedImport {
  1879  				diag.Tags = append(diag.Tags, protocol.Unnecessary)
  1880  			}
  1881  			if match := importErrorRe.FindStringSubmatch(e.Msg); match != nil {
  1882  				diag.SuggestedFixes = append(diag.SuggestedFixes, goGetQuickFixes(moduleMode, pgf.URI, match[1])...)
  1883  			}
  1884  			if match := unsupportedFeatureRe.FindStringSubmatch(e.Msg); match != nil {
  1885  				diag.SuggestedFixes = append(diag.SuggestedFixes, editGoDirectiveQuickFix(moduleMode, pgf.URI, match[1])...)
  1886  			}
  1887  
  1888  			// Link up related information. For the primary error, all related errors
  1889  			// are treated as related information. For secondary errors, only the
  1890  			// primary is related.
  1891  			//
  1892  			// This is because go/types assumes that errors are read top-down, such as
  1893  			// in the cycle error "A refers to...". The structure of the secondary
  1894  			// error set likely only makes sense for the primary error.
  1895  			if i > 0 {
  1896  				primary := diags[0]
  1897  				primary.Related = append(primary.Related, protocol.DiagnosticRelatedInformation{
  1898  					Location: protocol.Location{URI: diag.URI, Range: diag.Range},
  1899  					Message:  related[i].Msg, // use the unmodified secondary error for related errors.
  1900  				})
  1901  				diag.Related = []protocol.DiagnosticRelatedInformation{{
  1902  					Location: protocol.Location{URI: primary.URI, Range: primary.Range},
  1903  				}}
  1904  			}
  1905  			diags = append(diags, diag)
  1906  		}
  1907  		result = append(result, diags...)
  1908  	}
  1909  
  1910  	// Process batches of related errors.
  1911  	for len(errs) > 0 {
  1912  		related := []types.Error{errs[0]}
  1913  		for i := 1; i < len(errs); i++ {
  1914  			spl := errs[i]
  1915  			if len(spl.Msg) == 0 || spl.Msg[0] != '\t' {
  1916  				break
  1917  			}
  1918  			spl.Msg = spl.Msg[len("\t"):]
  1919  			related = append(related, spl)
  1920  		}
  1921  		batch(related)
  1922  		errs = errs[len(related):]
  1923  	}
  1924  
  1925  	return result
  1926  }
  1927  
  1928  // An importFunc is an implementation of the single-method
  1929  // types.Importer interface based on a function value.
  1930  type importerFunc func(path string) (*types.Package, error)
  1931  
  1932  func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
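
        // Example (hypothetical sketch, not part of gopls): importerFunc adapts any
        // closure into a types.Importer, for instance one backed by a map of
        // pre-built packages:
        //
        //	func mapImporter(pkgs map[string]*types.Package) types.Importer {
        //		return importerFunc(func(path string) (*types.Package, error) {
        //			if p, ok := pkgs[path]; ok {
        //				return p, nil
        //			}
        //			return nil, fmt.Errorf("no package %q", path)
        //		})
        //	}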