golang.org/x/tools/gopls@v0.15.3/internal/cache/analysis.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package cache
     6  
     7  // This file defines gopls' driver for modular static analysis (go/analysis).
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"crypto/sha256"
    13  	"encoding/gob"
    14  	"encoding/json"
    15  	"errors"
    16  	"fmt"
    17  	"go/ast"
    18  	"go/parser"
    19  	"go/token"
    20  	"go/types"
    21  	"log"
    22  	urlpkg "net/url"
    23  	"path/filepath"
    24  	"reflect"
    25  	"runtime"
    26  	"runtime/debug"
    27  	"sort"
    28  	"strings"
    29  	"sync"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"golang.org/x/sync/errgroup"
    34  	"golang.org/x/tools/go/analysis"
    35  	"golang.org/x/tools/gopls/internal/cache/metadata"
    36  	"golang.org/x/tools/gopls/internal/file"
    37  	"golang.org/x/tools/gopls/internal/filecache"
    38  	"golang.org/x/tools/gopls/internal/progress"
    39  	"golang.org/x/tools/gopls/internal/protocol"
    40  	"golang.org/x/tools/gopls/internal/settings"
    41  	"golang.org/x/tools/gopls/internal/util/astutil"
    42  	"golang.org/x/tools/gopls/internal/util/bug"
    43  	"golang.org/x/tools/gopls/internal/util/frob"
    44  	"golang.org/x/tools/gopls/internal/util/maps"
    45  	"golang.org/x/tools/internal/event"
    46  	"golang.org/x/tools/internal/event/tag"
    47  	"golang.org/x/tools/internal/facts"
    48  	"golang.org/x/tools/internal/gcimporter"
    49  	"golang.org/x/tools/internal/typesinternal"
    50  	"golang.org/x/tools/internal/versions"
    51  )
    52  
    53  /*
    54  
    55     DESIGN
    56  
    57     An analysis request (Snapshot.Analyze) is for a set of Analyzers and
    58     PackageIDs. The result is the set of diagnostics for those
    59     packages. Each request constructs a transitively closed DAG of
    60     nodes, each representing a package, then works bottom up in
    61     parallel postorder calling runCached to ensure that each node's
    62     analysis summary is up to date. The summary contains the analysis
    63     diagnostics as well as the intermediate results required by the
    64     recursion, such as serialized types and facts.
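
           A minimal sketch of this bottom-up scheduling (invented names;
           it assumes sync/atomic and golang.org/x/sync/errgroup, and
           simplifies the makeNode/enqueue logic in Analyze below): each
           node counts its unfinished successors; leaves are enqueued
           first, and a node runs once its last import completes.

        	type node struct {
        		preds           []*node      // nodes that import this one
        		unfinishedSuccs atomic.Int32 // imports not yet summarized
        	}

        	func runBottomUp(leaves []*node, process func(*node)) error {
        		var g errgroup.Group
        		var enqueue func(*node)
        		enqueue = func(n *node) {
        			g.Go(func() error {
        				process(n) // all of n's imports are summarized
        				for _, pred := range n.preds {
        					if pred.unfinishedSuccs.Add(-1) == 0 {
        						enqueue(pred) // pred just became a leaf
        					}
        				}
        				return nil
        			})
        		}
        		for _, leaf := range leaves {
        			enqueue(leaf)
        		}
        		return g.Wait()
        	}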
    65  
    66     The entire DAG is ephemeral. Each node in the DAG records the set
    67     of analyzers to run: the complete set for the root packages, and
    68     the "facty" subset for dependencies. Each package is thus analyzed
    69     at most once. The entire DAG shares a single FileSet for parsing
    70     and importing.
    71  
    72     Each node is processed by runCached. It gets the source file
    73     content hashes for package p, and the summaries of its "vertical"
    74     dependencies (direct imports), and from them it computes a key
    75     representing the unit of work (parsing, type-checking, and
    76     analysis) that it has to do. The key is a cryptographic hash of the
    77     "recipe" for this step, including the Metadata, the file contents,
    78     the set of analyzers, and the type and fact information from the
    79     vertical dependencies.
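
           Schematically, the key derivation looks like this (a sketch
           with invented variable names; cacheKey below is the real
           implementation):

        	h := sha256.New()
        	fmt.Fprintf(h, "analyzers: %v\n", analyzerNames)
        	fmt.Fprintf(h, "package: %s\n", metadataID)
        	for _, fh := range fileHandles {
        		fmt.Fprintln(h, fh.Identity()) // content hash per file
        	}
        	for _, dep := range sortedVerticalDeps {
        		fmt.Fprintf(h, "dep: %s %s\n", dep.pkgPath, dep.deepExportHash)
        		fmt.Fprintf(h, "facts: %s\n", dep.factsHash)
        	}
        	var key [sha256.Size]byte
        	h.Sum(key[:0])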
    80  
    81     The key is sought in a machine-global persistent file-system based
    82     cache. If this gopls process, or another gopls process on the same
    83     machine, has already performed this analysis step, runCached will
    84     get a cache hit and load the serialized summary of the results. If
    85     not, it will have to proceed to run() to parse and type-check the
    86     package and then apply a set of analyzers to it. (The set of
    87     analyzers applied to a single package itself forms a graph of
    88     "actions", and it too is evaluated in parallel postorder; these
    89     dependency edges within the same package are called "horizontal".)
    90     Finally it writes a new cache entry. The entry contains serialized
    91     types (export data) and analysis facts.
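
           For example, the Requires edges of typical analyzers induce
           horizontal dependencies such as:

        	printf   -> inspect   // printf requires the inspect pass
        	ctrlflow -> inspect   // inspect runs once; its Result is shared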
    92  
    93     Each node in the DAG acts like a go/types importer mapping,
    94     providing a consistent view of packages and their objects: the
    95     mapping for a node is a superset of its dependencies' mappings.
    96     Every node has an associated *types.Package, initially nil. A
    97     package is populated during run (cache miss) by type-checking its
    98     syntax; but for a cache hit, the package is populated lazily, i.e.
    99     not until it later becomes necessary because it is imported
   100     directly or referenced by export data higher up in the DAG.
   101  
   102     For types, we use "shallow" export data. Historically, the Go
   103     compiler always produced a summary of the types for a given package
   104     that included types from other packages that it indirectly
   105     referenced: "deep" export data. This had the advantage that the
   106     compiler (and analogous tools such as gopls) need only load one
   107     file per direct import.  However, it meant that the files tended to
   108     get larger based on the level of the package in the import
   109     graph. For example, higher-level packages in the kubernetes module
   110     have over 1MB of "deep" export data, even when they have almost no
   111     content of their own, merely because they mention a major type that
   112     references many others. In pathological cases the export data was
   113     300x larger than the source for a package due to this quadratic
   114     growth.
   115  
   116     "Shallow" export data means that the serialized types describe only
   117     a single package. If those types mention types from other packages,
   118     the type checker may need to request additional packages beyond
   119     just the direct imports. Type information for the entire transitive
   120     closure of imports is provided (lazily) by the DAG.
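
           For example (two schematic packages, not from this codebase):

        	package q
        	type T struct{ X int }

        	package p
        	import "q"
        	var V q.T // p's shallow export data mentions q.T by name only;
        	          // a reader must load q's export data, on demand via
        	          // the DAG, to learn the structure of T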
   121  
   122     For correct dependency analysis, the digest used as a cache key
   123     must reflect the "deep" export data, so it is derived recursively
   124     from the transitive closure. As an optimization, we needn't include
   125     every package of the transitive closure in the deep hash, only the
   126     packages that were actually requested by the type checker. This
   127     allows changes to a package that have no effect on its export data
   128     to be "pruned". The direct consumer will need to be re-executed,
   129     but if its export data is unchanged as a result, then indirect
   130     consumers may not need to be re-executed.  This allows, for example,
   131     one to insert a print statement in a function and not "rebuild" the
   132     whole application (though export data does record line numbers and
   133     offsets of types which may be perturbed by otherwise insignificant
   134     changes.)
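
           For example, suppose c imports b, and b imports a, and a print
           statement is inserted into a function body in a:

        	a: re-analyzed (its file content hash changed)
        	b: the direct consumer is re-executed
        	c: cache hit, if b's export data and facts are unchanged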
   135  
   136     The summary must record whether a package is transitively
   137     error-free (whether it would compile) because many analyzers are
   138     not safe to run on packages with inconsistent types.
   139  
   140     For fact encoding, we use the same fact set as the unitchecker
   141     (vet) to record and serialize analysis facts. The fact
   142     serialization mechanism is analogous to "deep" export data.
   143  
   144  */
   145  
   146  // TODO(adonovan):
   147  // - Add a (white-box) test of pruning when a change doesn't affect export data.
   148  // - Optimise pruning based on subset of packages mentioned in exportdata.
   149  // - Better logging so that it is possible to deduce why an analyzer
   150  //   is not being run--often due to very indirect failures.
   151  //   Even if the ultimate consumer decides to ignore errors,
   152  //   tests and other situations want to be assured of freedom from
   153  //   errors, not just missing results. This should be recorded.
   154  // - Split this into a subpackage, gopls/internal/cache/driver,
   155  //   consisting of this file and three helpers from errors.go.
   156  //   The (*snapshot).Analyze method would stay behind and make calls
   157  //   to the driver package.
   158  //   Steps:
   159  //   - define a narrow driver.Snapshot interface with only these methods:
   160  //        Metadata(PackageID) Metadata
   161  //        ReadFile(Context, URI) (file.Handle, error)
   162  //        View() *View // for Options
   163  //   - share cache.{goVersionRx,parseGoImpl}
   164  
   165  // AnalysisProgressTitle is the title of the progress report for ongoing
   166  // analysis. It is sought by regression tests for the progress reporting
   167  // feature.
   168  const AnalysisProgressTitle = "Analyzing Dependencies"
   169  
   170  // Analyze applies a set of analyzers to the packages denoted by pkgs,
   171  // and returns their diagnostics for those packages.
   172  //
   173  // The analyzers list must be duplicate free; order does not matter.
   174  //
   175  // Notifications of progress may be sent to the optional reporter.
   176  func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Package, analyzers []*settings.Analyzer, reporter *progress.Tracker) ([]*Diagnostic, error) {
   177  	start := time.Now() // for progress reporting
   178  
   179  	var tagStr string // sorted comma-separated list of PackageIDs
   180  	{
   181  		keys := make([]string, 0, len(pkgs))
   182  		for id := range pkgs {
   183  			keys = append(keys, string(id))
   184  		}
   185  		sort.Strings(keys)
   186  		tagStr = strings.Join(keys, ",")
   187  	}
   188  	ctx, done := event.Start(ctx, "snapshot.Analyze", tag.Package.Of(tagStr))
   189  	defer done()
   190  
   191  	// Filter and sort enabled root analyzers.
   192  	// A disabled analyzer may still be run if required by another.
   193  	toSrc := make(map[*analysis.Analyzer]*settings.Analyzer)
   194  	var enabled []*analysis.Analyzer // enabled subset + transitive requirements
   195  	for _, a := range analyzers {
   196  		if a.IsEnabled(s.Options()) {
   197  			toSrc[a.Analyzer] = a
   198  			enabled = append(enabled, a.Analyzer)
   199  		}
   200  	}
   201  	sort.Slice(enabled, func(i, j int) bool {
   202  		return enabled[i].Name < enabled[j].Name
   203  	})
   204  	analyzers = nil // prevent accidental use
   205  
   206  	enabled = requiredAnalyzers(enabled)
   207  
   208  	// Perform basic sanity checks.
   209  	// (Ideally we would do this only once.)
   210  	if err := analysis.Validate(enabled); err != nil {
   211  		return nil, fmt.Errorf("invalid analyzer configuration: %v", err)
   212  	}
   213  
   214  	stableNames := make(map[*analysis.Analyzer]string)
   215  
   216  	var facty []*analysis.Analyzer // facty subset of enabled + transitive requirements
   217  	for _, a := range enabled {
   218  		// TODO(adonovan): reject duplicate stable names (very unlikely).
   219  		stableNames[a] = stableName(a)
   220  
   221  		// Register fact types of all required analyzers.
   222  		if len(a.FactTypes) > 0 {
   223  			facty = append(facty, a)
   224  			for _, f := range a.FactTypes {
   225  				gob.Register(f) // <2us
   226  			}
   227  		}
   228  	}
   229  	facty = requiredAnalyzers(facty)
   230  
   231  	// File set for this batch (entire graph) of analysis.
   232  	fset := token.NewFileSet()
   233  
   234  	// Starting from the root packages and following DepsByPkgPath,
   235  	// build the DAG of packages we're going to analyze.
   236  	//
   237  	// Root nodes will run the enabled set of analyzers,
   238  	// whereas dependencies will run only the facty set.
   239  	// Because (by construction) enabled is a superset of facty,
   240  	// we can analyze each node with exactly one set of analyzers.
   241  	nodes := make(map[PackageID]*analysisNode)
   242  	var leaves []*analysisNode // nodes with no unfinished successors
   243  	var makeNode func(from *analysisNode, id PackageID) (*analysisNode, error)
   244  	makeNode = func(from *analysisNode, id PackageID) (*analysisNode, error) {
   245  		an, ok := nodes[id]
   246  		if !ok {
   247  			mp := s.Metadata(id)
   248  			if mp == nil {
   249  				return nil, bug.Errorf("no metadata for %s", id)
   250  			}
   251  
   252  			// -- preorder --
   253  
   254  			an = &analysisNode{
   255  				fset:        fset,
   256  				mp:          mp,
   257  				analyzers:   facty, // all nodes run at least the facty analyzers
   258  				allDeps:     make(map[PackagePath]*analysisNode),
   259  				exportDeps:  make(map[PackagePath]*analysisNode),
   260  				stableNames: stableNames,
   261  			}
   262  			nodes[id] = an
   263  
   264  			// -- recursion --
   265  
   266  			// Build subgraphs for dependencies.
   267  			an.succs = make(map[PackageID]*analysisNode, len(mp.DepsByPkgPath))
   268  			for _, depID := range mp.DepsByPkgPath {
   269  				dep, err := makeNode(an, depID)
   270  				if err != nil {
   271  					return nil, err
   272  				}
   273  				an.succs[depID] = dep
   274  
   275  				// Compute the union of all dependencies.
   276  				// (This step has quadratic complexity.)
   277  				for pkgPath, node := range dep.allDeps {
   278  					an.allDeps[pkgPath] = node
   279  				}
   280  			}
   281  
   282  			// -- postorder --
   283  
   284  			an.allDeps[mp.PkgPath] = an // add self entry (reflexive transitive closure)
   285  
   286  			// Add leaf nodes (no successors) directly to queue.
   287  			if len(an.succs) == 0 {
   288  				leaves = append(leaves, an)
   289  			}
   290  
   291  			// Load the contents of each compiled Go file through
   292  			// the snapshot's cache. (These are all cache hits as
   293  			// files are pre-loaded following packages.Load)
   294  			an.files = make([]file.Handle, len(mp.CompiledGoFiles))
   295  			for i, uri := range mp.CompiledGoFiles {
   296  				fh, err := s.ReadFile(ctx, uri)
   297  				if err != nil {
   298  					return nil, err
   299  				}
   300  				an.files[i] = fh
   301  			}
   302  		}
   303  		// Add edge from predecessor.
   304  		if from != nil {
   305  			from.unfinishedSuccs.Add(+1) // incref
   306  			an.preds = append(an.preds, from)
   307  		}
   308  		an.unfinishedPreds.Add(+1)
   309  		return an, nil
   310  	}
   311  
   312  	// For root packages, we run the enabled set of analyzers.
   313  	var roots []*analysisNode
   314  	for id := range pkgs {
   315  		root, err := makeNode(nil, id)
   316  		if err != nil {
   317  			return nil, err
   318  		}
   319  		root.analyzers = enabled
   320  		roots = append(roots, root)
   321  	}
   322  
   323  	// Now that we have read all files,
   324  	// we no longer need the snapshot.
   325  	// (but options are needed for progress reporting)
   326  	options := s.Options()
   327  	s = nil
   328  
   329  	// Progress reporting. If supported, gopls reports progress on analysis
   330  	// passes that are taking a long time.
   331  	maybeReport := func(completed int64) {}
   332  
   333  	// Enable progress reporting if the user has requested it
   334  	// and we have a capable reporter.
   335  	if reporter != nil && reporter.SupportsWorkDoneProgress() && options.AnalysisProgressReporting {
   336  		var reportAfter = options.ReportAnalysisProgressAfter // tests may set this to 0
   337  		const reportEvery = 1 * time.Second
   338  
   339  		ctx, cancel := context.WithCancel(ctx)
   340  		defer cancel()
   341  
   342  		var (
   343  			reportMu   sync.Mutex
   344  			lastReport time.Time
   345  			wd         *progress.WorkDone
   346  		)
   347  		defer func() {
   348  			reportMu.Lock()
   349  			defer reportMu.Unlock()
   350  
   351  			if wd != nil {
   352  				wd.End(ctx, "Done.") // ensure that the progress report exits
   353  			}
   354  		}()
   355  		maybeReport = func(completed int64) {
   356  			now := time.Now()
   357  			if now.Sub(start) < reportAfter {
   358  				return
   359  			}
   360  
   361  			reportMu.Lock()
   362  			defer reportMu.Unlock()
   363  
   364  			if wd == nil {
   365  				wd = reporter.Start(ctx, AnalysisProgressTitle, "", nil, cancel)
   366  			}
   367  
   368  			if now.Sub(lastReport) > reportEvery {
   369  				lastReport = now
   370  				// Trailing space is intentional: some LSP clients strip newlines.
   371  				msg := fmt.Sprintf(`Indexed %d/%d packages. (Set "analysisProgressReporting" to false to disable notifications.)`,
   372  					completed, len(nodes))
   373  				pct := 100 * float64(completed) / float64(len(nodes))
   374  				wd.Report(ctx, msg, pct)
   375  			}
   376  		}
   377  	}
   378  
   379  	// Execute phase: run leaves first, adding
   380  	// new nodes to the queue as they become leaves.
   381  	var g errgroup.Group
   382  
   383  	// Analysis is CPU-bound.
   384  	//
   385  	// Note: avoid g.SetLimit here: it makes g.Go stop accepting work, which
   386  	// prevents workers from enqueuing, and thus finishing, and thus allowing the
   387  	// group to make progress: deadlock.
   388  	limiter := make(chan unit, runtime.GOMAXPROCS(0))
   389  	var completed atomic.Int64
   390  
   391  	var enqueue func(*analysisNode)
   392  	enqueue = func(an *analysisNode) {
   393  		g.Go(func() error {
   394  			limiter <- unit{}
   395  			defer func() { <-limiter }()
   396  
   397  			summary, err := an.runCached(ctx)
   398  			if err != nil {
   399  				return err // cancelled, or failed to produce a package
   400  			}
   401  			maybeReport(completed.Add(1))
   402  			an.summary = summary
   403  
   404  			// Notify each waiting predecessor,
   405  			// and enqueue it when it becomes a leaf.
   406  			for _, pred := range an.preds {
   407  				if pred.unfinishedSuccs.Add(-1) == 0 { // decref
   408  					enqueue(pred)
   409  				}
   410  			}
   411  
   412  			// Notify each successor that we no longer need
   413  			// its action summaries, which hold Result values.
   414  			// After the last one, delete it, so that we
   415  			// free up large results such as SSA.
   416  			for _, succ := range an.succs {
   417  				succ.decrefPreds()
   418  			}
   419  			return nil
   420  		})
   421  	}
   422  	for _, leaf := range leaves {
   423  		enqueue(leaf)
   424  	}
   425  	if err := g.Wait(); err != nil {
   426  		return nil, err // cancelled, or failed to produce a package
   427  	}
   428  
   429  	// Inv: all root nodes now have a summary (#66732).
   430  	//
   431  	// We know empirically that this invariant is sometimes violated:
   432  	// either the summary was "successfully" set to nil (above), or there
   433  	// is a problem with the graph such that enqueuing leaves does
   434  	// not lead to completion of roots (or an error).
   435  	for _, root := range roots {
   436  		if root.summary == nil {
   437  			bug.Report("root analysisNode has nil summary")
   438  		}
   439  	}
   440  
   441  	// Report diagnostics only from enabled actions that succeeded.
   442  	// Errors from creating or analyzing packages are ignored.
   443  	// Diagnostics are reported in the order of the analyzers argument.
   444  	//
   445  	// TODO(adonovan): ignoring action errors gives the caller no way
   446  	// to distinguish "there are no problems in this code" from
   447  	// "the code (or analyzers!) are so broken that we couldn't even
   448  	// begin the analysis you asked for".
   449  	// Even if current callers choose to discard the
   450  	// results, we should propagate the per-action errors.
   451  	var results []*Diagnostic
   452  	for _, root := range roots {
   453  		for _, a := range enabled {
   454  			// Skip analyzers that were added only to
   455  			// fulfil requirements of the original set.
   456  			srcAnalyzer, ok := toSrc[a]
   457  			if !ok {
   458  				// Although this 'skip' operation is logically sound,
   459  				// it is nonetheless surprising that its absence should
   460  				// cause #60909 since none of the analyzers currently added for
   461  				// requirements (e.g. ctrlflow, inspect, buildssa)
   462  				// is capable of reporting diagnostics.
   463  				if summary := root.summary.Actions[stableNames[a]]; summary != nil {
   464  					if n := len(summary.Diagnostics); n > 0 {
   465  						bug.Reportf("Internal error: got %d unexpected diagnostics from analyzer %s. This analyzer was added only to fulfil the requirements of the requested set of analyzers, and it is not expected that such analyzers report diagnostics. Please report this in issue #60909.", n, a)
   466  					}
   467  				}
   468  				continue
   469  			}
   470  
   471  			// Inv: root.summary is the successful result of run (via runCached).
   472  			// TODO(adonovan): fix: root.summary is sometimes nil! (#66732).
   473  			summary, ok := root.summary.Actions[stableNames[a]]
   474  			if summary == nil {
   475  				panic(fmt.Sprintf("analyzeSummary.Actions[%q] = (nil, %t); got %v (#60551)",
   476  					stableNames[a], ok, root.summary.Actions))
   477  			}
   478  			if summary.Err != "" {
   479  				continue // action failed
   480  			}
   481  			for _, gobDiag := range summary.Diagnostics {
   482  				results = append(results, toSourceDiagnostic(srcAnalyzer, &gobDiag))
   483  			}
   484  		}
   485  	}
   486  	return results, nil
   487  }
   488  
   489  func (an *analysisNode) decrefPreds() {
   490  	if an.unfinishedPreds.Add(-1) == 0 {
   491  		an.summary.Actions = nil
   492  	}
   493  }
   494  
   495  // An analysisNode is a node in a doubly-linked DAG isomorphic to the
   496  // import graph. Each node represents a single package, and the DAG
   497  // represents a batch of analysis work done at once using a single
   498  // realm of token.Pos or types.Object values.
   499  //
   500  // A complete DAG is created anew for each batch of analysis;
   501  // subgraphs are not reused over time. Each node's *types.Package
   502  // field is initially nil and is populated on demand, either from
   503  // type-checking syntax trees (typeCheck) or from importing export
   504  // data (_import). When this occurs, the typesOnce event becomes
   505  // "done".
   506  //
   507  // Each node's allDeps map is a "view" of all its dependencies keyed by
   508  // package path, which defines the types.Importer mapping used when
   509  // populating the node's types.Package. Different nodes have different
   510  // views (e.g. due to variants), but two nodes that are related by
   511  // graph ordering have views that are consistent in their overlap.
   512  // exportDeps is the subset actually referenced by export data;
   513  // this is the set for which we attempt to decode facts.
   514  //
   515  // Each node's run method is called in parallel postorder. On success,
   516  // its summary field is populated, either from the cache (hit), or by
   517  // type-checking and analyzing syntax (miss).
   518  type analysisNode struct {
   519  	fset            *token.FileSet              // file set shared by entire batch (DAG)
   520  	mp              *metadata.Package           // metadata for this package
   521  	files           []file.Handle               // contents of CompiledGoFiles
   522  	analyzers       []*analysis.Analyzer        // set of analyzers to run
   523  	preds           []*analysisNode             // graph edges:
   524  	succs           map[PackageID]*analysisNode //   (preds -> self -> succs)
   525  	unfinishedSuccs atomic.Int32
   526  	unfinishedPreds atomic.Int32                  // effectively a summary.Actions refcount
   527  	allDeps         map[PackagePath]*analysisNode // all dependencies including self
   528  	exportDeps      map[PackagePath]*analysisNode // subset of allDeps ref'd by export data (+self)
   529  	summary         *analyzeSummary               // serializable result of analyzing this package
   530  	stableNames     map[*analysis.Analyzer]string // cross-process stable names for Analyzers
   531  
   532  	typesOnce sync.Once      // guards lazy population of types and typesErr fields
   533  	types     *types.Package // type information lazily imported from summary
   534  	typesErr  error          // an error producing type information
   535  }
   536  
   537  func (an *analysisNode) String() string { return string(an.mp.ID) }
   538  
   539  // _import imports this node's types.Package from export data, if not already done.
   540  // Precondition: analysis was a success.
   541  // Postcondition: an.types and an.exportDeps are populated.
   542  func (an *analysisNode) _import() (*types.Package, error) {
   543  	an.typesOnce.Do(func() {
   544  		if an.mp.PkgPath == "unsafe" {
   545  			an.types = types.Unsafe
   546  			return
   547  		}
   548  
   549  		an.types = types.NewPackage(string(an.mp.PkgPath), string(an.mp.Name))
   550  
   551  		// getPackages recursively imports each dependency
   552  		// referenced by the export data, in parallel.
   553  		getPackages := func(items []gcimporter.GetPackagesItem) error {
   554  			var g errgroup.Group
   555  			for i, item := range items {
   556  				path := PackagePath(item.Path)
   557  				dep, ok := an.allDeps[path]
   558  				if !ok {
   559  					// This early return bypasses Wait; that's ok.
   560  					return fmt.Errorf("%s: unknown dependency %q", an.mp, path)
   561  				}
   562  				an.exportDeps[path] = dep // record, for later fact decoding
   563  				if dep == an {
   564  					if an.typesErr != nil {
   565  						return an.typesErr
   566  					} else {
   567  						items[i].Pkg = an.types
   568  					}
   569  				} else {
   570  					i := i
   571  					g.Go(func() error {
   572  						depPkg, err := dep._import()
   573  						if err == nil {
   574  							items[i].Pkg = depPkg
   575  						}
   576  						return err
   577  					})
   578  				}
   579  			}
   580  			return g.Wait()
   581  		}
   582  		pkg, err := gcimporter.IImportShallow(an.fset, getPackages, an.summary.Export, string(an.mp.PkgPath), bug.Reportf)
   583  		if err != nil {
   584  			an.typesErr = bug.Errorf("%s: invalid export data: %v", an.mp, err)
   585  			an.types = nil
   586  		} else if pkg != an.types {
   587  			log.Fatalf("%s: inconsistent packages", an.mp)
   588  		}
   589  	})
   590  	return an.types, an.typesErr
   591  }
   592  
   593  // analyzeSummary is a gob-serializable summary of successfully
   594  // applying a list of analyzers to a package.
   595  type analyzeSummary struct {
   596  	Export         []byte    // encoded types of package
   597  	DeepExportHash file.Hash // hash of reflexive transitive closure of export data
   598  	Compiles       bool      // transitively free of list/parse/type errors
   599  	Actions        actionMap // maps analyzer stable name to analysis results (*actionSummary)
   600  }
   601  
   602  // actionMap defines a stable Gob encoding for a map.
   603  // TODO(adonovan): generalize and move to a library when we can use generics.
   604  type actionMap map[string]*actionSummary
   605  
   606  var (
   607  	_ gob.GobEncoder = (actionMap)(nil)
   608  	_ gob.GobDecoder = (*actionMap)(nil)
   609  )
   610  
   611  type actionsMapEntry struct {
   612  	K string
   613  	V *actionSummary
   614  }
   615  
   616  func (m actionMap) GobEncode() ([]byte, error) {
   617  	entries := make([]actionsMapEntry, 0, len(m))
   618  	for k, v := range m {
   619  		entries = append(entries, actionsMapEntry{k, v})
   620  	}
   621  	sort.Slice(entries, func(i, j int) bool {
   622  		return entries[i].K < entries[j].K
   623  	})
   624  	var buf bytes.Buffer
   625  	err := gob.NewEncoder(&buf).Encode(entries)
   626  	return buf.Bytes(), err
   627  }
   628  
   629  func (m *actionMap) GobDecode(data []byte) error {
   630  	var entries []actionsMapEntry
   631  	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&entries); err != nil {
   632  		return err
   633  	}
   634  	*m = make(actionMap, len(entries))
   635  	for _, e := range entries {
   636  		(*m)[e.K] = e.V
   637  	}
   638  	return nil
   639  }
   640  
   641  // actionSummary is a gob-serializable summary of one possibly failed analysis action.
   642  // If Err is non-empty, the other fields are undefined.
   643  type actionSummary struct {
   644  	Facts       []byte    // the encoded facts.Set
   645  	FactsHash   file.Hash // hash(Facts)
   646  	Diagnostics []gobDiagnostic
   647  	Err         string // "" => success
   648  }
   649  
   650  // runCached applies a list of analyzers (plus any others
   651  // transitively required by them) to a package.  It succeeds as long
   652  // as it could produce a types.Package, even if there were direct or
   653  // indirect list/parse/type errors, and even if all the analysis
   654  // actions failed. It usually fails only if the package was unknown,
   655  // a file was missing, or the operation was cancelled.
   656  //
   657  // Postcondition: runCached must not continue to use the snapshot
   658  // (in background goroutines) after it has returned; see memoize.RefCounted.
   659  func (an *analysisNode) runCached(ctx context.Context) (*analyzeSummary, error) {
   660  	// At this point we have the action results (serialized
   661  	// packages and facts) of our immediate dependencies,
   662  	// and the metadata and content of this package.
   663  	//
   664  	// We now compute a hash for all our inputs, and consult a
   665  	// global cache of promised results. If nothing material
   666  	// has changed, we'll make a hit in the shared cache.
   667  	//
   668  	// The hash of our inputs is based on the serialized export
   669  	// data and facts so that immaterial changes can be pruned
   670  	// without decoding.
   671  	key := an.cacheKey()
   672  
   673  	// Access the cache.
   674  	var summary *analyzeSummary
   675  	const cacheKind = "analysis"
   676  	if data, err := filecache.Get(cacheKind, key); err == nil {
   677  		// cache hit
   678  		analyzeSummaryCodec.Decode(data, &summary)
   679  		if summary == nil { // debugging #66732
   680  			bug.Reportf("analyzeSummaryCodec.Decode yielded nil *analyzeSummary")
   681  		}
   682  	} else if err != filecache.ErrNotFound {
   683  		return nil, bug.Errorf("internal error reading shared cache: %v", err)
   684  	} else {
   685  		// Cache miss: do the work.
   686  		var err error
   687  		summary, err = an.run(ctx)
   688  		if err != nil {
   689  			return nil, err
   690  		}
   691  		if summary == nil { // debugging #66732 (can't happen)
   692  			bug.Reportf("analyzeNode.run returned nil *analyzeSummary")
   693  		}
   694  
   695  		an.unfinishedPreds.Add(+1) // incref
   696  		go func() {
   697  			defer an.decrefPreds() // decref
   698  
   699  			cacheLimit <- unit{}            // acquire token
   700  			defer func() { <-cacheLimit }() // release token
   701  
   702  			data := analyzeSummaryCodec.Encode(summary)
   703  			if false {
   704  				log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), an.mp.ID)
   705  			}
   706  			if err := filecache.Set(cacheKind, key, data); err != nil {
   707  				event.Error(ctx, "internal error updating analysis shared cache", err)
   708  			}
   709  		}()
   710  	}
   711  
   712  	return summary, nil
   713  }
   714  
   715  // cacheLimit reduces parallelism of cache updates.
   716  // We allow more than typical GOMAXPROCS as it's a mix of CPU and I/O.
   717  var cacheLimit = make(chan unit, 32)
   718  
   719  // cacheKey returns a cache key that is a cryptographic digest
   720  // of all the values that might affect type checking and analysis:
   721  // the analyzer names, package metadata, names and contents of
   722  // compiled Go files, and vdeps (successor) information
   723  // (export data and facts).
   724  func (an *analysisNode) cacheKey() [sha256.Size]byte {
   725  	hasher := sha256.New()
   726  
   727  	// In principle, a key must be the hash of an
   728  	// unambiguous encoding of all the relevant data.
   729  	// If it's ambiguous, we risk collisions.
   730  
   731  	// analyzers
   732  	fmt.Fprintf(hasher, "analyzers: %d\n", len(an.analyzers))
   733  	for _, a := range an.analyzers {
   734  		fmt.Fprintln(hasher, a.Name)
   735  	}
   736  
   737  	// package metadata
   738  	mp := an.mp
   739  	fmt.Fprintf(hasher, "package: %s %s %s\n", mp.ID, mp.Name, mp.PkgPath)
   740  	// We can ignore mp.DepsBy{Pkg,Import}Path: although the logic
   741  	// uses those fields, we account for them by hashing vdeps.
   742  
   743  	// type sizes
   744  	wordSize := an.mp.TypesSizes.Sizeof(types.Typ[types.Int])
   745  	maxAlign := an.mp.TypesSizes.Alignof(types.NewPointer(types.Typ[types.Int64]))
   746  	fmt.Fprintf(hasher, "sizes: %d %d\n", wordSize, maxAlign)
   747  
   748  	// metadata errors: used for 'compiles' field
   749  	fmt.Fprintf(hasher, "errors: %d\n", len(mp.Errors))
   750  
   751  	// module Go version
   752  	if mp.Module != nil && mp.Module.GoVersion != "" {
   753  		fmt.Fprintf(hasher, "go %s\n", mp.Module.GoVersion)
   754  	}
   755  
   756  	// file names and contents
   757  	fmt.Fprintf(hasher, "files: %d\n", len(an.files))
   758  	for _, fh := range an.files {
   759  		fmt.Fprintln(hasher, fh.Identity())
   760  	}
   761  
   762  	// vdeps, in PackageID order
   763  	depIDs := maps.Keys(an.succs)
   764  	// TODO(adonovan): use go1.2x slices.Sort(depIDs).
   765  	sort.Slice(depIDs, func(i, j int) bool { return depIDs[i] < depIDs[j] })
   766  	for _, depID := range depIDs {
   767  		vdep := an.succs[depID]
   768  		fmt.Fprintf(hasher, "dep: %s\n", vdep.mp.PkgPath)
   769  		fmt.Fprintf(hasher, "export: %s\n", vdep.summary.DeepExportHash)
   770  
   771  		// action results: errors and facts
   772  		actions := vdep.summary.Actions
   773  		names := make([]string, 0, len(actions))
   774  		for name := range actions {
   775  			names = append(names, name)
   776  		}
   777  		sort.Strings(names)
   778  		for _, name := range names {
   779  			summary := actions[name]
   780  			fmt.Fprintf(hasher, "action %s\n", name)
   781  			if summary.Err != "" {
   782  				fmt.Fprintf(hasher, "error %s\n", summary.Err)
   783  			} else {
   784  				fmt.Fprintf(hasher, "facts %s\n", summary.FactsHash)
   785  				// We can safely omit summary.Diagnostics
   786  				// from the key since they have no downstream effect.
   787  			}
   788  		}
   789  	}
   790  
   791  	var hash [sha256.Size]byte
   792  	hasher.Sum(hash[:0])
   793  	return hash
   794  }
   795  
   796  // run implements the cache-miss case.
   797  // This function does not access the snapshot.
   798  //
   799  // Postcondition: on success, the analyzeSummary.Actions
   800  // key set is {stableName(a) for a in analyzers}.
   801  func (an *analysisNode) run(ctx context.Context) (*analyzeSummary, error) {
   802  	// Parse only the "compiled" Go files.
   803  	// Do the computation in parallel.
   804  	parsed := make([]*ParsedGoFile, len(an.files))
   805  	{
   806  		var group errgroup.Group
   807  		group.SetLimit(4) // not too much: run itself is already called in parallel
   808  		for i, fh := range an.files {
   809  			i, fh := i, fh
   810  			group.Go(func() error {
   811  				// Call parseGoImpl directly, not the caching wrapper,
   812  				// as cached ASTs require the global FileSet.
   813  				// ast.Object resolution is unfortunately an implied part of the
   814  				// go/analysis contract.
   815  				pgf, err := parseGoImpl(ctx, an.fset, fh, ParseFull&^parser.SkipObjectResolution, false)
   816  				parsed[i] = pgf
   817  				return err
   818  			})
   819  		}
   820  		if err := group.Wait(); err != nil {
   821  			return nil, err // cancelled, or catastrophic error (e.g. missing file)
   822  		}
   823  	}
   824  
   825  	// Type-check the package syntax.
   826  	pkg := an.typeCheck(parsed)
   827  
   828  	// Publish the completed package.
   829  	an.typesOnce.Do(func() { an.types = pkg.types })
   830  	if an.types != pkg.types {
   831  		log.Fatalf("typesOnce prematurely done")
   832  	}
   833  
   834  	// Compute the union of exportDeps across our direct imports.
   835  	// This is the set that will be needed by the fact decoder.
   836  	allExportDeps := make(map[PackagePath]*analysisNode)
   837  	for _, succ := range an.succs {
   838  		for k, v := range succ.exportDeps {
   839  			allExportDeps[k] = v
   840  		}
   841  	}
   842  
   843  	// The fact decoder needs a means to look up a Package by path.
   844  	pkg.factsDecoder = facts.NewDecoderFunc(pkg.types, func(path string) *types.Package {
   845  		// Note: Decode is called concurrently, and thus so is this function.
   846  
   847  		// Does the fact relate to a package referenced by export data?
   848  		if dep, ok := allExportDeps[PackagePath(path)]; ok {
   849  			dep.typesOnce.Do(func() { log.Fatal("dep.types not populated") }) // asserts types is already populated
   850  			if dep.typesErr == nil {
   851  				return dep.types
   852  			}
   853  			return nil
   854  		}
   855  
   856  		// If the fact relates to a dependency not referenced
   857  		// by export data, it is safe to ignore it.
   858  		// (In that case dep.types exists but may be unpopulated
   859  		// or in the process of being populated from export data.)
   860  		if an.allDeps[PackagePath(path)] == nil {
   861  			log.Fatalf("fact package %q is not a dependency", path)
   862  		}
   863  		return nil
   864  	})
   865  
   866  	// Poll cancellation state.
   867  	if err := ctx.Err(); err != nil {
   868  		return nil, err
   869  	}
   870  
   871  	// -- analysis --
   872  
   873  	// Build action graph for this package.
   874  	// Each graph node (action) is one unit of analysis.
   875  	actions := make(map[*analysis.Analyzer]*action)
   876  	var mkAction func(a *analysis.Analyzer) *action
   877  	mkAction = func(a *analysis.Analyzer) *action {
   878  		act, ok := actions[a]
   879  		if !ok {
   880  			var hdeps []*action
   881  			for _, req := range a.Requires {
   882  				hdeps = append(hdeps, mkAction(req))
   883  			}
   884  			act = &action{
   885  				a:          a,
   886  				stableName: an.stableNames[a],
   887  				pkg:        pkg,
   888  				vdeps:      an.succs,
   889  				hdeps:      hdeps,
   890  			}
   891  			actions[a] = act
   892  		}
   893  		return act
   894  	}
   895  
   896  	// Build actions for initial package.
   897  	var roots []*action
   898  	for _, a := range an.analyzers {
   899  		roots = append(roots, mkAction(a))
   900  	}
   901  
   902  	// Execute the graph in parallel.
   903  	execActions(roots)
   904  	// Inv: each root's summary is set (whether success or error).
   905  
   906  	// Don't return (or cache) the result in case of cancellation.
   907  	if err := ctx.Err(); err != nil {
   908  		return nil, err // cancelled
   909  	}
   910  
   911  	// Return summaries only for the requested actions.
   912  	summaries := make(map[string]*actionSummary)
   913  	for _, root := range roots {
   914  		if root.summary == nil {
   915  			panic("root has nil action.summary (#60551)")
   916  		}
   917  		summaries[root.stableName] = root.summary
   918  	}
   919  
   920  	return &analyzeSummary{
   921  		Export:         pkg.export,
   922  		DeepExportHash: pkg.deepExportHash,
   923  		Compiles:       pkg.compiles,
   924  		Actions:        summaries,
   925  	}, nil
   926  }
   927  
   928  // Postcondition: analysisPackage.types and an.exportDeps are populated.
   929  func (an *analysisNode) typeCheck(parsed []*ParsedGoFile) *analysisPackage {
   930  	mp := an.mp
   931  
   932  	if false { // debugging
   933  		log.Println("typeCheck", mp.ID)
   934  	}
   935  
   936  	pkg := &analysisPackage{
   937  		mp:       mp,
   938  		fset:     an.fset,
   939  		parsed:   parsed,
   940  		files:    make([]*ast.File, len(parsed)),
   941  		compiles: len(mp.Errors) == 0, // false => list error
   942  		types:    types.NewPackage(string(mp.PkgPath), string(mp.Name)),
   943  		typesInfo: &types.Info{
   944  			Types:      make(map[ast.Expr]types.TypeAndValue),
   945  			Defs:       make(map[*ast.Ident]types.Object),
   946  			Instances:  make(map[*ast.Ident]types.Instance),
   947  			Implicits:  make(map[ast.Node]types.Object),
   948  			Selections: make(map[*ast.SelectorExpr]*types.Selection),
   949  			Scopes:     make(map[ast.Node]*types.Scope),
   950  			Uses:       make(map[*ast.Ident]types.Object),
   951  		},
   952  		typesSizes: mp.TypesSizes,
   953  	}
   954  	versions.InitFileVersions(pkg.typesInfo)
   955  
   956  	// Unsafe has no syntax.
   957  	if mp.PkgPath == "unsafe" {
   958  		pkg.types = types.Unsafe
   959  		return pkg
   960  	}
   961  
   962  	for i, p := range parsed {
   963  		pkg.files[i] = p.File
   964  		if p.ParseErr != nil {
   965  			pkg.compiles = false // parse error
   966  		}
   967  	}
   968  
   969  	for _, vdep := range an.succs {
   970  		if !vdep.summary.Compiles {
   971  			pkg.compiles = false // transitive error
   972  		}
   973  	}
   974  
   975  	cfg := &types.Config{
   976  		Sizes: mp.TypesSizes,
   977  		Error: func(e error) {
   978  			pkg.compiles = false // type error
   979  
   980  			// Suppress type errors in files with parse errors
   981  			// as parser recovery can be quite lossy (#59888).
   982  			typeError := e.(types.Error)
   983  			for _, p := range parsed {
   984  				if p.ParseErr != nil && astutil.NodeContains(p.File, typeError.Pos) {
   985  					return
   986  				}
   987  			}
   988  			pkg.typeErrors = append(pkg.typeErrors, typeError)
   989  		},
   990  		Importer: importerFunc(func(importPath string) (*types.Package, error) {
   991  			// Beware that returning an error from this function
   992  			// will cause the type checker to synthesize a fake
   993  			// package whose Path is importPath, potentially
   994  			// losing a vendor/ prefix. If type-checking errors
   995  			// are swallowed, these packages may be confusing.
   996  
   997  			// Map ImportPath to ID.
   998  			id, ok := mp.DepsByImpPath[ImportPath(importPath)]
   999  			if !ok {
  1000  				// The import syntax is inconsistent with the metadata.
  1001  				// This could be because the import declaration was
  1002  				// incomplete and the metadata only includes complete
  1003  				// imports; or because the metadata ignores import
  1004  				// edges that would lead to cycles in the graph.
  1005  				return nil, fmt.Errorf("missing metadata for import of %q", importPath)
  1006  			}
  1007  
  1008  			// Map ID to node. (id may be "")
  1009  			dep := an.succs[id]
  1010  			if dep == nil {
  1011  				// Analogous to (*snapshot).missingPkgError
  1012  				// in the logic for regular type-checking,
  1013  				// but without a snapshot we can't provide
  1014  				// such detail, and anyway most analysis
  1015  				// failures aren't surfaced in the UI.
  1016  				return nil, fmt.Errorf("no required module provides analysis package %q (id=%q)", importPath, id)
  1017  			}
  1018  
  1019  			// (Duplicates logic from check.go.)
  1020  			if !metadata.IsValidImport(an.mp.PkgPath, dep.mp.PkgPath) {
  1021  				return nil, fmt.Errorf("invalid use of internal package %s", importPath)
  1022  			}
  1023  
  1024  			return dep._import()
  1025  		}),
  1026  	}
  1027  
  1028  	// Set Go dialect.
  1029  	if mp.Module != nil && mp.Module.GoVersion != "" {
  1030  		goVersion := "go" + mp.Module.GoVersion
  1031  		if validGoVersion(goVersion) {
  1032  			typesinternal.SetGoVersion(cfg, goVersion)
  1033  		}
  1034  	}
  1035  
  1036  	// We want to type check cgo code if go/types supports it.
  1037  	// We passed typecheckCgo to go/packages when we Loaded.
  1038  	// TODO(adonovan): do we actually need this??
  1039  	typesinternal.SetUsesCgo(cfg)
  1040  
  1041  	check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)
  1042  
  1043  	// Type checking errors are handled via the config, so ignore them here.
  1044  	_ = check.Files(pkg.files)
  1045  
  1046  	// debugging (type errors are quite normal)
  1047  	if false {
  1048  		if pkg.typeErrors != nil {
  1049  			log.Printf("package %s has type errors: %v", pkg.types.Path(), pkg.typeErrors)
  1050  		}
  1051  	}
  1052  
  1053  	// Emit the export data and compute the recursive hash.
  1054  	export, err := gcimporter.IExportShallow(pkg.fset, pkg.types, bug.Reportf)
  1055  	if err != nil {
  1056  		// TODO(adonovan): in light of exporter bugs such as #57729,
  1057  		// consider using bug.Report here and retrying the IExportShallow
  1058  		// call here using an empty types.Package.
  1059  		log.Fatalf("internal error writing shallow export data: %v", err)
  1060  	}
  1061  	pkg.export = export
  1062  
  1063  	// Compute a recursive hash to account for the export data of
  1064  	// this package and each dependency referenced by it.
  1065  	// Also, populate exportDeps.
  1066  	hash := sha256.New()
  1067  	fmt.Fprintf(hash, "%s %d\n", mp.PkgPath, len(export))
  1068  	hash.Write(export)
  1069  	paths, err := readShallowManifest(export)
  1070  	if err != nil {
  1071  		log.Fatalf("internal error: bad export data: %v", err)
  1072  	}
  1073  	for _, path := range paths {
  1074  		dep, ok := an.allDeps[path]
  1075  		if !ok {
  1076  			log.Fatalf("%s: missing dependency: %q", an, path)
  1077  		}
  1078  		fmt.Fprintf(hash, "%s %s\n", dep.mp.PkgPath, dep.summary.DeepExportHash)
  1079  		an.exportDeps[path] = dep
  1080  	}
  1081  	an.exportDeps[mp.PkgPath] = an // self
  1082  	hash.Sum(pkg.deepExportHash[:0])
  1083  
  1084  	return pkg
  1085  }
  1086  
  1087  // readShallowManifest returns the manifest of packages referenced by
  1088  // a shallow export data file for a package (excluding the package itself).
  1089  // TODO(adonovan): add a test.
  1090  func readShallowManifest(export []byte) ([]PackagePath, error) {
  1091  	const selfPath = "<self>" // dummy path
  1092  	var paths []PackagePath
  1093  	getPackages := func(items []gcimporter.GetPackagesItem) error {
  1094  		paths = []PackagePath{} // non-nil
  1095  		for _, item := range items {
  1096  			if item.Path != selfPath {
  1097  				paths = append(paths, PackagePath(item.Path))
  1098  			}
  1099  		}
  1100  		return errors.New("stop") // terminate importer
  1101  	}
  1102  	_, err := gcimporter.IImportShallow(token.NewFileSet(), getPackages, export, selfPath, bug.Reportf)
  1103  	if paths == nil {
  1104  		if err != nil {
  1105  			return nil, err // failed before getPackages callback
  1106  		}
  1107  		return nil, bug.Errorf("internal error: IImportShallow did not call getPackages")
  1108  	}
  1109  	return paths, nil // success
  1110  }
  1111  
  1112  // analysisPackage contains information about a package, including
  1113  // syntax trees, used transiently during its type-checking and analysis.
  1114  type analysisPackage struct {
  1115  	mp             *metadata.Package
  1116  	fset           *token.FileSet // file set shared by the entire batch (see analysisNode.fset)
  1117  	parsed         []*ParsedGoFile
  1118  	files          []*ast.File // same as parsed[i].File
  1119  	types          *types.Package
  1120  	compiles       bool // package is transitively free of list/parse/type errors
  1121  	factsDecoder   *facts.Decoder
  1122  	export         []byte    // encoding of types.Package
  1123  	deepExportHash file.Hash // reflexive transitive hash of export data
  1124  	typesInfo      *types.Info
  1125  	typeErrors     []types.Error
  1126  	typesSizes     types.Sizes
  1127  }
  1128  
  1129  // An action represents one unit of analysis work: the application of
  1130  // one analysis to one package. Actions form a DAG, both within a
  1131  // package (as different analyzers are applied, either in sequence or
  1132  // parallel), and across packages (as dependencies are analyzed).
  1133  type action struct {
  1134  	once       sync.Once
  1135  	a          *analysis.Analyzer
  1136  	stableName string // cross-process stable name of analyzer
  1137  	pkg        *analysisPackage
  1138  	hdeps      []*action                   // horizontal dependencies
  1139  	vdeps      map[PackageID]*analysisNode // vertical dependencies
  1140  
  1141  	// results of action.exec():
  1142  	result  interface{} // result of Run function, of type a.ResultType
  1143  	summary *actionSummary
  1144  	err     error
  1145  }
  1146  
  1147  func (act *action) String() string {
  1148  	return fmt.Sprintf("%s@%s", act.a.Name, act.pkg.mp.ID)
  1149  }
  1150  
  1151  // execActions executes a set of action graph nodes in parallel.
  1152  // Postcondition: each action.summary is set, even in case of error.
  1153  func execActions(actions []*action) {
  1154  	var wg sync.WaitGroup
  1155  	for _, act := range actions {
  1156  		act := act
  1157  		wg.Add(1)
  1158  		go func() {
  1159  			defer wg.Done()
  1160  			act.once.Do(func() {
  1161  				execActions(act.hdeps) // analyze "horizontal" dependencies
  1162  				act.result, act.summary, act.err = act.exec()
  1163  				if act.err != nil {
  1164  					act.summary = &actionSummary{Err: act.err.Error()}
  1165  					// TODO(adonovan): suppress logging. But
  1166  					// shouldn't the root error's causal chain
  1167  					// include this information?
  1168  					if false { // debugging
  1169  						log.Printf("act.exec(%v) failed: %v", act, act.err)
  1170  					}
  1171  				}
  1172  			})
  1173  			if act.summary == nil {
  1174  				panic("nil action.summary (#60551)")
  1175  			}
  1176  		}()
  1177  	}
  1178  	wg.Wait()
  1179  }
  1180  
  1181  // exec defines the execution of a single action.
  1182  // It returns the (ephemeral) result of the analyzer's Run function,
  1183  // along with its (serializable) facts and diagnostics.
  1184  // Or it returns an error if the analyzer did not run to
  1185  // completion and deliver a valid result.
  1186  func (act *action) exec() (interface{}, *actionSummary, error) {
  1187  	analyzer := act.a
  1188  	pkg := act.pkg
  1189  
  1190  	hasFacts := len(analyzer.FactTypes) > 0
  1191  
  1192  	// Report an error if any action dependency (vertical or horizontal) failed.
  1193  	// To avoid long error messages describing chains of failure,
  1194  // we return the dependency's error unadorned.
  1195  	if hasFacts {
  1196  		// TODO(adonovan): use deterministic order.
  1197  		for _, vdep := range act.vdeps {
  1198  			if summ := vdep.summary.Actions[act.stableName]; summ.Err != "" {
  1199  				return nil, nil, errors.New(summ.Err)
  1200  			}
  1201  		}
  1202  	}
  1203  	for _, dep := range act.hdeps {
  1204  		if dep.err != nil {
  1205  			return nil, nil, dep.err
  1206  		}
  1207  	}
  1208  	// Inv: all action dependencies succeeded.
  1209  
  1210  	// Were there list/parse/type errors that might prevent analysis?
  1211  	if !pkg.compiles && !analyzer.RunDespiteErrors {
  1212  		return nil, nil, fmt.Errorf("skipping analysis %q because package %q does not compile", analyzer.Name, pkg.mp.ID)
  1213  	}
  1214  	// Inv: package is well-formed enough to proceed with analysis.
  1215  
  1216  	if false { // debugging
  1217  		log.Println("action.exec", act)
  1218  	}
  1219  
  1220  	// Gather analysis Result values from horizontal dependencies.
  1221  	inputs := make(map[*analysis.Analyzer]interface{})
  1222  	for _, dep := range act.hdeps {
  1223  		inputs[dep.a] = dep.result
  1224  	}
  1225  
  1226  	// TODO(adonovan): opt: facts.Set works but it may be more
  1227  	// efficient to fork and tailor it to our precise needs.
  1228  	//
  1229  	// We've already sharded the fact encoding by action
  1230  	// so that it can be done in parallel.
  1231  	// We could eliminate locking.
  1232  	// We could also dovetail more closely with the export data
  1233  	// decoder to obtain a more compact representation of
  1234  	// packages and objects (e.g. its internal IDs, instead
  1235  	// of PkgPaths and objectpaths.)
  1236  	// More importantly, we should avoid re-export of
  1237  	// facts that relate to objects that are discarded
  1238  	// by "deep" export data. Better still, use a "shallow" approach.
  1239  
  1240  	// Read and decode analysis facts for each direct import.
  1241  	factset, err := pkg.factsDecoder.Decode(func(pkgPath string) ([]byte, error) {
  1242  		if !hasFacts {
  1243  			return nil, nil // analyzer doesn't use facts, so no vdeps
  1244  		}
  1245  
  1246  		// Package.Imports() may contain a fake "C" package. Ignore it.
  1247  		if pkgPath == "C" {
  1248  			return nil, nil
  1249  		}
  1250  
  1251  		id, ok := pkg.mp.DepsByPkgPath[PackagePath(pkgPath)]
  1252  		if !ok {
  1253  			// This may mean the package was synthesized by the type
  1254  			// checker because the import failed for some reason
  1255  			// (e.g. bug processing export data; metadata ignoring
  1256  			// a cycle-forming import).
  1257  			// In that case, the fake package's Path is set to
  1258  			// the failed importPath (and thus it may lack a
  1259  			// "vendor/" prefix).
  1260  			//
  1261  			// For now, silently ignore it on the assumption
  1262  			// that the error is already reported elsewhere.
  1263  			// return nil, fmt.Errorf("missing metadata")
  1264  			return nil, nil
  1265  		}
  1266  
  1267  		vdep := act.vdeps[id]
  1268  		if vdep == nil {
  1269  			return nil, bug.Errorf("internal error in %s: missing vdep for id=%s", pkg.types.Path(), id)
  1270  		}
  1271  
  1272  		return vdep.summary.Actions[act.stableName].Facts, nil
  1273  	})
  1274  	if err != nil {
  1275  		return nil, nil, fmt.Errorf("internal error decoding analysis facts: %w", err)
  1276  	}
  1277  
  1278  	// TODO(adonovan): make Export*Fact panic rather than discarding
  1279  	// undeclared fact types, so that we discover bugs in analyzers.
  1280  	factFilter := make(map[reflect.Type]bool)
  1281  	for _, f := range analyzer.FactTypes {
  1282  		factFilter[reflect.TypeOf(f)] = true
  1283  	}
  1284  
  1285  	// If the package contains "fixed" files, it's not necessarily an error if we
  1286  	// can't convert positions.
  1287  	hasFixedFiles := false
  1288  	for _, p := range pkg.parsed {
  1289  		if p.Fixed() {
  1290  			hasFixedFiles = true
  1291  			break
  1292  		}
  1293  	}
  1294  
  1295  	// posToLocation converts from token.Pos to protocol form.
  1296  	// TODO(adonovan): improve error messages.
  1297  	posToLocation := func(start, end token.Pos) (protocol.Location, error) {
  1298  		tokFile := pkg.fset.File(start)
  1299  
  1300  		for _, p := range pkg.parsed {
  1301  			if p.Tok == tokFile {
  1302  				if end == token.NoPos {
  1303  					end = start
  1304  				}
  1305  
  1306  				// debugging #64547
  1307  				fileStart := token.Pos(tokFile.Base())
  1308  				fileEnd := fileStart + token.Pos(tokFile.Size())
  1309  				if start < fileStart {
  1310  					bug.Reportf("start < start of file")
  1311  					start = fileStart
  1312  				}
  1313  				if end < start {
  1314  					// This can happen if End is zero (#66683)
  1315  					// or a small positive displacement from zero
  1316  				// due to recursive Node.End() computation.
  1317  					// This usually arises from poor parser recovery
  1318  					// of an incomplete term at EOF.
  1319  				bug.Reportf("end < start")
  1320  					end = fileEnd
  1321  				}
  1322  				if end > fileEnd+1 {
  1323  					bug.Reportf("end > end of file + 1")
  1324  					end = fileEnd
  1325  				}
  1326  
  1327  				return p.PosLocation(start, end)
  1328  			}
  1329  		}
  1330  		errorf := bug.Errorf
  1331  		if hasFixedFiles {
  1332  			errorf = fmt.Errorf
  1333  		}
  1334  		return protocol.Location{}, errorf("token.Pos not within package")
  1335  	}
  1336  
  1337  	// Now run the (pkg, analyzer) action.
  1338  	var diagnostics []gobDiagnostic
  1339  	pass := &analysis.Pass{
  1340  		Analyzer:   analyzer,
  1341  		Fset:       pkg.fset,
  1342  		Files:      pkg.files,
  1343  		Pkg:        pkg.types,
  1344  		TypesInfo:  pkg.typesInfo,
  1345  		TypesSizes: pkg.typesSizes,
  1346  		TypeErrors: pkg.typeErrors,
  1347  		ResultOf:   inputs,
  1348  		Report: func(d analysis.Diagnostic) {
  1349  			diagnostic, err := toGobDiagnostic(posToLocation, analyzer, d)
  1350  			if err != nil {
  1351  				if !hasFixedFiles {
  1352  					bug.Reportf("internal error converting diagnostic from analyzer %q: %v", analyzer.Name, err)
  1353  				}
  1354  				return
  1355  			}
  1356  			diagnostics = append(diagnostics, diagnostic)
  1357  		},
  1358  		ImportObjectFact:  factset.ImportObjectFact,
  1359  		ExportObjectFact:  factset.ExportObjectFact,
  1360  		ImportPackageFact: factset.ImportPackageFact,
  1361  		ExportPackageFact: factset.ExportPackageFact,
  1362  		AllObjectFacts:    func() []analysis.ObjectFact { return factset.AllObjectFacts(factFilter) },
  1363  		AllPackageFacts:   func() []analysis.PackageFact { return factset.AllPackageFacts(factFilter) },
  1364  	}
  1365  
  1366  	// Recover from panics (only) within the analyzer logic.
  1367  	// (Use an anonymous function to limit the recover scope.)
  1368  	var result interface{}
  1369  	func() {
  1370  		start := time.Now()
  1371  		defer func() {
  1372  			if r := recover(); r != nil {
  1373  				// An Analyzer panicked, likely due to a bug.
  1374  				//
  1375  				// In general we want to discover and fix such panics quickly,
  1376  				// so we don't suppress them, but some bugs in third-party
  1377  				// analyzers cannot be quickly fixed, so we use an allowlist
  1378  				// to suppress panics.
  1379  				const strict = true
  1380  				if strict && bug.PanicOnBugs &&
  1381  					analyzer.Name != "buildir" { // see https://github.com/dominikh/go-tools/issues/1343
  1382  					// Enable this block (change false to true) when
  1383  					// debugging suspected failures in the driver, not the analyzer.
  1384  					if false {
  1385  						debug.SetTraceback("all") // show all goroutines
  1386  					}
  1387  					panic(r)
  1388  				} else {
  1389  					// In production, suppress the panic and press on.
  1390  					err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pass.Pkg.Path(), r)
  1391  				}
  1392  			}
  1393  
  1394  			// Accumulate running time for each checker.
  1395  			analyzerRunTimesMu.Lock()
  1396  			analyzerRunTimes[analyzer] += time.Since(start)
  1397  			analyzerRunTimesMu.Unlock()
  1398  		}()
  1399  
  1400  		result, err = pass.Analyzer.Run(pass)
  1401  	}()
  1402  	if err != nil {
  1403  		return nil, nil, err
  1404  	}
  1405  
  1406  	if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
  1407  		return nil, nil, bug.Errorf(
  1408  			"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
  1409  			pass.Pkg.Path(), pass.Analyzer, got, want)
  1410  	}
  1411  
  1412  	// Disallow Export*Fact calls after Run.
  1413  	// (A panic means the Analyzer is abusing concurrency.)
  1414  	pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) {
  1415  		panic(fmt.Sprintf("%v: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
  1416  	}
  1417  	pass.ExportPackageFact = func(fact analysis.Fact) {
  1418  		panic(fmt.Sprintf("%v: Pass.ExportPackageFact(%T) called after Run", act, fact))
  1419  	}
  1420  
  1421  	factsdata := factset.Encode()
  1422  	return result, &actionSummary{
  1423  		Diagnostics: diagnostics,
  1424  		Facts:       factsdata,
  1425  		FactsHash:   file.HashOf(factsdata),
  1426  	}, nil
  1427  }
  1428  
  1429  var (
  1430  	analyzerRunTimesMu sync.Mutex
  1431  	analyzerRunTimes   = make(map[*analysis.Analyzer]time.Duration)
  1432  )
  1433  
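        // A LabelDuration is a (label, duration) pair, used to report the
        // accumulated running time of an analyzer; see AnalyzerRunTimes.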
  1434  type LabelDuration struct {
  1435  	Label    string
  1436  	Duration time.Duration
  1437  }
  1438  
  1439  // AnalyzerRunTimes returns the accumulated time spent in each Analyzer's
  1440  // Run function since process start, in descending order.
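        //
        // A caller might print a simple profile like so (a sketch):
        //
        //	for _, ld := range AnalyzerRunTimes() {
        //		fmt.Printf("%v\t%s\n", ld.Duration, ld.Label)
        //	}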
  1441  func AnalyzerRunTimes() []LabelDuration {
  1442  	analyzerRunTimesMu.Lock()
  1443  	defer analyzerRunTimesMu.Unlock()
  1444  
  1445  	slice := make([]LabelDuration, 0, len(analyzerRunTimes))
  1446  	for a, t := range analyzerRunTimes {
  1447  		slice = append(slice, LabelDuration{Label: a.Name, Duration: t})
  1448  	}
  1449  	sort.Slice(slice, func(i, j int) bool {
  1450  		return slice[i].Duration > slice[j].Duration
  1451  	})
  1452  	return slice
  1453  }
  1454  
  1455  // requiredAnalyzers returns the transitive closure of required analyzers in preorder.
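        //
        // For example (with hypothetical analyzers): if A requires B and C,
        // and B in turn requires C, then requiredAnalyzers([]*analysis.Analyzer{A})
        // returns [A, B, C]: each analyzer appears exactly once, before the
        // analyzers it requires.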
  1456  func requiredAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer {
  1457  	var result []*analysis.Analyzer
  1458  	seen := make(map[*analysis.Analyzer]bool)
  1459  	var visitAll func([]*analysis.Analyzer)
  1460  	visitAll = func(analyzers []*analysis.Analyzer) {
  1461  		for _, a := range analyzers {
  1462  			if !seen[a] {
  1463  				seen[a] = true
  1464  				result = append(result, a)
  1465  				visitAll(a.Requires)
  1466  			}
  1467  		}
  1468  	}
  1469  	visitAll(analyzers)
  1470  	return result
  1471  }
  1472  
  1473  var analyzeSummaryCodec = frob.CodecFor[*analyzeSummary]()
  1474  
  1475  // -- data types for serialization of analysis.Diagnostic and golang.Diagnostic --
  1476  
  1477  // (The name says gob but we use frob.)
  1478  var diagnosticsCodec = frob.CodecFor[[]gobDiagnostic]()
  1479  
  1480  type gobDiagnostic struct {
  1481  	Location       protocol.Location
  1482  	Severity       protocol.DiagnosticSeverity
  1483  	Code           string
  1484  	CodeHref       string
  1485  	Source         string
  1486  	Message        string
  1487  	SuggestedFixes []gobSuggestedFix
  1488  	Related        []gobRelatedInformation
  1489  	Tags           []protocol.DiagnosticTag
  1490  }
  1491  
  1492  type gobRelatedInformation struct {
  1493  	Location protocol.Location
  1494  	Message  string
  1495  }
  1496  
  1497  type gobSuggestedFix struct {
  1498  	Message    string
  1499  	TextEdits  []gobTextEdit
  1500  	Command    *gobCommand
  1501  	ActionKind protocol.CodeActionKind
  1502  }
  1503  
  1504  type gobCommand struct {
  1505  	Title     string
  1506  	Command   string
  1507  	Arguments []json.RawMessage
  1508  }
  1509  
  1510  type gobTextEdit struct {
  1511  	Location protocol.Location
  1512  	NewText  []byte
  1513  }
  1514  
  1515  // toGobDiagnostic converts an analysis.Diagnostic to a serializable gobDiagnostic,
  1516  // which requires expanding token.Pos positions into protocol.Location form.
  1517  func toGobDiagnostic(posToLocation func(start, end token.Pos) (protocol.Location, error), a *analysis.Analyzer, diag analysis.Diagnostic) (gobDiagnostic, error) {
  1518  	var fixes []gobSuggestedFix
  1519  	for _, fix := range diag.SuggestedFixes {
  1520  		var gobEdits []gobTextEdit
  1521  		for _, textEdit := range fix.TextEdits {
  1522  			loc, err := posToLocation(textEdit.Pos, textEdit.End)
  1523  			if err != nil {
  1524  				return gobDiagnostic{}, fmt.Errorf("in SuggestedFixes: %w", err)
  1525  			}
  1526  			gobEdits = append(gobEdits, gobTextEdit{
  1527  				Location: loc,
  1528  				NewText:  textEdit.NewText,
  1529  			})
  1530  		}
  1531  		fixes = append(fixes, gobSuggestedFix{
  1532  			Message:   fix.Message,
  1533  			TextEdits: gobEdits,
  1534  		})
  1535  	}
  1536  
  1537  	var related []gobRelatedInformation
  1538  	for _, r := range diag.Related {
  1539  		loc, err := posToLocation(r.Pos, r.End)
  1540  		if err != nil {
  1541  			return gobDiagnostic{}, fmt.Errorf("in Related: %w", err)
  1542  		}
  1543  		related = append(related, gobRelatedInformation{
  1544  			Location: loc,
  1545  			Message:  r.Message,
  1546  		})
  1547  	}
  1548  
  1549  	loc, err := posToLocation(diag.Pos, diag.End)
  1550  	if err != nil {
  1551  		return gobDiagnostic{}, err
  1552  	}
  1553  
  1554  	// The Code column of VSCode's Problems table renders this
  1555  	// information as "Source(Code)" where code is a link to CodeHref.
  1556  	// (The code field must be nonempty for anything to appear.)
  1557  	diagURL := effectiveURL(a, diag)
  1558  	code := "default"
  1559  	if diag.Category != "" {
  1560  		code = diag.Category
  1561  	}
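        	// For example, a diagnostic from a hypothetical analyzer named
        	// "unusedvar" with Category "local" would render in the Problems
        	// table as "unusedvar(local)", with "local" linking to diagURL.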
  1562  
  1563  	return gobDiagnostic{
  1564  		Location: loc,
  1565  		// Severity for analysis diagnostics is dynamic,
  1566  		// based on user configuration per analyzer.
  1567  		Code:           code,
  1568  		CodeHref:       diagURL,
  1569  		Source:         a.Name,
  1570  		Message:        diag.Message,
  1571  		SuggestedFixes: fixes,
  1572  		Related:        related,
  1573  		// Analysis diagnostics do not contain tags.
  1574  	}, nil
  1575  }
  1576  
  1577  // effectiveURL computes the effective URL of diag,
  1578  // using the algorithm specified at Diagnostic.URL.
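        //
        // For example, for a hypothetical analyzer whose URL is
        // "https://pkg.go.dev/example.com/analyzers/deprecated", a diagnostic
        // with Category "obsolete" and no URL of its own resolves to
        // "https://pkg.go.dev/example.com/analyzers/deprecated#obsolete".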
  1579  func effectiveURL(a *analysis.Analyzer, diag analysis.Diagnostic) string {
  1580  	u := diag.URL
  1581  	if u == "" && diag.Category != "" {
  1582  		u = "#" + diag.Category
  1583  	}
  1584  	if base, err := urlpkg.Parse(a.URL); err == nil {
  1585  		if rel, err := urlpkg.Parse(u); err == nil {
  1586  			u = base.ResolveReference(rel).String()
  1587  		}
  1588  	}
  1589  	return u
  1590  }
  1591  
  1592  // stableName returns a name for the analyzer that is unique and
  1593  // stable across address spaces.
  1594  //
  1595  // Analyzer names are not unique. For example, gopls includes
  1596  // both x/tools/go/analysis/passes/nilness and staticcheck's nilness.
  1597  // For serialization, we must assign each analyzer a unique identifier
  1598  // that two gopls processes accessing the cache can agree on.
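        //
        // For example, two distinct analyzers both named "nilness" might be
        // assigned stable names such as (file names and line numbers are
        // illustrative):
        //
        //	nilness(nilness.go:42)
        //	nilness(analyzer.go:17)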
  1599  func stableName(a *analysis.Analyzer) string {
  1600  	// Incorporate the file and line of the analyzer's Run function.
  1601  	addr := reflect.ValueOf(a.Run).Pointer()
  1602  	fn := runtime.FuncForPC(addr)
  1603  	file, line := fn.FileLine(addr)
  1604  
  1605  	// It is tempting to use just a.Name as the stable name when
  1606  	// it is unique, but making them always differ helps avoid
  1607  // name/stableName confusion.
  1608  	return fmt.Sprintf("%s(%s:%d)", a.Name, filepath.Base(file), line)
  1609  }