cuelang.org/go@v0.10.1/internal/golangorgx/gopls/cache/analysis.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package cache
     6  
     7  // This file defines gopls' driver for modular static analysis (go/analysis).
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"crypto/sha256"
    13  	"encoding/gob"
    14  	"encoding/json"
    15  	"errors"
    16  	"fmt"
    17  	"go/ast"
    18  	"go/parser"
    19  	"go/token"
    20  	"go/types"
    21  	"log"
    22  	urlpkg "net/url"
    23  	"path/filepath"
    24  	"reflect"
    25  	"runtime"
    26  	"runtime/debug"
    27  	"sort"
    28  	"strings"
    29  	"sync"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
    34  	"cuelang.org/go/internal/golangorgx/gopls/file"
    35  	"cuelang.org/go/internal/golangorgx/gopls/filecache"
    36  	"cuelang.org/go/internal/golangorgx/gopls/progress"
    37  	"cuelang.org/go/internal/golangorgx/gopls/protocol"
    38  	"cuelang.org/go/internal/golangorgx/gopls/settings"
    39  	"cuelang.org/go/internal/golangorgx/gopls/util/astutil"
    40  	"cuelang.org/go/internal/golangorgx/gopls/util/bug"
    41  	"cuelang.org/go/internal/golangorgx/gopls/util/frob"
    42  	"cuelang.org/go/internal/golangorgx/tools/event"
    43  	"cuelang.org/go/internal/golangorgx/tools/event/tag"
    44  	"cuelang.org/go/internal/golangorgx/tools/facts"
    45  	"cuelang.org/go/internal/golangorgx/tools/gcimporter"
    46  	"cuelang.org/go/internal/golangorgx/tools/typesinternal"
    47  	"cuelang.org/go/internal/golangorgx/tools/versions"
    48  	"golang.org/x/sync/errgroup"
    49  	"golang.org/x/tools/go/analysis"
    50  )
    51  
    52  /*
    53  
    54     DESIGN
    55  
    56     An analysis request (Snapshot.Analyze) is for a set of Analyzers and
    57     PackageIDs. The result is the set of diagnostics for those
    58     packages. Each request constructs a transitively closed DAG of
    59     nodes, each representing a package, then works bottom up in
    60     parallel postorder calling runCached to ensure that each node's
    61     analysis summary is up to date. The summary contains the analysis
    62     diagnostics as well as the intermediate results required by the
    63     recursion, such as serialized types and facts.
    64  
    65     The entire DAG is ephemeral. Each node in the DAG records the set
    66     of analyzers to run: the complete set for the root packages, and
    67     the "facty" subset for dependencies. Each package is thus analyzed
    68     at most once. The entire DAG shares a single FileSet for parsing
    69     and importing.
    70  
    71     Each node is processed by runCached. It gets the source file
    72     content hashes for package p, and the summaries of its "vertical"
    73     dependencies (direct imports), and from them it computes a key
    74     representing the unit of work (parsing, type-checking, and
    75     analysis) that it has to do. The key is a cryptographic hash of the
    76     "recipe" for this step, including the Metadata, the file contents,
    77     the set of analyzers, and the type and fact information from the
    78     vertical dependencies.
    79  
    80     The key is sought in a machine-global persistent file-system based
    81     cache. If this gopls process, or another gopls process on the same
    82     machine, has already performed this analysis step, runCached will
    83     make a cache hit and load the serialized summary of the results. If
    84     not, it will have to proceed to run() to parse and type-check the
    85     package and then apply a set of analyzers to it. (The set of
    86     analyzers applied to a single package itself forms a graph of
    87     "actions", and it too is evaluated in parallel postorder; these
    88     dependency edges within the same package are called "horizontal".)
    89     Finally it writes a new cache entry. The entry contains serialized
    90     types (export data) and analysis facts.
    91  
    92     Each node in the DAG acts like a go/types importer mapping,
    93     providing a consistent view of packages and their objects: the
    94     mapping for a node is a superset of its dependencies' mappings.
    95     Every node has an associated *types.Package, initially nil. A
    96     package is populated during run (cache miss) by type-checking its
    97     syntax; but for a cache hit, the package is populated lazily, i.e.
    98     not until it later becomes necessary because it is imported
    99     directly or referenced by export data higher up in the DAG.
   100  
   101     For types, we use "shallow" export data. Historically, the Go
   102     compiler always produced a summary of the types for a given package
   103     that included types from other packages that it indirectly
   104     referenced: "deep" export data. This had the advantage that the
   105     compiler (and analogous tools such as gopls) need only load one
   106     file per direct import.  However, it meant that the files tended to
   107     get larger based on the level of the package in the import
   108     graph. For example, higher-level packages in the kubernetes module
   109     have over 1MB of "deep" export data, even when they have almost no
   110     content of their own, merely because they mention a major type that
   111     references many others. In pathological cases the export data was
   112     300x larger than the source for a package due to this quadratic
   113     growth.
   114  
   115     "Shallow" export data means that the serialized types describe only
   116     a single package. If those types mention types from other packages,
   117     the type checker may need to request additional packages beyond
   118     just the direct imports. Type information for the entire transitive
   119     closure of imports is provided (lazily) by the DAG.
   120  
   121     For correct dependency analysis, the digest used as a cache key
   122     must reflect the "deep" export data, so it is derived recursively
   123     from the transitive closure. As an optimization, we needn't include
   124     every package of the transitive closure in the deep hash, only the
   125     packages that were actually requested by the type checker. This
   126     allows changes to a package that have no effect on its export data
   127     to be "pruned". The direct consumer will need to be re-executed,
   128     but if its export data is unchanged as a result, then indirect
   129     consumers may not need to be re-executed.  This allows, for example,
   130     one to insert a print statement in a function and not "rebuild" the
   131     whole application (though export data does record line numbers and
   132     offsets of types which may be perturbed by otherwise insignificant
   133     changes.)
   134  
   135     The summary must record whether a package is transitively
   136     error-free (whether it would compile) because many analyzers are
   137     not safe to run on packages with inconsistent types.
   138  
   139     For fact encoding, we use the same fact set as the unitchecker
   140     (vet) to record and serialize analysis facts. The fact
   141     serialization mechanism is analogous to "deep" export data.
   142  
   143  */
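
        // To make the scheduling concrete, here is a minimal sketch (not part
        // of this file; all names are hypothetical) of the bottom-up parallel
        // postorder described above: each node counts its unfinished
        // successors, and a predecessor is enqueued once its last successor
        // completes.
        //
        //	type node struct {
        //		preds           []*node
        //		unfinishedSuccs atomic.Int32
        //	}
        //
        //	func schedule(g *errgroup.Group, leaves []*node, process func(*node) error) {
        //		var enqueue func(*node)
        //		enqueue = func(n *node) {
        //			g.Go(func() error {
        //				if err := process(n); err != nil {
        //					return err
        //				}
        //				// A predecessor becomes a leaf when its last successor finishes.
        //				for _, p := range n.preds {
        //					if p.unfinishedSuccs.Add(-1) == 0 {
        //						enqueue(p)
        //					}
        //				}
        //				return nil
        //			})
        //		}
        //		for _, leaf := range leaves {
        //			enqueue(leaf)
        //		}
        //	}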
   144  
   145  // TODO(adonovan):
   146  // - Add a (white-box) test of pruning when a change doesn't affect export data.
   147  // - Optimise pruning based on subset of packages mentioned in exportdata.
   148  // - Better logging so that it is possible to deduce why an analyzer
   149  //   is not being run--often due to very indirect failures.
   150  //   Even if the ultimate consumer decides to ignore errors,
   151  //   tests and other situations want to be assured of freedom from
   152  //   errors, not just missing results. This should be recorded.
   153  // - Split this into a subpackage, gopls/internal/cache/driver,
   154  //   consisting of this file and three helpers from errors.go.
   155  //   The (*snapshot).Analyze method would stay behind and make calls
   156  //   to the driver package.
   157  //   Steps:
   158  //   - define a narrow driver.Snapshot interface with only these methods:
   159  //        Metadata(PackageID) Metadata
   160  //        ReadFile(Context, URI) (file.Handle, error)
   161  //        View() *View // for Options
   162  //   - share cache.{goVersionRx,parseGoImpl}
   163  
   164  // AnalysisProgressTitle is the title of the progress report for ongoing
   165  // analysis. It is sought by regression tests for the progress reporting
   166  // feature.
   167  const AnalysisProgressTitle = "Analyzing Dependencies"
   168  
   169  // Analyze applies a set of analyzers to the packages denoted by pkgs,
   170  // and returns their diagnostics for those packages.
   171  //
   172  // The analyzers list must be duplicate free; order does not matter.
   173  //
   174  // Notifications of progress may be sent to the optional reporter.
   175  func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Package, analyzers []*settings.Analyzer, reporter *progress.Tracker) ([]*Diagnostic, error) {
   176  	start := time.Now() // for progress reporting
   177  
   178  	var tagStr string // sorted comma-separated list of PackageIDs
   179  	{
   180  		// TODO(adonovan): replace with a generic map[S]any -> string
   181  		// function in the tag package, and use maps.Keys + slices.Sort.
   182  		keys := make([]string, 0, len(pkgs))
   183  		for id := range pkgs {
   184  			keys = append(keys, string(id))
   185  		}
   186  		sort.Strings(keys)
   187  		tagStr = strings.Join(keys, ",")
   188  	}
   189  	ctx, done := event.Start(ctx, "snapshot.Analyze", tag.Package.Of(tagStr))
   190  	defer done()
   191  
   192  	// Filter and sort enabled root analyzers.
   193  	// A disabled analyzer may still be run if required by another.
   194  	toSrc := make(map[*analysis.Analyzer]*settings.Analyzer)
   195  	var enabled []*analysis.Analyzer // enabled subset + transitive requirements
   196  	for _, a := range analyzers {
   197  		if a.IsEnabled(s.Options()) {
   198  			toSrc[a.Analyzer] = a
   199  			enabled = append(enabled, a.Analyzer)
   200  		}
   201  	}
   202  	sort.Slice(enabled, func(i, j int) bool {
   203  		return enabled[i].Name < enabled[j].Name
   204  	})
   205  	analyzers = nil // prevent accidental use
   206  
   207  	enabled = requiredAnalyzers(enabled)
   208  
   209  	// Perform basic sanity checks.
   210  	// (Ideally we would do this only once.)
   211  	if err := analysis.Validate(enabled); err != nil {
   212  		return nil, fmt.Errorf("invalid analyzer configuration: %v", err)
   213  	}
   214  
   215  	stableNames := make(map[*analysis.Analyzer]string)
   216  
   217  	var facty []*analysis.Analyzer // facty subset of enabled + transitive requirements
   218  	for _, a := range enabled {
   219  		// TODO(adonovan): reject duplicate stable names (very unlikely).
   220  		stableNames[a] = stableName(a)
   221  
   222  		// Register fact types of all required analyzers.
   223  		if len(a.FactTypes) > 0 {
   224  			facty = append(facty, a)
   225  			for _, f := range a.FactTypes {
   226  				gob.Register(f) // <2us
   227  			}
   228  		}
   229  	}
   230  	facty = requiredAnalyzers(facty)
   231  
   232  	// File set for this batch (entire graph) of analysis.
   233  	fset := token.NewFileSet()
   234  
   235  	// Starting from the root packages and following DepsByPkgPath,
   236  	// build the DAG of packages we're going to analyze.
   237  	//
   238  	// Root nodes will run the enabled set of analyzers,
   239  	// whereas dependencies will run only the facty set.
   240  	// Because (by construction) enabled is a superset of facty,
   241  	// we can analyze each node with exactly one set of analyzers.
   242  	nodes := make(map[PackageID]*analysisNode)
   243  	var leaves []*analysisNode // nodes with no unfinished successors
   244  	var makeNode func(from *analysisNode, id PackageID) (*analysisNode, error)
   245  	makeNode = func(from *analysisNode, id PackageID) (*analysisNode, error) {
   246  		an, ok := nodes[id]
   247  		if !ok {
   248  			mp := s.Metadata(id)
   249  			if mp == nil {
   250  				return nil, bug.Errorf("no metadata for %s", id)
   251  			}
   252  
   253  			// -- preorder --
   254  
   255  			an = &analysisNode{
   256  				fset:        fset,
   257  				mp:          mp,
   258  				analyzers:   facty, // all nodes run at least the facty analyzers
   259  				allDeps:     make(map[PackagePath]*analysisNode),
   260  				exportDeps:  make(map[PackagePath]*analysisNode),
   261  				stableNames: stableNames,
   262  			}
   263  			nodes[id] = an
   264  
   265  			// -- recursion --
   266  
   267  			// Build subgraphs for dependencies.
   268  			an.succs = make(map[PackageID]*analysisNode, len(mp.DepsByPkgPath))
   269  			for _, depID := range mp.DepsByPkgPath {
   270  				dep, err := makeNode(an, depID)
   271  				if err != nil {
   272  					return nil, err
   273  				}
   274  				an.succs[depID] = dep
   275  
   276  				// Compute the union of all dependencies.
   277  				// (This step has quadratic complexity.)
   278  				for pkgPath, node := range dep.allDeps {
   279  					an.allDeps[pkgPath] = node
   280  				}
   281  			}
   282  
   283  			// -- postorder --
   284  
   285  			an.allDeps[mp.PkgPath] = an // add self entry (reflexive transitive closure)
   286  
   287  			// Add leaf nodes (no successors) directly to queue.
   288  			if len(an.succs) == 0 {
   289  				leaves = append(leaves, an)
   290  			}
   291  
   292  			// Load the contents of each compiled Go file through
   293  			// the snapshot's cache. (These are all cache hits as
   294  			// files are pre-loaded following packages.Load)
   295  			an.files = make([]file.Handle, len(mp.CompiledGoFiles))
   296  			for i, uri := range mp.CompiledGoFiles {
   297  				fh, err := s.ReadFile(ctx, uri)
   298  				if err != nil {
   299  					return nil, err
   300  				}
   301  				an.files[i] = fh
   302  			}
   303  		}
   304  		// Add edge from predecessor.
   305  		if from != nil {
   306  			atomic.AddInt32(&from.unfinishedSuccs, 1) // TODO(adonovan): use generics
   307  			an.preds = append(an.preds, from)
   308  		}
   309  		atomic.AddInt32(&an.unfinishedPreds, 1)
   310  		return an, nil
   311  	}
   312  
   313  	// For root packages, we run the enabled set of analyzers.
   314  	var roots []*analysisNode
   315  	for id := range pkgs {
   316  		root, err := makeNode(nil, id)
   317  		if err != nil {
   318  			return nil, err
   319  		}
   320  		root.analyzers = enabled
   321  		roots = append(roots, root)
   322  	}
   323  
   324  	// Now that we have read all files,
   325  	// we no longer need the snapshot.
   326  	// (but options are needed for progress reporting)
   327  	options := s.Options()
   328  	s = nil
   329  
   330  	// Progress reporting. If supported, gopls reports progress on analysis
   331  	// passes that are taking a long time.
   332  	maybeReport := func(completed int64) {}
   333  
   334  	// Enable progress reporting if the user has opted in
   335  	// and we have a capable reporter.
   336  	if reporter != nil && reporter.SupportsWorkDoneProgress() && options.AnalysisProgressReporting {
   337  		var reportAfter = options.ReportAnalysisProgressAfter // tests may set this to 0
   338  		const reportEvery = 1 * time.Second
   339  
   340  		ctx, cancel := context.WithCancel(ctx)
   341  		defer cancel()
   342  
   343  		var (
   344  			reportMu   sync.Mutex
   345  			lastReport time.Time
   346  			wd         *progress.WorkDone
   347  		)
   348  		defer func() {
   349  			reportMu.Lock()
   350  			defer reportMu.Unlock()
   351  
   352  			if wd != nil {
   353  				wd.End(ctx, "Done.") // ensure that the progress report exits
   354  			}
   355  		}()
   356  		maybeReport = func(completed int64) {
   357  			now := time.Now()
   358  			if now.Sub(start) < reportAfter {
   359  				return
   360  			}
   361  
   362  			reportMu.Lock()
   363  			defer reportMu.Unlock()
   364  
   365  			if wd == nil {
   366  				wd = reporter.Start(ctx, AnalysisProgressTitle, "", nil, cancel)
   367  			}
   368  
   369  			if now.Sub(lastReport) > reportEvery {
   370  				lastReport = now
   371  				// Trailing space is intentional: some LSP clients strip newlines.
   372  				msg := fmt.Sprintf(`Indexed %d/%d packages. (Set "analysisProgressReporting" to false to disable notifications.)`,
   373  					completed, len(nodes))
   374  				pct := 100 * float64(completed) / float64(len(nodes))
   375  				wd.Report(ctx, msg, pct)
   376  			}
   377  		}
   378  	}
   379  
   380  	// Execute phase: run leaves first, adding
   381  	// new nodes to the queue as they become leaves.
   382  	var g errgroup.Group
   383  
   384  	// Analysis is CPU-bound.
   385  	//
   386  	// Note: avoid g.SetLimit here: it makes g.Go stop accepting work, which
   387  	// prevents workers from enqueuing, and thus finishing, and thus allowing the
   388  	// group to make progress: deadlock.
   389  	limiter := make(chan unit, runtime.GOMAXPROCS(0))
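        	// (A buffered channel used as a semaphore caps the number of running
        	// workers at GOMAXPROCS while still letting g.Go accept new work,
        	// so a finishing worker can always enqueue its predecessors.)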
   390  	var completed int64
   391  
   392  	var enqueue func(*analysisNode)
   393  	enqueue = func(an *analysisNode) {
   394  		g.Go(func() error {
   395  			limiter <- unit{}
   396  			defer func() { <-limiter }()
   397  
   398  			summary, err := an.runCached(ctx)
   399  			if err != nil {
   400  				return err // cancelled, or failed to produce a package
   401  			}
   402  			maybeReport(atomic.AddInt64(&completed, 1))
   403  			an.summary = summary
   404  
   405  			// Notify each waiting predecessor,
   406  			// and enqueue it when it becomes a leaf.
   407  			for _, pred := range an.preds {
   408  				if atomic.AddInt32(&pred.unfinishedSuccs, -1) == 0 {
   409  					enqueue(pred)
   410  				}
   411  			}
   412  
   413  			// Notify each successor that we no longer need
   414  			// its action summaries, which hold Result values.
   415  			// After the last one, delete it, so that we
   416  			// free up large results such as SSA.
   417  			for _, succ := range an.succs {
   418  				succ.decrefPreds()
   419  			}
   420  			return nil
   421  		})
   422  	}
   423  	for _, leaf := range leaves {
   424  		enqueue(leaf)
   425  	}
   426  	if err := g.Wait(); err != nil {
   427  		return nil, err // cancelled, or failed to produce a package
   428  	}
   429  
   430  	// Report diagnostics only from enabled actions that succeeded.
   431  	// Errors from creating or analyzing packages are ignored.
   432  	// Diagnostics are reported in the order of the analyzers argument.
   433  	//
   434  	// TODO(adonovan): ignoring action errors gives the caller no way
   435  	// to distinguish "there are no problems in this code" from
   436  	// "the code (or analyzers!) are so broken that we couldn't even
   437  	// begin the analysis you asked for".
   438  	// Even if current callers choose to discard the
   439  	// results, we should propagate the per-action errors.
   440  	var results []*Diagnostic
   441  	for _, root := range roots {
   442  		for _, a := range enabled {
   443  			// Skip analyzers that were added only to
   444  			// fulfil requirements of the original set.
   445  			srcAnalyzer, ok := toSrc[a]
   446  			if !ok {
   447  				// Although this 'skip' operation is logically sound,
   448  				// it is nonetheless surprising that its absence should
   449  				// cause #60909 since none of the analyzers currently added for
   450  				// requirements (e.g. ctrlflow, inspect, buildssa)
   451  				// is capable of reporting diagnostics.
   452  				if summary := root.summary.Actions[stableNames[a]]; summary != nil {
   453  					if n := len(summary.Diagnostics); n > 0 {
   454  						bug.Reportf("Internal error: got %d unexpected diagnostics from analyzer %s. This analyzer was added only to fulfil the requirements of the requested set of analyzers, and it is not expected that such analyzers report diagnostics. Please report this in issue #60909.", n, a)
   455  					}
   456  				}
   457  				continue
   458  			}
   459  
   460  			// Inv: root.summary is the successful result of run (via runCached).
   461  			summary, ok := root.summary.Actions[stableNames[a]]
   462  			if summary == nil {
   463  				panic(fmt.Sprintf("analyzeSummary.Actions[%q] = (nil, %t); got %v (#60551)",
   464  					stableNames[a], ok, root.summary.Actions))
   465  			}
   466  			if summary.Err != "" {
   467  				continue // action failed
   468  			}
   469  			for _, gobDiag := range summary.Diagnostics {
   470  				results = append(results, toSourceDiagnostic(srcAnalyzer, &gobDiag))
   471  			}
   472  		}
   473  	}
   474  	return results, nil
   475  }
   476  
   477  func (an *analysisNode) decrefPreds() {
   478  	if atomic.AddInt32(&an.unfinishedPreds, -1) == 0 {
   479  		an.summary.Actions = nil
   480  	}
   481  }
   482  
   483  // An analysisNode is a node in a doubly-linked DAG isomorphic to the
   484  // import graph. Each node represents a single package, and the DAG
   485  // represents a batch of analysis work done at once using a single
   486  // realm of token.Pos or types.Object values.
   487  //
   488  // A complete DAG is created anew for each batch of analysis;
   489  // subgraphs are not reused over time. Each node's *types.Package
   490  // field is initially nil and is populated on demand, either from
   491  // type-checking syntax trees (typeCheck) or from importing export
   492  // data (_import). When this occurs, the typesOnce event becomes
   493  // "done".
   494  //
   495  // Each node's allDeps map is a "view" of all its dependencies keyed by
   496  // package path, which defines the types.Importer mapping used when
   497  // populating the node's types.Package. Different nodes have different
   498  // views (e.g. due to variants), but two nodes that are related by
   499  // graph ordering have views that are consistent in their overlap.
   500  // exportDeps is the subset actually referenced by export data;
   501  // this is the set for which we attempt to decode facts.
   502  //
   503  // Each node's run method is called in parallel postorder. On success,
   504  // its summary field is populated, either from the cache (hit), or by
   505  // type-checking and analyzing syntax (miss).
   506  type analysisNode struct {
   507  	fset            *token.FileSet              // file set shared by entire batch (DAG)
   508  	mp              *metadata.Package           // metadata for this package
   509  	files           []file.Handle               // contents of CompiledGoFiles
   510  	analyzers       []*analysis.Analyzer        // set of analyzers to run
   511  	preds           []*analysisNode             // graph edges:
   512  	succs           map[PackageID]*analysisNode //   (preds -> self -> succs)
   513  	unfinishedSuccs int32
   514  	unfinishedPreds int32                         // effectively a summary.Actions refcount
   515  	allDeps         map[PackagePath]*analysisNode // all dependencies including self
   516  	exportDeps      map[PackagePath]*analysisNode // subset of allDeps ref'd by export data (+self)
   517  	summary         *analyzeSummary               // serializable result of analyzing this package
   518  	stableNames     map[*analysis.Analyzer]string // cross-process stable names for Analyzers
   519  
   520  	typesOnce sync.Once      // guards lazy population of types and typesErr fields
   521  	types     *types.Package // type information lazily imported from summary
   522  	typesErr  error          // an error producing type information
   523  }
   524  
   525  func (an *analysisNode) String() string { return string(an.mp.ID) }
   526  
   527  // _import imports this node's types.Package from export data, if not already done.
   528  // Precondition: analysis was a success.
   529  // Postcondition: an.types and an.exportDeps are populated.
   530  func (an *analysisNode) _import() (*types.Package, error) {
   531  	an.typesOnce.Do(func() {
   532  		if an.mp.PkgPath == "unsafe" {
   533  			an.types = types.Unsafe
   534  			return
   535  		}
   536  
   537  		an.types = types.NewPackage(string(an.mp.PkgPath), string(an.mp.Name))
   538  
   539  		// getPackages recursively imports each dependency
   540  		// referenced by the export data, in parallel.
   541  		getPackages := func(items []gcimporter.GetPackagesItem) error {
   542  			var g errgroup.Group
   543  			for i, item := range items {
   544  				path := PackagePath(item.Path)
   545  				dep, ok := an.allDeps[path]
   546  				if !ok {
   547  					// This early return bypasses Wait; that's ok.
   548  					return fmt.Errorf("%s: unknown dependency %q", an.mp, path)
   549  				}
   550  				an.exportDeps[path] = dep // record, for later fact decoding
   551  				if dep == an {
   552  					if an.typesErr != nil {
   553  						return an.typesErr
   554  					} else {
   555  						items[i].Pkg = an.types
   556  					}
   557  				} else {
   558  					i := i
   559  					g.Go(func() error {
   560  						depPkg, err := dep._import()
   561  						if err == nil {
   562  							items[i].Pkg = depPkg
   563  						}
   564  						return err
   565  					})
   566  				}
   567  			}
   568  			return g.Wait()
   569  		}
   570  		pkg, err := gcimporter.IImportShallow(an.fset, getPackages, an.summary.Export, string(an.mp.PkgPath), bug.Reportf)
   571  		if err != nil {
   572  			an.typesErr = bug.Errorf("%s: invalid export data: %v", an.mp, err)
   573  			an.types = nil
   574  		} else if pkg != an.types {
   575  			log.Fatalf("%s: inconsistent packages", an.mp)
   576  		}
   577  	})
   578  	return an.types, an.typesErr
   579  }
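
        // The typesOnce idiom above memoizes a (value, error) pair so that
        // concurrent importers share a single import attempt. In miniature
        // (a sketch with hypothetical names, not used by this file):
        //
        //	type lazyPkg struct {
        //		once sync.Once
        //		pkg  *types.Package
        //		err  error
        //	}
        //
        //	func (l *lazyPkg) get(load func() (*types.Package, error)) (*types.Package, error) {
        //		l.once.Do(func() { l.pkg, l.err = load() })
        //		return l.pkg, l.err // subsequent calls return the memoized pair
        //	}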
   580  
   581  // analyzeSummary is a gob-serializable summary of successfully
   582  // applying a list of analyzers to a package.
   583  type analyzeSummary struct {
   584  	Export         []byte    // encoded types of package
   585  	DeepExportHash file.Hash // hash of reflexive transitive closure of export data
   586  	Compiles       bool      // transitively free of list/parse/type errors
   587  	Actions        actionMap // maps analyzer stable name to analysis results (*actionSummary)
   588  }
   589  
   590  // actionMap defines a stable Gob encoding for a map.
   591  // TODO(adonovan): generalize and move to a library when we can use generics.
   592  type actionMap map[string]*actionSummary
   593  
   594  var (
   595  	_ gob.GobEncoder = (actionMap)(nil)
   596  	_ gob.GobDecoder = (*actionMap)(nil)
   597  )
   598  
   599  type actionsMapEntry struct {
   600  	K string
   601  	V *actionSummary
   602  }
   603  
   604  func (m actionMap) GobEncode() ([]byte, error) {
   605  	entries := make([]actionsMapEntry, 0, len(m))
   606  	for k, v := range m {
   607  		entries = append(entries, actionsMapEntry{k, v})
   608  	}
   609  	sort.Slice(entries, func(i, j int) bool {
   610  		return entries[i].K < entries[j].K
   611  	})
   612  	var buf bytes.Buffer
   613  	err := gob.NewEncoder(&buf).Encode(entries)
   614  	return buf.Bytes(), err
   615  }
   616  
   617  func (m *actionMap) GobDecode(data []byte) error {
   618  	var entries []actionsMapEntry
   619  	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&entries); err != nil {
   620  		return err
   621  	}
   622  	*m = make(actionMap, len(entries))
   623  	for _, e := range entries {
   624  		(*m)[e.K] = e.V
   625  	}
   626  	return nil
   627  }
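
        // A hedged usage sketch (hypothetical values) of why the sorted
        // encoding above matters: equal actionMaps always gob-encode to
        // identical bytes, so they contribute deterministically to cache
        // keys, whereas gob encoding of a plain map visits entries in
        // unspecified iteration order.
        //
        //	m := actionMap{"b": &actionSummary{}, "a": &actionSummary{}}
        //	data1, _ := m.GobEncode()
        //	data2, _ := m.GobEncode()
        //	// bytes.Equal(data1, data2) is always true.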
   628  
   629  // actionSummary is a gob-serializable summary of one possibly failed analysis action.
   630  // If Err is non-empty, the other fields are undefined.
   631  type actionSummary struct {
   632  	Facts       []byte    // the encoded facts.Set
   633  	FactsHash   file.Hash // hash(Facts)
   634  	Diagnostics []gobDiagnostic
   635  	Err         string // "" => success
   636  }
   637  
   638  // runCached applies a list of analyzers (plus any others
   639  // transitively required by them) to a package.  It succeeds as long
   640  // as it could produce a types.Package, even if there were direct or
   641  // indirect list/parse/type errors, and even if all the analysis
   642  // actions failed. It usually fails only if the package was unknown,
   643  // a file was missing, or the operation was cancelled.
   644  //
   645  // Postcondition: runCached must not continue to use the snapshot
   646  // (in background goroutines) after it has returned; see memoize.RefCounted.
   647  func (an *analysisNode) runCached(ctx context.Context) (*analyzeSummary, error) {
   648  	// At this point we have the action results (serialized
   649  	// packages and facts) of our immediate dependencies,
   650  	// and the metadata and content of this package.
   651  	//
   652  	// We now compute a hash for all our inputs, and consult a
   653  	// global cache of promised results. If nothing material
   654  	// has changed, we'll make a hit in the shared cache.
   655  	//
   656  	// The hash of our inputs is based on the serialized export
   657  	// data and facts so that immaterial changes can be pruned
   658  	// without decoding.
   659  	key := an.cacheKey()
   660  
   661  	// Access the cache.
   662  	var summary *analyzeSummary
   663  	const cacheKind = "analysis"
   664  	if data, err := filecache.Get(cacheKind, key); err == nil {
   665  		// cache hit
   666  		analyzeSummaryCodec.Decode(data, &summary)
   667  	} else if err != filecache.ErrNotFound {
   668  		return nil, bug.Errorf("internal error reading shared cache: %v", err)
   669  	} else {
   670  		// Cache miss: do the work.
   671  		var err error
   672  		summary, err = an.run(ctx)
   673  		if err != nil {
   674  			return nil, err
   675  		}
   676  
   677  		atomic.AddInt32(&an.unfinishedPreds, +1) // incref
   678  		go func() {
   679  			defer an.decrefPreds() // decref
   680  
   681  			cacheLimit <- unit{}            // acquire token
   682  			defer func() { <-cacheLimit }() // release token
   683  
   684  			data := analyzeSummaryCodec.Encode(summary)
   685  			if false {
   686  				log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), an.mp.ID)
   687  			}
   688  			if err := filecache.Set(cacheKind, key, data); err != nil {
   689  				event.Error(ctx, "internal error updating analysis shared cache", err)
   690  			}
   691  		}()
   692  	}
   693  
   694  	return summary, nil
   695  }
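
        // The cache interaction above is a get-or-compute-and-set pattern.
        // In miniature (a sketch; kind, key, and compute are hypothetical
        // stand-ins; runCached differs in that it writes the entry
        // asynchronously):
        //
        //	func getOrCompute(kind string, key [sha256.Size]byte, compute func() ([]byte, error)) ([]byte, error) {
        //		if data, err := filecache.Get(kind, key); err == nil {
        //			return data, nil // hit
        //		} else if err != filecache.ErrNotFound {
        //			return nil, err // unexpected cache failure
        //		}
        //		data, err := compute() // miss: do the work
        //		if err != nil {
        //			return nil, err
        //		}
        //		return data, filecache.Set(kind, key, data)
        //	}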
   696  
   697  // cacheLimit reduces parallelism of cache updates.
   698  // We allow more than typical GOMAXPROCS as it's a mix of CPU and I/O.
   699  var cacheLimit = make(chan unit, 32)
   700  
   701  // cacheKey returns a cache key that is a cryptographic digest
   702  // of all the values that might affect type checking and analysis:
   703  // the analyzer names, package metadata, names and contents of
   704  // compiled Go files, and vdeps (successor) information
   705  // (export data and facts).
   706  func (an *analysisNode) cacheKey() [sha256.Size]byte {
   707  	hasher := sha256.New()
   708  
   709  	// In principle, a key must be the hash of an
   710  	// unambiguous encoding of all the relevant data.
   711  	// If it's ambiguous, we risk collisions.
   712  
   713  	// analyzers
   714  	fmt.Fprintf(hasher, "analyzers: %d\n", len(an.analyzers))
   715  	for _, a := range an.analyzers {
   716  		fmt.Fprintln(hasher, a.Name)
   717  	}
   718  
   719  	// package metadata
   720  	mp := an.mp
   721  	fmt.Fprintf(hasher, "package: %s %s %s\n", mp.ID, mp.Name, mp.PkgPath)
   722  	// We can ignore mp.DepsBy{Pkg,Import}Path: although the logic
   723  	// uses those fields, we account for them by hashing vdeps.
   724  
   725  	// type sizes
   726  	wordSize := an.mp.TypesSizes.Sizeof(types.Typ[types.Int])
   727  	maxAlign := an.mp.TypesSizes.Alignof(types.NewPointer(types.Typ[types.Int64]))
   728  	fmt.Fprintf(hasher, "sizes: %d %d\n", wordSize, maxAlign)
   729  
   730  	// metadata errors: used for 'compiles' field
   731  	fmt.Fprintf(hasher, "errors: %d\n", len(mp.Errors))
   732  
   733  	// module Go version
   734  	if mp.Module != nil && mp.Module.GoVersion != "" {
   735  		fmt.Fprintf(hasher, "go %s\n", mp.Module.GoVersion)
   736  	}
   737  
   738  	// file names and contents
   739  	fmt.Fprintf(hasher, "files: %d\n", len(an.files))
   740  	for _, fh := range an.files {
   741  		fmt.Fprintln(hasher, fh.Identity())
   742  	}
   743  
   744  	// vdeps, in PackageID order
   745  	depIDs := make([]string, 0, len(an.succs))
   746  	for depID := range an.succs {
   747  		depIDs = append(depIDs, string(depID))
   748  	}
   749  	sort.Strings(depIDs) // TODO(adonovan): avoid conversions by using slices.Sort[PackageID]
   750  	for _, depID := range depIDs {
   751  		vdep := an.succs[PackageID(depID)]
   752  		fmt.Fprintf(hasher, "dep: %s\n", vdep.mp.PkgPath)
   753  		fmt.Fprintf(hasher, "export: %s\n", vdep.summary.DeepExportHash)
   754  
   755  		// action results: errors and facts
   756  		actions := vdep.summary.Actions
   757  		names := make([]string, 0, len(actions))
   758  		for name := range actions {
   759  			names = append(names, name)
   760  		}
   761  		sort.Strings(names)
   762  		for _, name := range names {
   763  			summary := actions[name]
   764  			fmt.Fprintf(hasher, "action %s\n", name)
   765  			if summary.Err != "" {
   766  				fmt.Fprintf(hasher, "error %s\n", summary.Err)
   767  			} else {
   768  				fmt.Fprintf(hasher, "facts %s\n", summary.FactsHash)
   769  				// We can safely omit summary.Diagnostics
   770  				// from the key since they have no downstream effect.
   771  			}
   772  		}
   773  	}
   774  
   775  	var hash [sha256.Size]byte
   776  	hasher.Sum(hash[:0])
   777  	return hash
   778  }
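
        // The warning above about ambiguous encodings is easy to demonstrate
        // (a toy sketch, not used by this file): without delimiters, the
        // boundary between hashed fields is lost, which is why each Fprintf
        // above labels its field and ends with a newline or a count.
        //
        //	h1, h2 := sha256.New(), sha256.New()
        //	h1.Write([]byte("ab"))
        //	h1.Write([]byte("c"))
        //	h2.Write([]byte("a"))
        //	h2.Write([]byte("bc"))
        //	// h1.Sum(nil) and h2.Sum(nil) are identical, although the two
        //	// inputs were logically different pairs of fields.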
   779  
   780  // run implements the cache-miss case.
   781  // This function does not access the snapshot.
   782  //
   783  // Postcondition: on success, the analyzeSummary.Actions
   784  // key set is {stableNames[a] for a in analyzers}.
   785  func (an *analysisNode) run(ctx context.Context) (*analyzeSummary, error) {
   786  	// Parse only the "compiled" Go files.
   787  	// Do the computation in parallel.
   788  	parsed := make([]*ParsedGoFile, len(an.files))
   789  	{
   790  		var group errgroup.Group
   791  		group.SetLimit(4) // not too much: run itself is already called in parallel
   792  		for i, fh := range an.files {
   793  			i, fh := i, fh
   794  			group.Go(func() error {
   795  				// Call parseGoImpl directly, not the caching wrapper,
   796  				// as cached ASTs require the global FileSet.
   797  				// ast.Object resolution is unfortunately an implied part of the
   798  				// go/analysis contract.
   799  				pgf, err := parseGoImpl(ctx, an.fset, fh, ParseFull&^parser.SkipObjectResolution, false)
   800  				parsed[i] = pgf
   801  				return err
   802  			})
   803  		}
   804  		if err := group.Wait(); err != nil {
   805  			return nil, err // cancelled, or catastrophic error (e.g. missing file)
   806  		}
   807  	}
   808  
   809  	// Type-check the package syntax.
   810  	pkg := an.typeCheck(parsed)
   811  
   812  	// Publish the completed package.
   813  	an.typesOnce.Do(func() { an.types = pkg.types })
   814  	if an.types != pkg.types {
   815  		log.Fatalf("typesOnce prematurely done")
   816  	}
   817  
   818  	// Compute the union of exportDeps across our direct imports.
   819  	// This is the set that will be needed by the fact decoder.
   820  	allExportDeps := make(map[PackagePath]*analysisNode)
   821  	for _, succ := range an.succs {
   822  		for k, v := range succ.exportDeps {
   823  			allExportDeps[k] = v
   824  		}
   825  	}
   826  
   827  	// The fact decoder needs a means to look up a Package by path.
   828  	pkg.factsDecoder = facts.NewDecoderFunc(pkg.types, func(path string) *types.Package {
   829  		// Note: Decode is called concurrently, and thus so is this function.
   830  
   831  		// Does the fact relate to a package referenced by export data?
   832  		if dep, ok := allExportDeps[PackagePath(path)]; ok {
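        			// This Do is a cheap assertion: its body can only run (and
        			// crash) if dep's types were never populated, which the
        			// design guarantees cannot happen by the time facts are
        			// decoded.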
   833  			dep.typesOnce.Do(func() { log.Fatal("dep.types not populated") })
   834  			if dep.typesErr == nil {
   835  				return dep.types
   836  			}
   837  			return nil
   838  		}
   839  
   840  		// If the fact relates to a dependency not referenced
   841  		// by export data, it is safe to ignore it.
   842  		// (In that case dep.types exists but may be unpopulated
   843  		// or in the process of being populated from export data.)
   844  		if an.allDeps[PackagePath(path)] == nil {
   845  			log.Fatalf("fact package %q is not a dependency", path)
   846  		}
   847  		return nil
   848  	})
   849  
   850  	// Poll cancellation state.
   851  	if err := ctx.Err(); err != nil {
   852  		return nil, err
   853  	}
   854  
   855  	// -- analysis --
   856  
   857  	// Build action graph for this package.
   858  	// Each graph node (action) is one unit of analysis.
   859  	actions := make(map[*analysis.Analyzer]*action)
   860  	var mkAction func(a *analysis.Analyzer) *action
   861  	mkAction = func(a *analysis.Analyzer) *action {
   862  		act, ok := actions[a]
   863  		if !ok {
   864  			var hdeps []*action
   865  			for _, req := range a.Requires {
   866  				hdeps = append(hdeps, mkAction(req))
   867  			}
   868  			act = &action{
   869  				a:          a,
   870  				stableName: an.stableNames[a],
   871  				pkg:        pkg,
   872  				vdeps:      an.succs,
   873  				hdeps:      hdeps,
   874  			}
   875  			actions[a] = act
   876  		}
   877  		return act
   878  	}
   879  
   880  	// Build actions for initial package.
   881  	var roots []*action
   882  	for _, a := range an.analyzers {
   883  		roots = append(roots, mkAction(a))
   884  	}
   885  
   886  	// Execute the graph in parallel.
   887  	execActions(roots)
   888  	// Inv: each root's summary is set (whether success or error).
   889  
   890  	// Don't return (or cache) the result in case of cancellation.
   891  	if err := ctx.Err(); err != nil {
   892  		return nil, err // cancelled
   893  	}
   894  
   895  	// Return summaries only for the requested actions.
   896  	summaries := make(map[string]*actionSummary)
   897  	for _, root := range roots {
   898  		if root.summary == nil {
   899  			panic("root has nil action.summary (#60551)")
   900  		}
   901  		summaries[root.stableName] = root.summary
   902  	}
   903  
   904  	return &analyzeSummary{
   905  		Export:         pkg.export,
   906  		DeepExportHash: pkg.deepExportHash,
   907  		Compiles:       pkg.compiles,
   908  		Actions:        summaries,
   909  	}, nil
   910  }
   911  
   912  // Postcondition: analysisPackage.types and an.exportDeps are populated.
   913  func (an *analysisNode) typeCheck(parsed []*ParsedGoFile) *analysisPackage {
   914  	mp := an.mp
   915  
   916  	if false { // debugging
   917  		log.Println("typeCheck", mp.ID)
   918  	}
   919  
   920  	pkg := &analysisPackage{
   921  		mp:       mp,
   922  		fset:     an.fset,
   923  		parsed:   parsed,
   924  		files:    make([]*ast.File, len(parsed)),
   925  		compiles: len(mp.Errors) == 0, // false => list error
   926  		types:    types.NewPackage(string(mp.PkgPath), string(mp.Name)),
   927  		typesInfo: &types.Info{
   928  			Types:      make(map[ast.Expr]types.TypeAndValue),
   929  			Defs:       make(map[*ast.Ident]types.Object),
   930  			Instances:  make(map[*ast.Ident]types.Instance),
   931  			Implicits:  make(map[ast.Node]types.Object),
   932  			Selections: make(map[*ast.SelectorExpr]*types.Selection),
   933  			Scopes:     make(map[ast.Node]*types.Scope),
   934  			Uses:       make(map[*ast.Ident]types.Object),
   935  		},
   936  		typesSizes: mp.TypesSizes,
   937  	}
   938  	versions.InitFileVersions(pkg.typesInfo)
   939  
   940  	// Unsafe has no syntax.
   941  	if mp.PkgPath == "unsafe" {
   942  		pkg.types = types.Unsafe
   943  		return pkg
   944  	}
   945  
   946  	for i, p := range parsed {
   947  		pkg.files[i] = p.File
   948  		if p.ParseErr != nil {
   949  			pkg.compiles = false // parse error
   950  		}
   951  	}
   952  
   953  	for _, vdep := range an.succs {
   954  		if !vdep.summary.Compiles {
   955  			pkg.compiles = false // transitive error
   956  		}
   957  	}
   958  
   959  	cfg := &types.Config{
   960  		Sizes: mp.TypesSizes,
   961  		Error: func(e error) {
   962  			pkg.compiles = false // type error
   963  
   964  			// Suppress type errors in files with parse errors
   965  			// as parser recovery can be quite lossy (#59888).
   966  			typeError := e.(types.Error)
   967  			for _, p := range parsed {
   968  				if p.ParseErr != nil && astutil.NodeContains(p.File, typeError.Pos) {
   969  					return
   970  				}
   971  			}
   972  			pkg.typeErrors = append(pkg.typeErrors, typeError)
   973  		},
   974  		Importer: importerFunc(func(importPath string) (*types.Package, error) {
   975  			// Beware that returning an error from this function
   976  			// will cause the type checker to synthesize a fake
   977  			// package whose Path is importPath, potentially
   978  			// losing a vendor/ prefix. If type-checking errors
   979  			// are swallowed, these packages may be confusing.
   980  
   981  			// Map ImportPath to ID.
   982  			id, ok := mp.DepsByImpPath[ImportPath(importPath)]
   983  			if !ok {
   984  				// The import syntax is inconsistent with the metadata.
   985  				// This could be because the import declaration was
   986  				// incomplete and the metadata only includes complete
   987  				// imports; or because the metadata ignores import
   988  				// edges that would lead to cycles in the graph.
   989  				return nil, fmt.Errorf("missing metadata for import of %q", importPath)
   990  			}
   991  
   992  			// Map ID to node. (id may be "")
   993  			dep := an.succs[id]
   994  			if dep == nil {
   995  				// Analogous to (*snapshot).missingPkgError
   996  				// in the logic for regular type-checking,
   997  				// but without a snapshot we can't provide
   998  				// such detail, and anyway most analysis
   999  				// failures aren't surfaced in the UI.
  1000  				return nil, fmt.Errorf("no required module provides analysis package %q (id=%q)", importPath, id)
  1001  			}
  1002  
  1003  			// (Duplicates logic from check.go.)
  1004  			if !metadata.IsValidImport(an.mp.PkgPath, dep.mp.PkgPath) {
  1005  				return nil, fmt.Errorf("invalid use of internal package %s", importPath)
  1006  			}
  1007  
  1008  			return dep._import()
  1009  		}),
  1010  	}
  1011  
  1012  	// Set Go dialect.
  1013  	if mp.Module != nil && mp.Module.GoVersion != "" {
  1014  		goVersion := "go" + mp.Module.GoVersion
  1015  		// types.NewChecker panics if GoVersion is invalid.
  1016  		// An unparsable mod file should probably stop us
  1017  		// before we get here, but double check just in case.
  1018  		if goVersionRx.MatchString(goVersion) {
  1019  			typesinternal.SetGoVersion(cfg, goVersion)
  1020  		}
  1021  	}
  1022  
  1023  	// We want to type check cgo code if go/types supports it.
  1024  	// We passed typecheckCgo to go/packages when we Loaded.
  1025  	// TODO(adonovan): do we actually need this??
  1026  	typesinternal.SetUsesCgo(cfg)
  1027  
  1028  	check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)
  1029  
  1030  	// Type checking errors are handled via the config, so ignore them here.
  1031  	_ = check.Files(pkg.files)
  1032  
  1033  	// debugging (type errors are quite normal)
  1034  	if false {
  1035  		if pkg.typeErrors != nil {
  1036  			log.Printf("package %s has type errors: %v", pkg.types.Path(), pkg.typeErrors)
  1037  		}
  1038  	}
  1039  
  1040  	// Emit the export data and compute the recursive hash.
  1041  	export, err := gcimporter.IExportShallow(pkg.fset, pkg.types, bug.Reportf)
  1042  	if err != nil {
  1043  		// TODO(adonovan): in light of exporter bugs such as #57729,
  1044  		// consider using bug.Report here and retrying the IExportShallow
  1045  		// call using an empty types.Package.
  1046  		log.Fatalf("internal error writing shallow export data: %v", err)
  1047  	}
  1048  	pkg.export = export
  1049  
  1050  	// Compute a recursive hash to account for the export data of
  1051  	// this package and each dependency referenced by it.
  1052  	// Also, populate exportDeps.
  1053  	hash := sha256.New()
  1054  	fmt.Fprintf(hash, "%s %d\n", mp.PkgPath, len(export))
  1055  	hash.Write(export)
  1056  	paths, err := readShallowManifest(export)
  1057  	if err != nil {
  1058  		log.Fatalf("internal error: bad export data: %v", err)
  1059  	}
  1060  	for _, path := range paths {
  1061  		dep, ok := an.allDeps[path]
  1062  		if !ok {
  1063  			log.Fatalf("%s: missing dependency: %q", an, path)
  1064  		}
  1065  		fmt.Fprintf(hash, "%s %s\n", dep.mp.PkgPath, dep.summary.DeepExportHash)
  1066  		an.exportDeps[path] = dep
  1067  	}
  1068  	an.exportDeps[mp.PkgPath] = an // self
  1069  	hash.Sum(pkg.deepExportHash[:0])
  1070  
  1071  	return pkg
  1072  }
  1073  
  1074  // readShallowManifest returns the manifest of packages referenced by
  1075  // a shallow export data file for a package (excluding the package itself).
  1076  // TODO(adonovan): add a test.
  1077  func readShallowManifest(export []byte) ([]PackagePath, error) {
  1078  	const selfPath = "<self>" // dummy path
  1079  	var paths []PackagePath
  1080  	getPackages := func(items []gcimporter.GetPackagesItem) error {
  1081  		paths = []PackagePath{} // non-nil
  1082  		for _, item := range items {
  1083  			if item.Path != selfPath {
  1084  				paths = append(paths, PackagePath(item.Path))
  1085  			}
  1086  		}
  1087  		return errors.New("stop") // terminate importer
  1088  	}
  1089  	_, err := gcimporter.IImportShallow(token.NewFileSet(), getPackages, export, selfPath, bug.Reportf)
  1090  	if paths == nil {
  1091  		if err != nil {
  1092  			return nil, err // failed before getPackages callback
  1093  		}
  1094  		return nil, bug.Errorf("internal error: IImportShallow did not call getPackages")
  1095  	}
  1096  	return paths, nil // success
  1097  }
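
        // A usage sketch (hypothetical caller): list the packages referenced
        // by a summary's export data. The errors.New("stop") sentinel above
        // deliberately aborts IImportShallow once the manifest callback has
        // run, so only the header of the export data is decoded.
        //
        //	paths, err := readShallowManifest(summary.Export)
        //	if err != nil {
        //		log.Fatalf("bad export data: %v", err)
        //	}
        //	for _, path := range paths {
        //		fmt.Println("references:", path)
        //	}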
  1098  
  1099  // analysisPackage contains information about a package, including
  1100  // syntax trees, used transiently during its type-checking and analysis.
  1101  type analysisPackage struct {
  1102  	mp             *metadata.Package
  1103  	fset           *token.FileSet // shared by the entire batch; see analysisNode.fset
  1104  	parsed         []*ParsedGoFile
  1105  	files          []*ast.File // same as parsed[i].File
  1106  	types          *types.Package
  1107  	compiles       bool // package is transitively free of list/parse/type errors
  1108  	factsDecoder   *facts.Decoder
  1109  	export         []byte    // encoding of types.Package
  1110  	deepExportHash file.Hash // reflexive transitive hash of export data
  1111  	typesInfo      *types.Info
  1112  	typeErrors     []types.Error
  1113  	typesSizes     types.Sizes
  1114  }
  1115  
  1116  // An action represents one unit of analysis work: the application of
  1117  // one analysis to one package. Actions form a DAG, both within a
  1118  // package (as different analyzers are applied, either in sequence or
  1119  // parallel), and across packages (as dependencies are analyzed).
  1120  type action struct {
  1121  	once       sync.Once
  1122  	a          *analysis.Analyzer
  1123  	stableName string // cross-process stable name of analyzer
  1124  	pkg        *analysisPackage
  1125  	hdeps      []*action                   // horizontal dependencies
  1126  	vdeps      map[PackageID]*analysisNode // vertical dependencies
  1127  
  1128  	// results of action.exec():
  1129  	result  interface{} // result of Run function, of type a.ResultType
  1130  	summary *actionSummary
  1131  	err     error
  1132  }
  1133  
  1134  func (act *action) String() string {
  1135  	return fmt.Sprintf("%s@%s", act.a.Name, act.pkg.mp.ID)
  1136  }
  1137  
  1138  // execActions executes a set of action graph nodes in parallel.
  1139  // Postcondition: each action.summary is set, even in case of error.
  1140  func execActions(actions []*action) {
  1141  	var wg sync.WaitGroup
  1142  	for _, act := range actions {
  1143  		act := act
  1144  		wg.Add(1)
  1145  		go func() {
  1146  			defer wg.Done()
  1147  			act.once.Do(func() {
  1148  				execActions(act.hdeps) // analyze "horizontal" dependencies
  1149  				act.result, act.summary, act.err = act.exec()
  1150  				if act.err != nil {
  1151  					act.summary = &actionSummary{Err: act.err.Error()}
  1152  					// TODO(adonovan): suppress logging. But
  1153  					// shouldn't the root error's causal chain
  1154  					// include this information?
  1155  					if false { // debugging
  1156  						log.Printf("act.exec(%v) failed: %v", act, act.err)
  1157  					}
  1158  				}
  1159  			})
  1160  			if act.summary == nil {
  1161  				panic("nil action.summary (#60551)")
  1162  			}
  1163  		}()
  1164  	}
  1165  	wg.Wait()
  1166  }
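
        // Actions form a DAG, not a tree: two analyzers may both Require the
        // same analyzer (e.g. inspect), so an action can be reached along
        // several paths. The act.once above guarantees the body runs at most
        // once per (package, analyzer) pair, while every caller still
        // observes a populated summary afterwards. The idiom in miniature
        // (hypothetical names):
        //
        //	var once sync.Once
        //	var result int
        //	compute := func() { result = expensiveStep() }
        //	// Any number of goroutines may call once.Do(compute);
        //	// expensiveStep runs once, and all callers then see result.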
  1167  
  1168  // exec defines the execution of a single action.
  1169  // It returns the (ephemeral) result of the analyzer's Run function,
  1170  // along with its (serializable) facts and diagnostics.
  1171  // Or it returns an error if the analyzer did not run to
  1172  // completion and deliver a valid result.
  1173  func (act *action) exec() (interface{}, *actionSummary, error) {
  1174  	analyzer := act.a
  1175  	pkg := act.pkg
  1176  
  1177  	hasFacts := len(analyzer.FactTypes) > 0
  1178  
  1179  	// Report an error if any action dependency (vertical or horizontal) failed.
  1180  	// To avoid long error messages describing chains of failure,
  1181  	// we return the dependencies' error unadorned.
  1182  	if hasFacts {
  1183  		// TODO(adonovan): use deterministic order.
  1184  		for _, vdep := range act.vdeps {
  1185  			if summ := vdep.summary.Actions[act.stableName]; summ.Err != "" {
  1186  				return nil, nil, errors.New(summ.Err)
  1187  			}
  1188  		}
  1189  	}
  1190  	for _, dep := range act.hdeps {
  1191  		if dep.err != nil {
  1192  			return nil, nil, dep.err
  1193  		}
  1194  	}
  1195  	// Inv: all action dependencies succeeded.
  1196  
  1197  	// Were there list/parse/type errors that might prevent analysis?
  1198  	if !pkg.compiles && !analyzer.RunDespiteErrors {
  1199  		return nil, nil, fmt.Errorf("skipping analysis %q because package %q does not compile", analyzer.Name, pkg.mp.ID)
  1200  	}
  1201  	// Inv: package is well-formed enough to proceed with analysis.
  1202  
  1203  	if false { // debugging
  1204  		log.Println("action.exec", act)
  1205  	}
  1206  
  1207  	// Gather analysis Result values from horizontal dependencies.
  1208  	inputs := make(map[*analysis.Analyzer]interface{})
  1209  	for _, dep := range act.hdeps {
  1210  		inputs[dep.a] = dep.result
  1211  	}
  1212  
  1213  	// TODO(adonovan): opt: facts.Set works but it may be more
  1214  	// efficient to fork and tailor it to our precise needs.
  1215  	//
  1216  	// We've already sharded the fact encoding by action
  1217  	// so that it can be done in parallel.
  1218  	// We could eliminate locking.
  1219  	// We could also dovetail more closely with the export data
  1220  	// decoder to obtain a more compact representation of
  1221  	// packages and objects (e.g. its internal IDs, instead
  1222  	// of PkgPaths and objectpaths.)
  1223  	// More importantly, we should avoid re-export of
  1224  	// facts that related to objects that are discarded
  1225  	// by "deep" export data. Better still, use a "shallow" approach.
  1226  
  1227  	// Read and decode analysis facts for each direct import.
  1228  	factset, err := pkg.factsDecoder.Decode(func(pkgPath string) ([]byte, error) {
  1229  		if !hasFacts {
  1230  			return nil, nil // analyzer doesn't use facts, so no vdeps
  1231  		}
  1232  
  1233  		// Package.Imports() may contain a fake "C" package. Ignore it.
  1234  		if pkgPath == "C" {
  1235  			return nil, nil
  1236  		}
  1237  
  1238  		id, ok := pkg.mp.DepsByPkgPath[PackagePath(pkgPath)]
  1239  		if !ok {
  1240  			// This may mean the package was synthesized by the type
  1241  			// checker because the import failed for some reason
  1242  			// (e.g. bug processing export data; metadata ignoring
  1243  			// a cycle-forming import).
  1244  			// In that case, the fake package's path
  1245  			// is set to the failed importPath (and thus
  1246  			// it may lack a "vendor/" prefix).
  1247  			//
  1248  			// For now, silently ignore it on the assumption
  1249  			// that the error is already reported elsewhere.
  1250  			// return nil, fmt.Errorf("missing metadata")
  1251  			return nil, nil
  1252  		}
  1253  
  1254  		vdep := act.vdeps[id]
  1255  		if vdep == nil {
  1256  			return nil, bug.Errorf("internal error in %s: missing vdep for id=%s", pkg.types.Path(), id)
  1257  		}
  1258  
  1259  		return vdep.summary.Actions[act.stableName].Facts, nil
  1260  	})
  1261  	if err != nil {
  1262  		return nil, nil, fmt.Errorf("internal error decoding analysis facts: %w", err)
  1263  	}
  1264  
  1265  	// TODO(adonovan): make Export*Fact panic rather than discarding
  1266  	// undeclared fact types, so that we discover bugs in analyzers.
  1267  	factFilter := make(map[reflect.Type]bool)
  1268  	for _, f := range analyzer.FactTypes {
  1269  		factFilter[reflect.TypeOf(f)] = true
  1270  	}
  1271  
  1272  	// If the package contains "fixed" files, it's not necessarily an error if we
  1273  	// can't convert positions.
  1274  	hasFixedFiles := false
  1275  	for _, p := range pkg.parsed {
  1276  		if p.Fixed() {
  1277  			hasFixedFiles = true
  1278  			break
  1279  		}
  1280  	}
  1281  
  1282  	// posToLocation converts from token.Pos to protocol form.
  1283  	// TODO(adonovan): improve error messages.
  1284  	posToLocation := func(start, end token.Pos) (protocol.Location, error) {
  1285  		tokFile := pkg.fset.File(start)
  1286  
  1287  		for _, p := range pkg.parsed {
  1288  			if p.Tok == tokFile {
  1289  				if end == token.NoPos {
  1290  					end = start
  1291  				}
  1292  				return p.PosLocation(start, end)
  1293  			}
  1294  		}
  1295  		errorf := bug.Errorf
  1296  		if hasFixedFiles {
  1297  			errorf = fmt.Errorf
  1298  		}
  1299  		return protocol.Location{}, errorf("token.Pos not within package")
  1300  	}
  1301  
  1302  	// Now run the (pkg, analyzer) action.
  1303  	var diagnostics []gobDiagnostic
  1304  	pass := &analysis.Pass{
  1305  		Analyzer:   analyzer,
  1306  		Fset:       pkg.fset,
  1307  		Files:      pkg.files,
  1308  		Pkg:        pkg.types,
  1309  		TypesInfo:  pkg.typesInfo,
  1310  		TypesSizes: pkg.typesSizes,
  1311  		TypeErrors: pkg.typeErrors,
  1312  		ResultOf:   inputs,
  1313  		Report: func(d analysis.Diagnostic) {
  1314  			diagnostic, err := toGobDiagnostic(posToLocation, analyzer, d)
  1315  			if err != nil {
  1316  				if !hasFixedFiles {
  1317  					bug.Reportf("internal error converting diagnostic from analyzer %q: %v", analyzer.Name, err)
  1318  				}
  1319  				return
  1320  			}
  1321  			diagnostics = append(diagnostics, diagnostic)
  1322  		},
  1323  		ImportObjectFact:  factset.ImportObjectFact,
  1324  		ExportObjectFact:  factset.ExportObjectFact,
  1325  		ImportPackageFact: factset.ImportPackageFact,
  1326  		ExportPackageFact: factset.ExportPackageFact,
  1327  		AllObjectFacts:    func() []analysis.ObjectFact { return factset.AllObjectFacts(factFilter) },
  1328  		AllPackageFacts:   func() []analysis.PackageFact { return factset.AllPackageFacts(factFilter) },
  1329  	}
  1330  
  1331  	// Recover from panics (only) within the analyzer logic.
  1332  	// (Use an anonymous function to limit the recover scope.)
  1333  	var result interface{}
  1334  	func() {
  1335  		start := time.Now()
  1336  		defer func() {
  1337  			if r := recover(); r != nil {
  1338  				// An Analyzer panicked, likely due to a bug.
  1339  				//
  1340  				// In general we want to discover and fix such panics quickly,
  1341  				// so we don't suppress them, but some bugs in third-party
  1342  				// analyzers cannot be quickly fixed, so we use an allowlist
  1343  				// to suppress panics.
  1344  				const strict = true
  1345  				if strict && bug.PanicOnBugs &&
  1346  					analyzer.Name != "buildir" { // see https://github.com/dominikh/go-tools/issues/1343
  1347  					// Change "if false" to "if true" below when debugging
  1348  					// suspected failures in the driver, not the analyzer.
  1349  					if false {
  1350  						debug.SetTraceback("all") // show all goroutines
  1351  					}
  1352  					panic(r)
  1353  				} else {
  1354  					// In production, suppress the panic and press on.
  1355  					err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pass.Pkg.Path(), r)
  1356  				}
  1357  			}
  1358  
  1359  			// Accumulate running time for each checker.
  1360  			analyzerRunTimesMu.Lock()
  1361  			analyzerRunTimes[analyzer] += time.Since(start)
  1362  			analyzerRunTimesMu.Unlock()
  1363  		}()
  1364  
  1365  		result, err = pass.Analyzer.Run(pass)
  1366  	}()
  1367  	if err != nil {
  1368  		return nil, nil, err
  1369  	}
  1370  
  1371  	if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
  1372  		return nil, nil, bug.Errorf(
  1373  			"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
  1374  			pass.Pkg.Path(), pass.Analyzer, got, want)
  1375  	}
  1376  
  1377  	// Disallow Export*Fact calls after Run.
  1378  	// (A panic means the Analyzer is abusing concurrency.)
  1379  	pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) {
  1380  		panic(fmt.Sprintf("%v: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
  1381  	}
  1382  	pass.ExportPackageFact = func(fact analysis.Fact) {
  1383  		panic(fmt.Sprintf("%v: Pass.ExportPackageFact(%T) called after Run", act, fact))
  1384  	}
  1385  
  1386  	factsdata := factset.Encode()
  1387  	return result, &actionSummary{
  1388  		Diagnostics: diagnostics,
  1389  		Facts:       factsdata,
  1390  		FactsHash:   file.HashOf(factsdata),
  1391  	}, nil
  1392  }
  1393  
  1394  var (
  1395  	analyzerRunTimesMu sync.Mutex
  1396  	analyzerRunTimes   = make(map[*analysis.Analyzer]time.Duration)
  1397  )
  1398  
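        // LabelDuration is a (label, duration) pair, used here to report the
        // accumulated running time of each analyzer.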
  1399  type LabelDuration struct {
  1400  	Label    string
  1401  	Duration time.Duration
  1402  }
  1403  
  1404  // AnalyzerRunTimes returns the accumulated time spent in each Analyzer's
  1405  // Run function since process start, in descending order.
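        //
        // A minimal usage sketch (hypothetical caller):
        //
        //	for _, ld := range AnalyzerRunTimes() {
        //		fmt.Printf("%-24s %v\n", ld.Label, ld.Duration)
        //	}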
  1406  func AnalyzerRunTimes() []LabelDuration {
  1407  	analyzerRunTimesMu.Lock()
  1408  	defer analyzerRunTimesMu.Unlock()
  1409  
  1410  	slice := make([]LabelDuration, 0, len(analyzerRunTimes))
  1411  	for a, t := range analyzerRunTimes {
  1412  		slice = append(slice, LabelDuration{Label: a.Name, Duration: t})
  1413  	}
  1414  	sort.Slice(slice, func(i, j int) bool {
  1415  		return slice[i].Duration > slice[j].Duration
  1416  	})
  1417  	return slice
  1418  }
  1419  
  1420  // requiredAnalyzers returns the transitive closure of required analyzers in preorder.
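        //
        // For example (A, B, C are hypothetical analyzers): if A requires B
        // and C, and B also requires C, the result for input [A] is [A B C];
        // C is recorded when first reached, via B, and is not repeated.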
  1421  func requiredAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer {
  1422  	var result []*analysis.Analyzer
  1423  	seen := make(map[*analysis.Analyzer]bool)
  1424  	var visitAll func([]*analysis.Analyzer)
  1425  	visitAll = func(analyzers []*analysis.Analyzer) {
  1426  		for _, a := range analyzers {
  1427  			if !seen[a] {
  1428  				seen[a] = true
  1429  				result = append(result, a)
  1430  				visitAll(a.Requires)
  1431  			}
  1432  		}
  1433  	}
  1434  	visitAll(analyzers)
  1435  	return result
  1436  }
  1437  
  1438  var analyzeSummaryCodec = frob.CodecFor[*analyzeSummary]()
  1439  
  1440  // -- data types for serialization of analysis.Diagnostic and golang.Diagnostic --
  1441  
  1442  // (The name says gob but we use frob.)
  1443  var diagnosticsCodec = frob.CodecFor[[]gobDiagnostic]()
  1444  
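        // gobDiagnostic is a serializable counterpart of analysis.Diagnostic,
        // in which token.Pos positions have been expanded to protocol.Location form.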
  1445  type gobDiagnostic struct {
  1446  	Location       protocol.Location
  1447  	Severity       protocol.DiagnosticSeverity
  1448  	Code           string
  1449  	CodeHref       string
  1450  	Source         string
  1451  	Message        string
  1452  	SuggestedFixes []gobSuggestedFix
  1453  	Related        []gobRelatedInformation
  1454  	Tags           []protocol.DiagnosticTag
  1455  }
  1456  
  1457  type gobRelatedInformation struct {
  1458  	Location protocol.Location
  1459  	Message  string
  1460  }
  1461  
  1462  type gobSuggestedFix struct {
  1463  	Message    string
  1464  	TextEdits  []gobTextEdit
  1465  	Command    *gobCommand
  1466  	ActionKind protocol.CodeActionKind
  1467  }
  1468  
  1469  type gobCommand struct {
  1470  	Title     string
  1471  	Command   string
  1472  	Arguments []json.RawMessage
  1473  }
  1474  
  1475  type gobTextEdit struct {
  1476  	Location protocol.Location
  1477  	NewText  []byte
  1478  }
  1479  
  1480  // toGobDiagnostic converts an analysis.Diagnostic to a serializable gobDiagnostic,
  1481  // which requires expanding token.Pos positions into protocol.Location form.
  1482  func toGobDiagnostic(posToLocation func(start, end token.Pos) (protocol.Location, error), a *analysis.Analyzer, diag analysis.Diagnostic) (gobDiagnostic, error) {
  1483  	var fixes []gobSuggestedFix
  1484  	for _, fix := range diag.SuggestedFixes {
  1485  		var gobEdits []gobTextEdit
  1486  		for _, textEdit := range fix.TextEdits {
  1487  			loc, err := posToLocation(textEdit.Pos, textEdit.End)
  1488  			if err != nil {
  1489  				return gobDiagnostic{}, fmt.Errorf("in SuggestedFixes: %w", err)
  1490  			}
  1491  			gobEdits = append(gobEdits, gobTextEdit{
  1492  				Location: loc,
  1493  				NewText:  textEdit.NewText,
  1494  			})
  1495  		}
  1496  		fixes = append(fixes, gobSuggestedFix{
  1497  			Message:   fix.Message,
  1498  			TextEdits: gobEdits,
  1499  		})
  1500  	}
  1501  
  1502  	var related []gobRelatedInformation
  1503  	for _, r := range diag.Related {
  1504  		loc, err := posToLocation(r.Pos, r.End)
  1505  		if err != nil {
  1506  			return gobDiagnostic{}, fmt.Errorf("in Related: %w", err)
  1507  		}
  1508  		related = append(related, gobRelatedInformation{
  1509  			Location: loc,
  1510  			Message:  r.Message,
  1511  		})
  1512  	}
  1513  
  1514  	loc, err := posToLocation(diag.Pos, diag.End)
  1515  	if err != nil {
  1516  		return gobDiagnostic{}, err
  1517  	}
  1518  
  1519  	// The Code column of VSCode's Problems table renders this
  1520  	// information as "Source(Code)", where Code is a link to CodeHref.
  1521  	// (The Code field must be nonempty for anything to appear.)
  1522  	diagURL := effectiveURL(a, diag)
  1523  	code := "default"
  1524  	if diag.Category != "" {
  1525  		code = diag.Category
  1526  	}
  1527  
  1528  	return gobDiagnostic{
  1529  		Location: loc,
  1530  		// Severity for analysis diagnostics is dynamic,
  1531  		// based on user configuration per analyzer.
  1532  		Code:           code,
  1533  		CodeHref:       diagURL,
  1534  		Source:         a.Name,
  1535  		Message:        diag.Message,
  1536  		SuggestedFixes: fixes,
  1537  		Related:        related,
  1538  		// Analysis diagnostics do not contain tags.
  1539  	}, nil
  1540  }
  1541  
  1542  // effectiveURL computes the effective URL of diag,
  1543  // using the algorithm specified at Diagnostic.URL.
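        //
        // For example (illustrative values only): with a.URL =
        // "https://example.org/analyzers/foo", an empty diag.URL, and
        // diag.Category = "bar", the fragment reference "#bar" is resolved
        // against the base URL, yielding "https://example.org/analyzers/foo#bar".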
  1544  func effectiveURL(a *analysis.Analyzer, diag analysis.Diagnostic) string {
  1545  	u := diag.URL
  1546  	if u == "" && diag.Category != "" {
  1547  		u = "#" + diag.Category
  1548  	}
  1549  	if base, err := urlpkg.Parse(a.URL); err == nil {
  1550  		if rel, err := urlpkg.Parse(u); err == nil {
  1551  			u = base.ResolveReference(rel).String()
  1552  		}
  1553  	}
  1554  	return u
  1555  }
  1556  
  1557  // stableName returns a name for the analyzer that is unique and
  1558  // stable across address spaces.
  1559  //
  1560  // Analyzer names are not unique. For example, gopls includes
  1561  // both x/tools/go/analysis/passes/nilness and staticcheck/nilness.
  1562  // For serialization, we must assign each analyzer a unique identifier
  1563  // that two gopls processes accessing the cache can agree on.
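        //
        // The result has the form "name(file.go:line)": for example, a
        // "nilness" analyzer whose Run function is defined at line 42 of
        // nilness.go (hypothetical) gets the stable name "nilness(nilness.go:42)".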
  1564  func stableName(a *analysis.Analyzer) string {
  1565  	// Incorporate the file and line of the analyzer's Run function.
  1566  	addr := reflect.ValueOf(a.Run).Pointer()
  1567  	fn := runtime.FuncForPC(addr)
  1568  	file, line := fn.FileLine(addr)
  1569  
  1570  	// It is tempting to use just a.Name as the stable name when
  1571  	// it is unique, but making them always differ helps avoid
  1572  	// name/stablename confusion.
  1573  	return fmt.Sprintf("%s(%s:%d)", a.Name, filepath.Base(file), line)
  1574  }