cuelang.org/go@v0.13.0/internal/golangorgx/gopls/cache/analysis.go (about)

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package cache
     6  
     7  // This file defines gopls' driver for modular static analysis (go/analysis).
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"crypto/sha256"
    13  	"encoding/gob"
    14  	"encoding/json"
    15  	"errors"
    16  	"fmt"
    17  	"go/ast"
    18  	"go/parser"
    19  	"go/token"
    20  	"go/types"
    21  	"log"
    22  	urlpkg "net/url"
    23  	"path/filepath"
    24  	"reflect"
    25  	"runtime"
    26  	"runtime/debug"
    27  	"sort"
    28  	"sync"
    29  	"sync/atomic"
    30  	"time"
    31  
    32  	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
    33  	"cuelang.org/go/internal/golangorgx/gopls/file"
    34  	"cuelang.org/go/internal/golangorgx/gopls/filecache"
    35  	"cuelang.org/go/internal/golangorgx/gopls/protocol"
    36  	"cuelang.org/go/internal/golangorgx/gopls/util/astutil"
    37  	"cuelang.org/go/internal/golangorgx/gopls/util/bug"
    38  	"cuelang.org/go/internal/golangorgx/gopls/util/frob"
    39  	"cuelang.org/go/internal/golangorgx/tools/event"
    40  	"cuelang.org/go/internal/golangorgx/tools/facts"
    41  	"cuelang.org/go/internal/golangorgx/tools/gcimporter"
    42  	"cuelang.org/go/internal/golangorgx/tools/typesinternal"
    43  	"cuelang.org/go/internal/golangorgx/tools/versions"
    44  	"golang.org/x/sync/errgroup"
    45  	"golang.org/x/tools/go/analysis"
    46  )
    47  
    48  /*
    49  
    50     DESIGN
    51  
    52     An analysis request (Snapshot.Analyze) is for a set of Analyzers and
    53     PackageIDs. The result is the set of diagnostics for those
    54     packages. Each request constructs a transitively closed DAG of
    55     nodes, each representing a package, then works bottom up in
    56     parallel postorder calling runCached to ensure that each node's
    57     analysis summary is up to date. The summary contains the analysis
    58     diagnostics as well as the intermediate results required by the
    59     recursion, such as serialized types and facts.
    60  
    61     The entire DAG is ephemeral. Each node in the DAG records the set
    62     of analyzers to run: the complete set for the root packages, and
    63     the "facty" subset for dependencies. Each package is thus analyzed
    64     at most once. The entire DAG shares a single FileSet for parsing
    65     and importing.
    66  
    67     Each node is processed by runCached. It gets the source file
    68     content hashes for package p, and the summaries of its "vertical"
    69     dependencies (direct imports), and from them it computes a key
    70     representing the unit of work (parsing, type-checking, and
    71     analysis) that it has to do. The key is a cryptographic hash of the
    72     "recipe" for this step, including the Metadata, the file contents,
    73     the set of analyzers, and the type and fact information from the
    74     vertical dependencies.
    75  
    76     The key is sought in a machine-global persistent file-system based
    77     cache. If this gopls process, or another gopls process on the same
    78     machine, has already performed this analysis step, runCached will
    79     make a cache hit and load the serialized summary of the results. If
    80     not, it will have to proceed to run() to parse and type-check the
    81     package and then apply a set of analyzers to it. (The set of
    82     analyzers applied to a single package itself forms a graph of
    83     "actions", and it too is evaluated in parallel postorder; these
    84     dependency edges within the same package are called "horizontal".)
    85     Finally it writes a new cache entry. The entry contains serialized
    86     types (export data) and analysis facts.
    87  
    88     Each node in the DAG acts like a go/types importer mapping,
    89     providing a consistent view of packages and their objects: the
    90     mapping for a node is a superset of its dependencies' mappings.
    91     Every node has an associated *types.Package, initially nil. A
    92     package is populated during run (cache miss) by type-checking its
    93     syntax; but for a cache hit, the package is populated lazily, i.e.
    94     not until it later becomes necessary because it is imported
    95     directly or referenced by export data higher up in the DAG.
    96  
    97     For types, we use "shallow" export data. Historically, the Go
    98     compiler always produced a summary of the types for a given package
    99     that included types from other packages that it indirectly
   100     referenced: "deep" export data. This had the advantage that the
   101     compiler (and analogous tools such as gopls) need only load one
   102     file per direct import.  However, it meant that the files tended to
   103     get larger based on the level of the package in the import
   104     graph. For example, higher-level packages in the kubernetes module
   105     have over 1MB of "deep" export data, even when they have almost no
   106     content of their own, merely because they mention a major type that
   107     references many others. In pathological cases the export data was
   108     300x larger than the source for a package due to this quadratic
   109     growth.
   110  
   111     "Shallow" export data means that the serialized types describe only
   112     a single package. If those types mention types from other packages,
   113     the type checker may need to request additional packages beyond
   114     just the direct imports. Type information for the entire transitive
   115     closure of imports is provided (lazily) by the DAG.
   116  
   117     For correct dependency analysis, the digest used as a cache key
   118     must reflect the "deep" export data, so it is derived recursively
   119     from the transitive closure. As an optimization, we needn't include
   120     every package of the transitive closure in the deep hash, only the
   121     packages that were actually requested by the type checker. This
   122     allows changes to a package that have no effect on its export data
   123     to be "pruned". The direct consumer will need to be re-executed,
   124     but if its export data is unchanged as a result, then indirect
   125     consumers may not need to be re-executed.  This allows, for example,
   126     one to insert a print statement in a function and not "rebuild" the
   127     whole application (though export data does record line numbers and
   128     offsets of types which may be perturbed by otherwise insignificant
   129     changes.)
   130  
   131     The summary must record whether a package is transitively
   132     error-free (whether it would compile) because many analyzers are
   133     not safe to run on packages with inconsistent types.
   134  
   135     For fact encoding, we use the same fact set as the unitchecker
   136     (vet) to record and serialize analysis facts. The fact
   137     serialization mechanism is analogous to "deep" export data.
   138  
   139  */
   140  
   141  // TODO(adonovan):
   142  // - Add a (white-box) test of pruning when a change doesn't affect export data.
   143  // - Optimise pruning based on subset of packages mentioned in exportdata.
   144  // - Better logging so that it is possible to deduce why an analyzer
   145  //   is not being run--often due to very indirect failures.
   146  //   Even if the ultimate consumer decides to ignore errors,
   147  //   tests and other situations want to be assured of freedom from
   148  //   errors, not just missing results. This should be recorded.
   149  // - Split this into a subpackage, gopls/internal/cache/driver,
   150  //   consisting of this file and three helpers from errors.go.
   151  //   The (*snapshot).Analyze method would stay behind and make calls
   152  //   to the driver package.
   153  //   Steps:
   154  //   - define a narrow driver.Snapshot interface with only these methods:
   155  //        Metadata(PackageID) Metadata
   156  //        ReadFile(Context, URI) (file.Handle, error)
   157  //        View() *View // for Options
   158  //   - share cache.{goVersionRx,parseGoImpl}
   159  
// AnalysisProgressTitle is the title of the progress report for ongoing
// analysis. It is sought by regression tests for the progress reporting
// feature.
// (Do not change this string without updating those tests.)
const AnalysisProgressTitle = "Analyzing Dependencies"
   164  
   165  func (an *analysisNode) decrefPreds() {
   166  	if atomic.AddInt32(&an.unfinishedPreds, -1) == 0 {
   167  		an.summary.Actions = nil
   168  	}
   169  }
   170  
// An analysisNode is a node in a doubly-linked DAG isomorphic to the
// import graph. Each node represents a single package, and the DAG
// represents a batch of analysis work done at once using a single
// realm of token.Pos or types.Object values.
//
// A complete DAG is created anew for each batch of analysis;
// subgraphs are not reused over time. Each node's *types.Package
// field is initially nil and is populated on demand, either from
// type-checking syntax trees (typeCheck) or from importing export
// data (_import). When this occurs, the typesOnce event becomes
// "done".
//
// Each node's allDeps map is a "view" of all its dependencies keyed by
// package path, which defines the types.Importer mapping used when
// populating the node's types.Package. Different nodes have different
// views (e.g. due to variants), but two nodes that are related by
// graph ordering have views that are consistent in their overlap.
// exportDeps is the subset actually referenced by export data;
// this is the set for which we attempt to decode facts.
//
// Each node's run method is called in parallel postorder. On success,
// its summary field is populated, either from the cache (hit), or by
// type-checking and analyzing syntax (miss).
type analysisNode struct {
	fset            *token.FileSet              // file set shared by entire batch (DAG)
	mp              *metadata.Package           // metadata for this package
	files           []file.Handle               // contents of CompiledGoFiles
	analyzers       []*analysis.Analyzer        // set of analyzers to run
	preds           []*analysisNode             // graph edges:
	succs           map[PackageID]*analysisNode //   (preds -> self -> succs)
	unfinishedSuccs int32                       // presumably the count of succs not yet analyzed; maintained outside this excerpt — confirm at use site
	unfinishedPreds int32                         // effectively a summary.Actions refcount
	allDeps         map[PackagePath]*analysisNode // all dependencies including self
	exportDeps      map[PackagePath]*analysisNode // subset of allDeps ref'd by export data (+self)
	summary         *analyzeSummary               // serializable result of analyzing this package
	stableNames     map[*analysis.Analyzer]string // cross-process stable names for Analyzers

	typesOnce sync.Once      // guards lazy population of types and typesErr fields
	types     *types.Package // type information lazily imported from summary
	typesErr  error          // an error producing type information
}
   212  
   213  func (an *analysisNode) String() string { return string(an.mp.ID) }
   214  
// _import imports this node's types.Package from export data, if not already done.
// (The leading underscore avoids a clash with the import keyword.)
// Precondition: analysis was a success.
// Postcondition: an.types and an.exportDeps are populated.
//
// Safe for concurrent use: typesOnce ensures the body runs at most once,
// and later callers observe the cached types/typesErr result.
func (an *analysisNode) _import() (*types.Package, error) {
	an.typesOnce.Do(func() {
		// unsafe has no export data; use the canonical package object.
		if an.mp.PkgPath == "unsafe" {
			an.types = types.Unsafe
			return
		}

		an.types = types.NewPackage(string(an.mp.PkgPath), string(an.mp.Name))

		// getPackages recursively imports each dependency
		// referenced by the export data, in parallel.
		getPackages := func(items []gcimporter.GetPackagesItem) error {
			var g errgroup.Group
			for i, item := range items {
				path := PackagePath(item.Path)
				dep, ok := an.allDeps[path]
				if !ok {
					// This early return bypasses Wait; that's ok.
					return fmt.Errorf("%s: unknown dependency %q", an.mp, path)
				}
				an.exportDeps[path] = dep // record, for later fact decoding
				if dep == an {
					// Self-reference: hand back the package created above
					// rather than re-entering _import (which would deadlock
					// on typesOnce).
					if an.typesErr != nil {
						return an.typesErr
					} else {
						items[i].Pkg = an.types
					}
				} else {
					i := i // capture loop variable (pre-Go 1.22 semantics)
					g.Go(func() error {
						depPkg, err := dep._import()
						if err == nil {
							items[i].Pkg = depPkg
						}
						return err
					})
				}
			}
			return g.Wait()
		}
		pkg, err := gcimporter.IImportShallow(an.fset, getPackages, an.summary.Export, string(an.mp.PkgPath), bug.Reportf)
		if err != nil {
			an.typesErr = bug.Errorf("%s: invalid export data: %v", an.mp, err)
			an.types = nil
		} else if pkg != an.types {
			// The importer must return the very package we created above.
			log.Fatalf("%s: inconsistent packages", an.mp)
		}
	})
	return an.types, an.typesErr
}
   268  
// analyzeSummary is a gob-serializable summary of successfully
// applying a list of analyzers to a package.
// It is the unit of storage in the shared file cache consulted by
// runCached, so its encoding must remain stable across processes.
type analyzeSummary struct {
	Export         []byte    // encoded types of package
	DeepExportHash file.Hash // hash of reflexive transitive closure of export data
	Compiles       bool      // transitively free of list/parse/type errors
	Actions        actionMap // maps analyzer stablename to analysis results (*actionSummary)
}
   277  
// actionMap defines a stable Gob encoding for a map.
// Plain map encoding follows nondeterministic iteration order; the
// custom encoder below sorts entries by key so the encoded bytes (and
// hence any hash of them) are reproducible.
// TODO(adonovan): generalize and move to a library when we can use generics.
type actionMap map[string]*actionSummary

// Compile-time assertions that actionMap implements the gob interfaces
// (value receiver to encode, pointer receiver to decode).
var (
	_ gob.GobEncoder = (actionMap)(nil)
	_ gob.GobDecoder = (*actionMap)(nil)
)
   286  
// actionsMapEntry is one key/value pair of an actionMap, the element
// type of the key-sorted slice that GobEncode actually serializes.
type actionsMapEntry struct {
	K string         // analyzer stable name
	V *actionSummary // that analyzer's result for the package
}
   291  
   292  func (m actionMap) GobEncode() ([]byte, error) {
   293  	entries := make([]actionsMapEntry, 0, len(m))
   294  	for k, v := range m {
   295  		entries = append(entries, actionsMapEntry{k, v})
   296  	}
   297  	sort.Slice(entries, func(i, j int) bool {
   298  		return entries[i].K < entries[j].K
   299  	})
   300  	var buf bytes.Buffer
   301  	err := gob.NewEncoder(&buf).Encode(entries)
   302  	return buf.Bytes(), err
   303  }
   304  
   305  func (m *actionMap) GobDecode(data []byte) error {
   306  	var entries []actionsMapEntry
   307  	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&entries); err != nil {
   308  		return err
   309  	}
   310  	*m = make(actionMap, len(entries))
   311  	for _, e := range entries {
   312  		(*m)[e.K] = e.V
   313  	}
   314  	return nil
   315  }
   316  
// actionSummary is a gob-serializable summary of one possibly failed analysis action.
// If Err is non-empty, the other fields are undefined.
type actionSummary struct {
	Facts       []byte          // the encoded facts.Set
	FactsHash   file.Hash       // hash(Facts); used in cache keys so facts need not be decoded
	Diagnostics []gobDiagnostic // diagnostics reported by the analyzer (omitted from cache keys)
	Err         string          // "" => success
}
   325  
   326  // runCached applies a list of analyzers (plus any others
   327  // transitively required by them) to a package.  It succeeds as long
   328  // as it could produce a types.Package, even if there were direct or
   329  // indirect list/parse/type errors, and even if all the analysis
   330  // actions failed. It usually fails only if the package was unknown,
   331  // a file was missing, or the operation was cancelled.
   332  //
   333  // Postcondition: runCached must not continue to use the snapshot
   334  // (in background goroutines) after it has returned; see memoize.RefCounted.
   335  func (an *analysisNode) runCached(ctx context.Context) (*analyzeSummary, error) {
   336  	// At this point we have the action results (serialized
   337  	// packages and facts) of our immediate dependencies,
   338  	// and the metadata and content of this package.
   339  	//
   340  	// We now compute a hash for all our inputs, and consult a
   341  	// global cache of promised results. If nothing material
   342  	// has changed, we'll make a hit in the shared cache.
   343  	//
   344  	// The hash of our inputs is based on the serialized export
   345  	// data and facts so that immaterial changes can be pruned
   346  	// without decoding.
   347  	key := an.cacheKey()
   348  
   349  	// Access the cache.
   350  	var summary *analyzeSummary
   351  	const cacheKind = "analysis"
   352  	if data, err := filecache.Get(cacheKind, key); err == nil {
   353  		// cache hit
   354  		analyzeSummaryCodec.Decode(data, &summary)
   355  	} else if err != filecache.ErrNotFound {
   356  		return nil, bug.Errorf("internal error reading shared cache: %v", err)
   357  	} else {
   358  		// Cache miss: do the work.
   359  		var err error
   360  		summary, err = an.run(ctx)
   361  		if err != nil {
   362  			return nil, err
   363  		}
   364  
   365  		atomic.AddInt32(&an.unfinishedPreds, +1) // incref
   366  		go func() {
   367  			defer an.decrefPreds() //decref
   368  
   369  			cacheLimit <- unit{}            // acquire token
   370  			defer func() { <-cacheLimit }() // release token
   371  
   372  			data := analyzeSummaryCodec.Encode(summary)
   373  			if false {
   374  				log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), an.mp.ID)
   375  			}
   376  			if err := filecache.Set(cacheKind, key, data); err != nil {
   377  				event.Error(ctx, "internal error updating analysis shared cache", err)
   378  			}
   379  		}()
   380  	}
   381  
   382  	return summary, nil
   383  }
   384  
// cacheLimit reduces parallelism of cache updates.
// We allow more than typical GOMAXPROCS as it's a mix of CPU and I/O.
// Each background cache write holds one token for its duration
// (see runCached).
var cacheLimit = make(chan unit, 32)
   388  
   389  // analysisCacheKey returns a cache key that is a cryptographic digest
   390  // of the all the values that might affect type checking and analysis:
   391  // the analyzer names, package metadata, names and contents of
   392  // compiled Go files, and vdeps (successor) information
   393  // (export data and facts).
   394  func (an *analysisNode) cacheKey() [sha256.Size]byte {
   395  	hasher := sha256.New()
   396  
   397  	// In principle, a key must be the hash of an
   398  	// unambiguous encoding of all the relevant data.
   399  	// If it's ambiguous, we risk collisions.
   400  
   401  	// analyzers
   402  	fmt.Fprintf(hasher, "analyzers: %d\n", len(an.analyzers))
   403  	for _, a := range an.analyzers {
   404  		fmt.Fprintln(hasher, a.Name)
   405  	}
   406  
   407  	// package metadata
   408  	mp := an.mp
   409  	fmt.Fprintf(hasher, "package: %s %s %s\n", mp.ID, mp.Name, mp.PkgPath)
   410  	// We can ignore m.DepsBy{Pkg,Import}Path: although the logic
   411  	// uses those fields, we account for them by hashing vdeps.
   412  
   413  	// type sizes
   414  	wordSize := an.mp.TypesSizes.Sizeof(types.Typ[types.Int])
   415  	maxAlign := an.mp.TypesSizes.Alignof(types.NewPointer(types.Typ[types.Int64]))
   416  	fmt.Fprintf(hasher, "sizes: %d %d\n", wordSize, maxAlign)
   417  
   418  	// metadata errors: used for 'compiles' field
   419  	fmt.Fprintf(hasher, "errors: %d", len(mp.Errors))
   420  
   421  	// module Go version
   422  	if mp.Module != nil && mp.Module.GoVersion != "" {
   423  		fmt.Fprintf(hasher, "go %s\n", mp.Module.GoVersion)
   424  	}
   425  
   426  	// file names and contents
   427  	fmt.Fprintf(hasher, "files: %d\n", len(an.files))
   428  	for _, fh := range an.files {
   429  		fmt.Fprintln(hasher, fh.Identity())
   430  	}
   431  
   432  	// vdeps, in PackageID order
   433  	depIDs := make([]string, 0, len(an.succs))
   434  	for depID := range an.succs {
   435  		depIDs = append(depIDs, string(depID))
   436  	}
   437  	sort.Strings(depIDs) // TODO(adonovan): avoid conversions by using slices.Sort[PackageID]
   438  	for _, depID := range depIDs {
   439  		vdep := an.succs[PackageID(depID)]
   440  		fmt.Fprintf(hasher, "dep: %s\n", vdep.mp.PkgPath)
   441  		fmt.Fprintf(hasher, "export: %s\n", vdep.summary.DeepExportHash)
   442  
   443  		// action results: errors and facts
   444  		actions := vdep.summary.Actions
   445  		names := make([]string, 0, len(actions))
   446  		for name := range actions {
   447  			names = append(names, name)
   448  		}
   449  		sort.Strings(names)
   450  		for _, name := range names {
   451  			summary := actions[name]
   452  			fmt.Fprintf(hasher, "action %s\n", name)
   453  			if summary.Err != "" {
   454  				fmt.Fprintf(hasher, "error %s\n", summary.Err)
   455  			} else {
   456  				fmt.Fprintf(hasher, "facts %s\n", summary.FactsHash)
   457  				// We can safely omit summary.diagnostics
   458  				// from the key since they have no downstream effect.
   459  			}
   460  		}
   461  	}
   462  
   463  	var hash [sha256.Size]byte
   464  	hasher.Sum(hash[:0])
   465  	return hash
   466  }
   467  
// run implements the cache-miss case: parse, type-check, and apply
// the analyzers to this package, returning a fresh summary.
// This function does not access the snapshot.
//
// Postcondition: on success, the analyzeSummary.Actions
// key set is {a.Name for a in analyzers}.
func (an *analysisNode) run(ctx context.Context) (*analyzeSummary, error) {
	// Parse only the "compiled" Go files.
	// Do the computation in parallel.
	parsed := make([]*ParsedGoFile, len(an.files))
	{
		var group errgroup.Group
		group.SetLimit(4) // not too much: run itself is already called in parallel
		for i, fh := range an.files {
			i, fh := i, fh // capture loop variables (pre-Go 1.22 semantics)
			group.Go(func() error {
				// Call parseGoImpl directly, not the caching wrapper,
				// as cached ASTs require the global FileSet.
				// ast.Object resolution is unfortunately an implied part of the
				// go/analysis contract.
				pgf, err := parseGoImpl(ctx, an.fset, fh, ParseFull&^parser.SkipObjectResolution, false)
				parsed[i] = pgf
				return err
			})
		}
		if err := group.Wait(); err != nil {
			return nil, err // cancelled, or catastrophic error (e.g. missing file)
		}
	}

	// Type-check the package syntax.
	pkg := an.typeCheck(parsed)

	// Publish the completed package.
	// If typesOnce had already fired (e.g. via _import), an.types
	// would differ from pkg.types, violating the once-per-node invariant.
	an.typesOnce.Do(func() { an.types = pkg.types })
	if an.types != pkg.types {
		log.Fatalf("typesOnce prematurely done")
	}

	// Compute the union of exportDeps across our direct imports.
	// This is the set that will be needed by the fact decoder.
	allExportDeps := make(map[PackagePath]*analysisNode)
	for _, succ := range an.succs {
		for k, v := range succ.exportDeps {
			allExportDeps[k] = v
		}
	}

	// The fact decoder needs a means to look up a Package by path.
	pkg.factsDecoder = facts.NewDecoderFunc(pkg.types, func(path string) *types.Package {
		// Note: Decode is called concurrently, and thus so is this function.

		// Does the fact relate to a package referenced by export data?
		if dep, ok := allExportDeps[PackagePath(path)]; ok {
			// Do is a no-op if typesOnce already fired, so reaching
			// the Fatal means dep.types was never populated.
			dep.typesOnce.Do(func() { log.Fatal("dep.types not populated") })
			if dep.typesErr == nil {
				return dep.types
			}
			return nil
		}

		// If the fact relates to a dependency not referenced
		// by export data, it is safe to ignore it.
		// (In that case dep.types exists but may be unpopulated
		// or in the process of being populated from export data.)
		if an.allDeps[PackagePath(path)] == nil {
			log.Fatalf("fact package %q is not a dependency", path)
		}
		return nil
	})

	// Poll cancellation state.
	if err := ctx.Err(); err != nil {
		return nil, err
	}

	// -- analysis --

	// Build action graph for this package.
	// Each graph node (action) is one unit of analysis.
	actions := make(map[*analysis.Analyzer]*action)
	var mkAction func(a *analysis.Analyzer) *action
	mkAction = func(a *analysis.Analyzer) *action {
		act, ok := actions[a]
		if !ok {
			// Recursively create actions for the analyzers this one
			// requires: the "horizontal" edges within the package.
			var hdeps []*action
			for _, req := range a.Requires {
				hdeps = append(hdeps, mkAction(req))
			}
			act = &action{
				a:          a,
				stableName: an.stableNames[a],
				pkg:        pkg,
				vdeps:      an.succs,
				hdeps:      hdeps,
			}
			actions[a] = act
		}
		return act
	}

	// Build actions for initial package.
	var roots []*action
	for _, a := range an.analyzers {
		roots = append(roots, mkAction(a))
	}

	// Execute the graph in parallel.
	execActions(roots)
	// Inv: each root's summary is set (whether success or error).

	// Don't return (or cache) the result in case of cancellation.
	if err := ctx.Err(); err != nil {
		return nil, err // cancelled
	}

	// Return summaries only for the requested actions.
	summaries := make(map[string]*actionSummary)
	for _, root := range roots {
		if root.summary == nil {
			panic("root has nil action.summary (#60551)")
		}
		summaries[root.stableName] = root.summary
	}

	return &analyzeSummary{
		Export:         pkg.export,
		DeepExportHash: pkg.deepExportHash,
		Compiles:       pkg.compiles,
		Actions:        summaries,
	}, nil
}
   599  
// typeCheck type-checks the already-parsed files of this package,
// producing an analysisPackage holding the syntax, type information,
// shallow export data, and the recursive export hash.
//
// Postcondition: analysisPackage.types and an.exportDeps are populated.
func (an *analysisNode) typeCheck(parsed []*ParsedGoFile) *analysisPackage {
	mp := an.mp

	if false { // debugging
		log.Println("typeCheck", mp.ID)
	}

	pkg := &analysisPackage{
		mp:       mp,
		fset:     an.fset,
		parsed:   parsed,
		files:    make([]*ast.File, len(parsed)),
		compiles: len(mp.Errors) == 0, // false => list error
		types:    types.NewPackage(string(mp.PkgPath), string(mp.Name)),
		typesInfo: &types.Info{
			Types:      make(map[ast.Expr]types.TypeAndValue),
			Defs:       make(map[*ast.Ident]types.Object),
			Instances:  make(map[*ast.Ident]types.Instance),
			Implicits:  make(map[ast.Node]types.Object),
			Selections: make(map[*ast.SelectorExpr]*types.Selection),
			Scopes:     make(map[ast.Node]*types.Scope),
			Uses:       make(map[*ast.Ident]types.Object),
		},
		typesSizes: mp.TypesSizes,
	}
	versions.InitFileVersions(pkg.typesInfo)

	// Unsafe has no syntax.
	if mp.PkgPath == "unsafe" {
		pkg.types = types.Unsafe
		return pkg
	}

	// Collect the syntax trees, noting any parse errors.
	for i, p := range parsed {
		pkg.files[i] = p.File
		if p.ParseErr != nil {
			pkg.compiles = false // parse error
		}
	}

	// A package compiles only if all its dependencies do.
	for _, vdep := range an.succs {
		if !vdep.summary.Compiles {
			pkg.compiles = false // transitive error
		}
	}

	cfg := &types.Config{
		Sizes: mp.TypesSizes,
		Error: func(e error) {
			pkg.compiles = false // type error

			// Suppress type errors in files with parse errors
			// as parser recovery can be quite lossy (#59888).
			typeError := e.(types.Error)
			for _, p := range parsed {
				if p.ParseErr != nil && astutil.NodeContains(p.File, typeError.Pos) {
					return
				}
			}
			pkg.typeErrors = append(pkg.typeErrors, typeError)
		},
		Importer: importerFunc(func(importPath string) (*types.Package, error) {
			// Beware that returning an error from this function
			// will cause the type checker to synthesize a fake
			// package whose Path is importPath, potentially
			// losing a vendor/ prefix. If type-checking errors
			// are swallowed, these packages may be confusing.

			// Map ImportPath to ID.
			id, ok := mp.DepsByImpPath[ImportPath(importPath)]
			if !ok {
				// The import syntax is inconsistent with the metadata.
				// This could be because the import declaration was
				// incomplete and the metadata only includes complete
				// imports; or because the metadata ignores import
				// edges that would lead to cycles in the graph.
				return nil, fmt.Errorf("missing metadata for import of %q", importPath)
			}

			// Map ID to node. (id may be "")
			dep := an.succs[id]
			if dep == nil {
				// Analogous to (*snapshot).missingPkgError
				// in the logic for regular type-checking,
				// but without a snapshot we can't provide
				// such detail, and anyway most analysis
				// failures aren't surfaced in the UI.
				return nil, fmt.Errorf("no required module provides analysis package %q (id=%q)", importPath, id)
			}

			// (Duplicates logic from check.go.)
			if !metadata.IsValidImport(an.mp.PkgPath, dep.mp.PkgPath) {
				return nil, fmt.Errorf("invalid use of internal package %s", importPath)
			}

			// Lazily materialize the dependency's types from export data.
			return dep._import()
		}),
	}

	// Set Go dialect.
	if mp.Module != nil && mp.Module.GoVersion != "" {
		goVersion := "go" + mp.Module.GoVersion
		// types.NewChecker panics if GoVersion is invalid.
		// An unparsable mod file should probably stop us
		// before we get here, but double check just in case.
		if goVersionRx.MatchString(goVersion) {
			typesinternal.SetGoVersion(cfg, goVersion)
		}
	}

	// We want to type check cgo code if go/types supports it.
	// We passed typecheckCgo to go/packages when we Loaded.
	// TODO(adonovan): do we actually need this??
	typesinternal.SetUsesCgo(cfg)

	check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)

	// Type checking errors are handled via the config, so ignore them here.
	_ = check.Files(pkg.files)

	// debugging (type errors are quite normal)
	if false {
		if pkg.typeErrors != nil {
			log.Printf("package %s has type errors: %v", pkg.types.Path(), pkg.typeErrors)
		}
	}

	// Emit the export data and compute the recursive hash.
	export, err := gcimporter.IExportShallow(pkg.fset, pkg.types, bug.Reportf)
	if err != nil {
		// TODO(adonovan): in light of exporter bugs such as #57729,
		// consider using bug.Report here and retrying the IExportShallow
		// call here using an empty types.Package.
		log.Fatalf("internal error writing shallow export data: %v", err)
	}
	pkg.export = export

	// Compute a recursive hash to account for the export data of
	// this package and each dependency referenced by it.
	// Also, populate exportDeps.
	hash := sha256.New()
	fmt.Fprintf(hash, "%s %d\n", mp.PkgPath, len(export))
	hash.Write(export)
	paths, err := readShallowManifest(export)
	if err != nil {
		log.Fatalf("internal error: bad export data: %v", err)
	}
	for _, path := range paths {
		dep, ok := an.allDeps[path]
		if !ok {
			log.Fatalf("%s: missing dependency: %q", an, path)
		}
		fmt.Fprintf(hash, "%s %s\n", dep.mp.PkgPath, dep.summary.DeepExportHash)
		an.exportDeps[path] = dep
	}
	an.exportDeps[mp.PkgPath] = an // self
	hash.Sum(pkg.deepExportHash[:0])

	return pkg
}
   761  
   762  // readShallowManifest returns the manifest of packages referenced by
   763  // a shallow export data file for a package (excluding the package itself).
   764  // TODO(adonovan): add a test.
   765  func readShallowManifest(export []byte) ([]PackagePath, error) {
   766  	const selfPath = "<self>" // dummy path
   767  	var paths []PackagePath
   768  	getPackages := func(items []gcimporter.GetPackagesItem) error {
   769  		paths = []PackagePath{} // non-nil
   770  		for _, item := range items {
   771  			if item.Path != selfPath {
   772  				paths = append(paths, PackagePath(item.Path))
   773  			}
   774  		}
   775  		return errors.New("stop") // terminate importer
   776  	}
   777  	_, err := gcimporter.IImportShallow(token.NewFileSet(), getPackages, export, selfPath, bug.Reportf)
   778  	if paths == nil {
   779  		if err != nil {
   780  			return nil, err // failed before getPackages callback
   781  		}
   782  		return nil, bug.Errorf("internal error: IImportShallow did not call getPackages")
   783  	}
   784  	return paths, nil // success
   785  }
   786  
// analysisPackage contains information about a package, including
// syntax trees, used transiently during its type-checking and analysis.
type analysisPackage struct {
	mp             *metadata.Package // metadata for this package
	fset           *token.FileSet    // local to this package
	parsed         []*ParsedGoFile   // parsed files; parsed[i].File == files[i]
	files          []*ast.File       // same as parsed[i].File
	types          *types.Package    // the type-checked package
	compiles       bool              // package is transitively free of list/parse/type errors
	factsDecoder   *facts.Decoder    // decodes facts of direct imports (see action.exec)
	export         []byte            // encoding of types.Package
	deepExportHash file.Hash         // reflexive transitive hash of export data
	typesInfo      *types.Info       // type information (passed to analysis.Pass.TypesInfo)
	typeErrors     []types.Error     // type errors (passed to analysis.Pass.TypeErrors)
	typesSizes     types.Sizes       // sizes info (passed to analysis.Pass.TypesSizes)
}
   803  
// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type action struct {
	once       sync.Once          // ensures exec runs at most once (see execActions)
	a          *analysis.Analyzer // the analyzer to apply
	stableName string             // cross-process stable name of analyzer
	pkg        *analysisPackage   // the package to analyze
	hdeps      []*action                   // horizontal dependencies (required analyzers, same package)
	vdeps      map[PackageID]*analysisNode // vertical dependencies (dependency packages, keyed by ID)

	// results of action.exec():
	result  interface{}    // result of Run function, of type a.ResultType
	summary *actionSummary // serializable summary; set even on error (see execActions)
	err     error          // error from exec, if any
}
   821  
   822  func (act *action) String() string {
   823  	return fmt.Sprintf("%s@%s", act.a.Name, act.pkg.mp.ID)
   824  }
   825  
   826  // execActions executes a set of action graph nodes in parallel.
   827  // Postcondition: each action.summary is set, even in case of error.
   828  func execActions(actions []*action) {
   829  	var wg sync.WaitGroup
   830  	for _, act := range actions {
   831  		act := act
   832  		wg.Add(1)
   833  		go func() {
   834  			defer wg.Done()
   835  			act.once.Do(func() {
   836  				execActions(act.hdeps) // analyze "horizontal" dependencies
   837  				act.result, act.summary, act.err = act.exec()
   838  				if act.err != nil {
   839  					act.summary = &actionSummary{Err: act.err.Error()}
   840  					// TODO(adonovan): suppress logging. But
   841  					// shouldn't the root error's causal chain
   842  					// include this information?
   843  					if false { // debugging
   844  						log.Printf("act.exec(%v) failed: %v", act, act.err)
   845  					}
   846  				}
   847  			})
   848  			if act.summary == nil {
   849  				panic("nil action.summary (#60551)")
   850  			}
   851  		}()
   852  	}
   853  	wg.Wait()
   854  }
   855  
// exec defines the execution of a single action.
// It returns the (ephemeral) result of the analyzer's Run function,
// along with its (serializable) facts and diagnostics.
// Or it returns an error if the analyzer did not run to
// completion and deliver a valid result.
func (act *action) exec() (interface{}, *actionSummary, error) {
	analyzer := act.a
	pkg := act.pkg

	hasFacts := len(analyzer.FactTypes) > 0 // fact-using analyzers need vertical deps

	// Report an error if any action dependency (vertical or horizontal) failed.
	// To avoid long error messages describing chains of failure,
	// we return the dependencies' errors unadorned.
	if hasFacts {
		// TODO(adonovan): use deterministic order.
		for _, vdep := range act.vdeps {
			// NOTE(review): assumes vdep.summary.Actions always has an
			// entry for act.stableName; verify that invariant holds.
			if summ := vdep.summary.Actions[act.stableName]; summ.Err != "" {
				return nil, nil, errors.New(summ.Err)
			}
		}
	}
	for _, dep := range act.hdeps {
		if dep.err != nil {
			return nil, nil, dep.err
		}
	}
	// Inv: all action dependencies succeeded.

	// Were there list/parse/type errors that might prevent analysis?
	if !pkg.compiles && !analyzer.RunDespiteErrors {
		return nil, nil, fmt.Errorf("skipping analysis %q because package %q does not compile", analyzer.Name, pkg.mp.ID)
	}
	// Inv: package is well-formed enough to proceed with analysis.

	if false { // debugging
		log.Println("action.exec", act)
	}

	// Gather analysis Result values from horizontal dependencies.
	inputs := make(map[*analysis.Analyzer]interface{})
	for _, dep := range act.hdeps {
		inputs[dep.a] = dep.result
	}

	// TODO(adonovan): opt: facts.Set works but it may be more
	// efficient to fork and tailor it to our precise needs.
	//
	// We've already sharded the fact encoding by action
	// so that it can be done in parallel.
	// We could eliminate locking.
	// We could also dovetail more closely with the export data
	// decoder to obtain a more compact representation of
	// packages and objects (e.g. its internal IDs, instead
	// of PkgPaths and objectpaths.)
	// More importantly, we should avoid re-export of
	// facts that relate to objects that are discarded
	// by "deep" export data. Better still, use a "shallow" approach.

	// Read and decode analysis facts for each direct import.
	// The callback maps a dependency's package path to the
	// serialized facts computed for it by this same analyzer.
	factset, err := pkg.factsDecoder.Decode(func(pkgPath string) ([]byte, error) {
		if !hasFacts {
			return nil, nil // analyzer doesn't use facts, so no vdeps
		}

		// Package.Imports() may contain a fake "C" package. Ignore it.
		if pkgPath == "C" {
			return nil, nil
		}

		id, ok := pkg.mp.DepsByPkgPath[PackagePath(pkgPath)]
		if !ok {
			// This may mean imp was synthesized by the type
			// checker because it failed to import it for any reason
			// (e.g. bug processing export data; metadata ignoring
			// a cycle-forming import).
			// In that case, the fake package's imp.Path
			// is set to the failed importPath (and thus
			// it may lack a "vendor/" prefix).
			//
			// For now, silently ignore it on the assumption
			// that the error is already reported elsewhere.
			// return nil, fmt.Errorf("missing metadata")
			return nil, nil
		}

		vdep := act.vdeps[id]
		if vdep == nil {
			return nil, bug.Errorf("internal error in %s: missing vdep for id=%s", pkg.types.Path(), id)
		}

		return vdep.summary.Actions[act.stableName].Facts, nil
	})
	if err != nil {
		return nil, nil, fmt.Errorf("internal error decoding analysis facts: %w", err)
	}

	// TODO(adonovan): make Export*Fact panic rather than discarding
	// undeclared fact types, so that we discover bugs in analyzers.
	//
	// factFilter restricts AllObjectFacts/AllPackageFacts (below)
	// to the fact types this analyzer declares.
	factFilter := make(map[reflect.Type]bool)
	for _, f := range analyzer.FactTypes {
		factFilter[reflect.TypeOf(f)] = true
	}

	// If the package contains "fixed" files, it's not necessarily an error if we
	// can't convert positions.
	hasFixedFiles := false
	for _, p := range pkg.parsed {
		if p.Fixed() {
			hasFixedFiles = true
			break
		}
	}

	// posToLocation converts from token.Pos to protocol form.
	// TODO(adonovan): improve error messages.
	posToLocation := func(start, end token.Pos) (protocol.Location, error) {
		tokFile := pkg.fset.File(start)

		// Find the parsed file whose token.File contains the position.
		for _, p := range pkg.parsed {
			if p.Tok == tokFile {
				if end == token.NoPos {
					end = start
				}
				return p.PosLocation(start, end)
			}
		}
		// A stray position is a driver bug unless "fixed" syntax
		// may explain it.
		errorf := bug.Errorf
		if hasFixedFiles {
			errorf = fmt.Errorf
		}
		return protocol.Location{}, errorf("token.Pos not within package")
	}

	// Now run the (pkg, analyzer) action.
	var diagnostics []gobDiagnostic
	pass := &analysis.Pass{
		Analyzer:   analyzer,
		Fset:       pkg.fset,
		Files:      pkg.files,
		Pkg:        pkg.types,
		TypesInfo:  pkg.typesInfo,
		TypesSizes: pkg.typesSizes,
		TypeErrors: pkg.typeErrors,
		ResultOf:   inputs,
		Report: func(d analysis.Diagnostic) {
			// Eagerly convert each reported diagnostic to
			// serializable form.
			diagnostic, err := toGobDiagnostic(posToLocation, analyzer, d)
			if err != nil {
				if !hasFixedFiles {
					bug.Reportf("internal error converting diagnostic from analyzer %q: %v", analyzer.Name, err)
				}
				return
			}
			diagnostics = append(diagnostics, diagnostic)
		},
		ImportObjectFact:  factset.ImportObjectFact,
		ExportObjectFact:  factset.ExportObjectFact,
		ImportPackageFact: factset.ImportPackageFact,
		ExportPackageFact: factset.ExportPackageFact,
		AllObjectFacts:    func() []analysis.ObjectFact { return factset.AllObjectFacts(factFilter) },
		AllPackageFacts:   func() []analysis.PackageFact { return factset.AllPackageFacts(factFilter) },
	}

	// Recover from panics (only) within the analyzer logic.
	// (Use an anonymous function to limit the recover scope.)
	var result interface{}
	func() {
		start := time.Now()
		defer func() {
			if r := recover(); r != nil {
				// An Analyzer panicked, likely due to a bug.
				//
				// In general we want to discover and fix such panics quickly,
				// so we don't suppress them, but some bugs in third-party
				// analyzers cannot be quickly fixed, so we use an allowlist
				// to suppress panics.
				const strict = true
				if strict && bug.PanicOnBugs &&
					analyzer.Name != "buildir" { // see https://github.com/dominikh/go-tools/issues/1343
					// Uncomment this when debugging suspected failures
					// in the driver, not the analyzer.
					if false {
						debug.SetTraceback("all") // show all goroutines
					}
					panic(r)
				} else {
					// In production, suppress the panic and press on.
					// (This assigns the err returned below.)
					err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pass.Pkg.Path(), r)
				}
			}

			// Accumulate running time for each checker.
			analyzerRunTimesMu.Lock()
			analyzerRunTimes[analyzer] += time.Since(start)
			analyzerRunTimesMu.Unlock()
		}()

		result, err = pass.Analyzer.Run(pass)
	}()
	if err != nil {
		return nil, nil, err
	}

	// The analyzer must return a value of exactly its declared ResultType.
	if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
		return nil, nil, bug.Errorf(
			"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
			pass.Pkg.Path(), pass.Analyzer, got, want)
	}

	// Disallow Export*Fact calls after Run.
	// (A panic means the Analyzer is abusing concurrency.)
	pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) {
		panic(fmt.Sprintf("%v: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
	}
	pass.ExportPackageFact = func(fact analysis.Fact) {
		panic(fmt.Sprintf("%v: Pass.ExportPackageFact(%T) called after Run", act, fact))
	}

	// Serialize this package's facts for dependents,
	// and hash them for cache keying.
	factsdata := factset.Encode()
	return result, &actionSummary{
		Diagnostics: diagnostics,
		Facts:       factsdata,
		FactsHash:   file.HashOf(factsdata),
	}, nil
}
  1081  
// Accumulated running time of each analyzer's Run function,
// guarded by analyzerRunTimesMu.
// Written by action.exec; read by AnalyzerRunTimes.
var (
	analyzerRunTimesMu sync.Mutex
	analyzerRunTimes   = make(map[*analysis.Analyzer]time.Duration)
)

// LabelDuration is a labeled duration, used to report the accumulated
// running time of an analyzer (see AnalyzerRunTimes).
type LabelDuration struct {
	Label    string        // analyzer name
	Duration time.Duration // total time spent in the analyzer's Run function
}
  1091  
  1092  // AnalyzerTimes returns the accumulated time spent in each Analyzer's
  1093  // Run function since process start, in descending order.
  1094  func AnalyzerRunTimes() []LabelDuration {
  1095  	analyzerRunTimesMu.Lock()
  1096  	defer analyzerRunTimesMu.Unlock()
  1097  
  1098  	slice := make([]LabelDuration, 0, len(analyzerRunTimes))
  1099  	for a, t := range analyzerRunTimes {
  1100  		slice = append(slice, LabelDuration{Label: a.Name, Duration: t})
  1101  	}
  1102  	sort.Slice(slice, func(i, j int) bool {
  1103  		return slice[i].Duration > slice[j].Duration
  1104  	})
  1105  	return slice
  1106  }
  1107  
  1108  // requiredAnalyzers returns the transitive closure of required analyzers in preorder.
  1109  func requiredAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer {
  1110  	var result []*analysis.Analyzer
  1111  	seen := make(map[*analysis.Analyzer]bool)
  1112  	var visitAll func([]*analysis.Analyzer)
  1113  	visitAll = func(analyzers []*analysis.Analyzer) {
  1114  		for _, a := range analyzers {
  1115  			if !seen[a] {
  1116  				seen[a] = true
  1117  				result = append(result, a)
  1118  				visitAll(a.Requires)
  1119  			}
  1120  		}
  1121  	}
  1122  	visitAll(analyzers)
  1123  	return result
  1124  }
  1125  
// analyzeSummaryCodec (de)serializes *analyzeSummary values
// for the file-based cache.
var analyzeSummaryCodec = frob.CodecFor[*analyzeSummary]()

// -- data types for serialization of analysis.Diagnostic and golang.Diagnostic --

// diagnosticsCodec (de)serializes the diagnostics of one action.
// (The name says gob but we use frob.)
var diagnosticsCodec = frob.CodecFor[[]gobDiagnostic]()
  1132  
// gobDiagnostic is a serializable form of analysis.Diagnostic,
// with token.Pos positions expanded to protocol.Location
// (see toGobDiagnostic).
type gobDiagnostic struct {
	Location       protocol.Location
	Severity       protocol.DiagnosticSeverity // not set by toGobDiagnostic: severity is dynamic, per user configuration
	Code           string                      // diagnostic category, or "default" if none
	CodeHref       string                      // URL target of the Code link (see effectiveURL)
	Source         string                      // analyzer name
	Message        string
	SuggestedFixes []gobSuggestedFix
	Related        []gobRelatedInformation
	Tags           []protocol.DiagnosticTag // not set by toGobDiagnostic: analysis diagnostics do not contain tags
}

// gobRelatedInformation is a serializable form of related
// diagnostic information (a location plus a message).
type gobRelatedInformation struct {
	Location protocol.Location
	Message  string
}

// gobSuggestedFix is a serializable form of analysis.SuggestedFix.
type gobSuggestedFix struct {
	Message    string
	TextEdits  []gobTextEdit
	Command    *gobCommand // optional; not populated by toGobDiagnostic
	ActionKind protocol.CodeActionKind
}

// gobCommand is a serializable command reference: a title, a command
// name, and its raw JSON arguments.
type gobCommand struct {
	Title     string
	Command   string
	Arguments []json.RawMessage
}

// gobTextEdit is a serializable text edit: replace the text at
// Location with NewText.
type gobTextEdit struct {
	Location protocol.Location
	NewText  []byte
}
  1167  
  1168  // toGobDiagnostic converts an analysis.Diagnosic to a serializable gobDiagnostic,
  1169  // which requires expanding token.Pos positions into protocol.Location form.
  1170  func toGobDiagnostic(posToLocation func(start, end token.Pos) (protocol.Location, error), a *analysis.Analyzer, diag analysis.Diagnostic) (gobDiagnostic, error) {
  1171  	var fixes []gobSuggestedFix
  1172  	for _, fix := range diag.SuggestedFixes {
  1173  		var gobEdits []gobTextEdit
  1174  		for _, textEdit := range fix.TextEdits {
  1175  			loc, err := posToLocation(textEdit.Pos, textEdit.End)
  1176  			if err != nil {
  1177  				return gobDiagnostic{}, fmt.Errorf("in SuggestedFixes: %w", err)
  1178  			}
  1179  			gobEdits = append(gobEdits, gobTextEdit{
  1180  				Location: loc,
  1181  				NewText:  textEdit.NewText,
  1182  			})
  1183  		}
  1184  		fixes = append(fixes, gobSuggestedFix{
  1185  			Message:   fix.Message,
  1186  			TextEdits: gobEdits,
  1187  		})
  1188  	}
  1189  
  1190  	var related []gobRelatedInformation
  1191  	for _, r := range diag.Related {
  1192  		loc, err := posToLocation(r.Pos, r.End)
  1193  		if err != nil {
  1194  			return gobDiagnostic{}, fmt.Errorf("in Related: %w", err)
  1195  		}
  1196  		related = append(related, gobRelatedInformation{
  1197  			Location: loc,
  1198  			Message:  r.Message,
  1199  		})
  1200  	}
  1201  
  1202  	loc, err := posToLocation(diag.Pos, diag.End)
  1203  	if err != nil {
  1204  		return gobDiagnostic{}, err
  1205  	}
  1206  
  1207  	// The Code column of VSCode's Problems table renders this
  1208  	// information as "Source(Code)" where code is a link to CodeHref.
  1209  	// (The code field must be nonempty for anything to appear.)
  1210  	diagURL := effectiveURL(a, diag)
  1211  	code := "default"
  1212  	if diag.Category != "" {
  1213  		code = diag.Category
  1214  	}
  1215  
  1216  	return gobDiagnostic{
  1217  		Location: loc,
  1218  		// Severity for analysis diagnostics is dynamic,
  1219  		// based on user configuration per analyzer.
  1220  		Code:           code,
  1221  		CodeHref:       diagURL,
  1222  		Source:         a.Name,
  1223  		Message:        diag.Message,
  1224  		SuggestedFixes: fixes,
  1225  		Related:        related,
  1226  		// Analysis diagnostics do not contain tags.
  1227  	}, nil
  1228  }
  1229  
  1230  // effectiveURL computes the effective URL of diag,
  1231  // using the algorithm specified at Diagnostic.URL.
  1232  func effectiveURL(a *analysis.Analyzer, diag analysis.Diagnostic) string {
  1233  	u := diag.URL
  1234  	if u == "" && diag.Category != "" {
  1235  		u = "#" + diag.Category
  1236  	}
  1237  	if base, err := urlpkg.Parse(a.URL); err == nil {
  1238  		if rel, err := urlpkg.Parse(u); err == nil {
  1239  			u = base.ResolveReference(rel).String()
  1240  		}
  1241  	}
  1242  	return u
  1243  }
  1244  
  1245  // stableName returns a name for the analyzer that is unique and
  1246  // stable across address spaces.
  1247  //
  1248  // Analyzer names are not unique. For example, gopls includes
  1249  // both x/tools/passes/nilness and staticcheck/nilness.
  1250  // For serialization, we must assign each analyzer a unique identifier
  1251  // that two gopls processes accessing the cache can agree on.
  1252  func stableName(a *analysis.Analyzer) string {
  1253  	// Incorporate the file and line of the analyzer's Run function.
  1254  	addr := reflect.ValueOf(a.Run).Pointer()
  1255  	fn := runtime.FuncForPC(addr)
  1256  	file, line := fn.FileLine(addr)
  1257  
  1258  	// It is tempting to use just a.Name as the stable name when
  1259  	// it is unique, but making them always differ helps avoid
  1260  	// name/stablename confusion.
  1261  	return fmt.Sprintf("%s(%s:%d)", a.Name, filepath.Base(file), line)
  1262  }