github.com/cockroachdb/tools@v0.0.0-20230222021103-a6d27438930d/go/analysis/internal/checker/checker.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package checker defines the implementation of the checker commands.
     6  // The same code drives the multi-analysis driver, the single-analysis
     7  // driver that is conventionally provided for convenience along with
     8  // each analysis package, and the test driver.
     9  package checker
    10  
    11  import (
    12  	"bytes"
    13  	"encoding/gob"
    14  	"errors"
    15  	"flag"
    16  	"fmt"
    17  	"go/format"
    18  	"go/token"
    19  	"go/types"
    20  	"io/ioutil"
    21  	"log"
    22  	"os"
    23  	"reflect"
    24  	"runtime"
    25  	"runtime/pprof"
    26  	"runtime/trace"
    27  	"sort"
    28  	"strings"
    29  	"sync"
    30  	"time"
    31  
    32  	"golang.org/x/tools/go/analysis"
    33  	"golang.org/x/tools/go/analysis/internal/analysisflags"
    34  	"golang.org/x/tools/go/packages"
    35  	"golang.org/x/tools/internal/diff"
    36  	"golang.org/x/tools/internal/robustio"
    37  )
    38  
    39  var (
    40  	// Debug is a set of single-letter flags:
    41  	//
    42  	//	f	show [f]acts as they are created
    43  	//	p	disable [p]arallel execution of analyzers
    44  	//	s	do additional [s]anity checks on fact types and serialization
    45  	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
    46  	//	v	show [v]erbose logging
    47  	//
    48  	Debug = ""
    49  
    50  	// Log files for optional performance tracing.
    51  	CPUProfile, MemProfile, Trace string
    52  
    53  	// IncludeTests indicates whether test files should be analyzed too.
    54  	IncludeTests = true
    55  
    56  	// Fix determines whether to apply all suggested fixes.
    57  	Fix bool
    58  )
    59  
    60  // RegisterFlags registers command-line flags used by the analysis driver.
    61  func RegisterFlags() {
    62  	// When adding flags here, remember to update
    63  	// the list of suppressed flags in analysisflags.
    64  
    65  	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)
    66  
    67  	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
    68  	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
    69  	flag.StringVar(&Trace, "trace", "", "write trace log to this file")
    70  	flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too")
    71  
    72  	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
    73  }
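
// A minimal driver sketch (illustrative only): it mirrors how the
// singlechecker and multichecker commands wire RegisterFlags and Run
// together; the choice of the printf analyzer is arbitrary. Because this
// package is internal, external programs should instead use
// singlechecker.Main or multichecker.Main, which are built on this package.
//
//	package main
//
//	import (
//		"flag"
//		"os"
//
//		"golang.org/x/tools/go/analysis"
//		"golang.org/x/tools/go/analysis/internal/checker"
//		"golang.org/x/tools/go/analysis/passes/printf"
//	)
//
//	func main() {
//		checker.RegisterFlags()
//		flag.Parse()
//		os.Exit(checker.Run(flag.Args(), []*analysis.Analyzer{printf.Analyzer}))
//	}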
    74  
    75  // Run loads the packages specified by args using go/packages,
    76  // then applies the specified analyzers to them.
    77  // Analysis flags must already have been set.
    78  // It provides most of the logic for the main functions of both the
    79  // singlechecker and the multi-analysis commands.
    80  // It returns the appropriate exit code.
    81  func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
    82  	if CPUProfile != "" {
    83  		f, err := os.Create(CPUProfile)
    84  		if err != nil {
    85  			log.Fatal(err)
    86  		}
    87  		if err := pprof.StartCPUProfile(f); err != nil {
    88  			log.Fatal(err)
    89  		}
    90  		// NB: profile won't be written in case of error.
    91  		defer pprof.StopCPUProfile()
    92  	}
    93  
    94  	if Trace != "" {
    95  		f, err := os.Create(Trace)
    96  		if err != nil {
    97  			log.Fatal(err)
    98  		}
    99  		if err := trace.Start(f); err != nil {
   100  			log.Fatal(err)
   101  		}
   102  		// NB: trace log won't be written in case of error.
   103  		defer func() {
   104  			trace.Stop()
   105  			log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
   106  		}()
   107  	}
   108  
   109  	if MemProfile != "" {
   110  		f, err := os.Create(MemProfile)
   111  		if err != nil {
   112  			log.Fatal(err)
   113  		}
   114  		// NB: memprofile won't be written in case of error.
   115  		defer func() {
   116  			runtime.GC() // get up-to-date statistics
   117  			if err := pprof.WriteHeapProfile(f); err != nil {
   118  				log.Fatalf("Writing memory profile: %v", err)
   119  			}
   120  			f.Close()
   121  		}()
   122  	}
   123  
   124  	// Load the packages.
   125  	if dbg('v') {
   126  		log.SetPrefix("")
   127  		log.SetFlags(log.Lmicroseconds) // display timing
   128  		log.Printf("load %s", args)
   129  	}
   130  
   131  	// Optimization: if the selected analyzers don't produce/consume
   132  	// facts, we need source only for the initial packages.
   133  	allSyntax := needFacts(analyzers)
   134  	initial, err := load(args, allSyntax)
   135  	if err != nil {
   136  		if _, ok := err.(typeParseError); !ok {
   137  			// Fail if any of the errors is not
   138  			// related to parsing or typing.
   139  			log.Print(err)
   140  			return 1
   141  		}
   142  		// TODO: filter analyzers based on RunDespiteErrors?
   143  	}
   144  
   145  	// Print the results.
   146  	roots := analyze(initial, analyzers)
   147  
   148  	if Fix {
   149  		if err := applyFixes(roots); err != nil {
   150  			// Fail if applying the fixes failed.
   151  			log.Print(err)
   152  			return 1
   153  		}
   154  	}
   155  	return printDiagnostics(roots)
   156  }
   157  
   158  // typeParseError represents a package load error
   159  // that is related to typing and parsing.
   160  type typeParseError struct {
   161  	error
   162  }
   163  
   164  // load loads the initial packages. If all loading issues are related to
   165  // typing and parsing, the returned error is of type typeParseError.
   166  func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
   167  	mode := packages.LoadSyntax
   168  	if allSyntax {
   169  		mode = packages.LoadAllSyntax
   170  	}
   171  	conf := packages.Config{
   172  		Mode:  mode,
   173  		Tests: IncludeTests,
   174  	}
   175  	initial, err := packages.Load(&conf, patterns...)
   176  	if err == nil {
   177  		if len(initial) == 0 {
   178  			err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
   179  		} else {
   180  			err = loadingError(initial)
   181  		}
   182  	}
   183  	return initial, err
   184  }
   185  
   186  // loadingError checks for issues during the loading of initial
   187  // packages. It returns nil if there are no issues, an error of
   188  // type typeParseError if all errors, including those in
   189  // dependencies, are related to typing or parsing, and otherwise
   190  // a plain error with an appropriate message.
   191  func loadingError(initial []*packages.Package) error {
   192  	var err error
   193  	if n := packages.PrintErrors(initial); n > 1 {
   194  		err = fmt.Errorf("%d errors during loading", n)
   195  	} else if n == 1 {
   196  		err = errors.New("error during loading")
   197  	} else {
   198  		// no errors
   199  		return nil
   200  	}
   201  	all := true
   202  	packages.Visit(initial, nil, func(pkg *packages.Package) {
   203  		for _, err := range pkg.Errors {
   204  			typeOrParse := err.Kind == packages.TypeError || err.Kind == packages.ParseError
   205  			all = all && typeOrParse
   206  		}
   207  	})
   208  	if all {
   209  		return typeParseError{err}
   210  	}
   211  	return err
   212  }
   213  
   214  // TestAnalyzer applies an analysis to a set of packages (and their
   215  // dependencies if necessary) and returns the results.
   216  //
   217  // Facts about pkg are returned in a map keyed by object; package facts
   218  // have a nil key.
   219  //
   220  // This entry point is used only by analysistest.
   221  func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
   222  	var results []*TestAnalyzerResult
   223  	for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
   224  		facts := make(map[types.Object][]analysis.Fact)
   225  		for key, fact := range act.objectFacts {
   226  			if key.obj.Pkg() == act.pass.Pkg {
   227  				facts[key.obj] = append(facts[key.obj], fact)
   228  			}
   229  		}
   230  		for key, fact := range act.packageFacts {
   231  			if key.pkg == act.pass.Pkg {
   232  				facts[nil] = append(facts[nil], fact)
   233  			}
   234  		}
   235  
   236  		results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
   237  	}
   238  	return results
   239  }
   240  
   241  type TestAnalyzerResult struct {
   242  	Pass        *analysis.Pass
   243  	Diagnostics []analysis.Diagnostic
   244  	Facts       map[types.Object][]analysis.Fact
   245  	Result      interface{}
   246  	Err         error
   247  }
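
// An illustrative sketch of how TestAnalyzer might be consumed
// (hypothetical; myAnalyzer, the pattern string, and t are placeholders
// supplied by the caller, typically a test built on analysistest):
//
//	cfg := &packages.Config{Mode: packages.LoadAllSyntax, Tests: true}
//	pkgs, err := packages.Load(cfg, "example.com/somepkg")
//	if err != nil {
//		t.Fatal(err)
//	}
//	for _, res := range TestAnalyzer(myAnalyzer, pkgs) {
//		if res.Err != nil {
//			t.Fatal(res.Err)
//		}
//		for _, d := range res.Diagnostics {
//			t.Logf("%s: %s", res.Pass.Fset.Position(d.Pos), d.Message)
//		}
//	}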
   248  
   249  func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
   250  	// Construct the action graph.
   251  	if dbg('v') {
   252  		log.Printf("building graph of analysis passes")
   253  	}
   254  
   255  	// Each graph node (action) is one unit of analysis.
   256  	// Edges express package-to-package (vertical) dependencies,
   257  	// and analysis-to-analysis (horizontal) dependencies.
   258  	type key struct {
   259  		*analysis.Analyzer
   260  		*packages.Package
   261  	}
   262  	actions := make(map[key]*action)
   263  
   264  	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
   265  	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
   266  		k := key{a, pkg}
   267  		act, ok := actions[k]
   268  		if !ok {
   269  			act = &action{a: a, pkg: pkg}
   270  
   271  			// Add a dependency on each required analyzer.
   272  			for _, req := range a.Requires {
   273  				act.deps = append(act.deps, mkAction(req, pkg))
   274  			}
   275  
   276  			// An analysis that consumes/produces facts
   277  			// must run on the package's dependencies too.
   278  			if len(a.FactTypes) > 0 {
   279  				paths := make([]string, 0, len(pkg.Imports))
   280  				for path := range pkg.Imports {
   281  					paths = append(paths, path)
   282  				}
   283  				sort.Strings(paths) // for determinism
   284  				for _, path := range paths {
   285  					dep := mkAction(a, pkg.Imports[path])
   286  					act.deps = append(act.deps, dep)
   287  				}
   288  			}
   289  
   290  			actions[k] = act
   291  		}
   292  		return act
   293  	}
   294  
   295  	// Build nodes for initial packages.
   296  	var roots []*action
   297  	for _, a := range analyzers {
   298  		for _, pkg := range pkgs {
   299  			root := mkAction(a, pkg)
   300  			root.isroot = true
   301  			roots = append(roots, root)
   302  		}
   303  	}
   304  
   305  	// Execute the graph in parallel.
   306  	execAll(roots)
   307  
   308  	return roots
   309  }
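
// To illustrate the shape of the graph (hypothetical analyzers and
// packages): if the requested analyzer A requires analyzer B and declares
// fact types, and the sole initial package p imports q, then mkAction
// creates the actions A@p, B@p, A@q, and B@q with dependency edges
//
//	A@p -> B@p   horizontal: same package, required analyzer
//	A@p -> A@q   vertical: same analyzer, imported package
//	A@q -> B@q   horizontal, within the dependency
//
// and only A@p is marked as a root.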
   310  
   311  func applyFixes(roots []*action) error {
   312  	// Visit all of the actions and accumulate the suggested edits.
   313  	paths := make(map[robustio.FileID]string)
   314  	editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit)
   315  	visited := make(map[*action]bool)
   316  	var apply func(*action) error
   317  	var visitAll func(actions []*action) error
   318  	visitAll = func(actions []*action) error {
   319  		for _, act := range actions {
   320  			if !visited[act] {
   321  				visited[act] = true
   322  				if err := visitAll(act.deps); err != nil {
   323  					return err
   324  				}
   325  				if err := apply(act); err != nil {
   326  					return err
   327  				}
   328  			}
   329  		}
   330  		return nil
   331  	}
   332  
   333  	apply = func(act *action) error {
   334  		editsForTokenFile := make(map[*token.File][]diff.Edit)
   335  		for _, diag := range act.diagnostics {
   336  			for _, sf := range diag.SuggestedFixes {
   337  				for _, edit := range sf.TextEdits {
   338  					// Validate the edit.
   339  					// Any error here indicates a bug in the analyzer.
   340  					file := act.pkg.Fset.File(edit.Pos)
   341  					if file == nil {
   342  						return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)",
   343  							act.a.Name, edit.Pos)
   344  					}
   345  					if edit.Pos > edit.End {
   346  						return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)",
   347  							act.a.Name, edit.Pos, edit.End)
   348  					}
   349  					if eof := token.Pos(file.Base() + file.Size()); edit.End > eof {
   350  						return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)",
   351  							act.a.Name, edit.End, eof)
   352  					}
   353  					edit := diff.Edit{Start: file.Offset(edit.Pos), End: file.Offset(edit.End), New: string(edit.NewText)}
   354  					editsForTokenFile[file] = append(editsForTokenFile[file], edit)
   355  				}
   356  			}
   357  		}
   358  
   359  		for f, edits := range editsForTokenFile {
   360  			id, _, err := robustio.GetFileID(f.Name())
   361  			if err != nil {
   362  				return err
   363  			}
   364  			if _, hasId := paths[id]; !hasId {
   365  				paths[id] = f.Name()
   366  				editsByAction[id] = make(map[*action][]diff.Edit)
   367  			}
   368  			editsByAction[id][act] = edits
   369  		}
   370  		return nil
   371  	}
   372  
   373  	if err := visitAll(roots); err != nil {
   374  		return err
   375  	}
   376  
   377  	// Validate and group the edits to each actual file.
   378  	editsByPath := make(map[string][]diff.Edit)
   379  	for id, actToEdits := range editsByAction {
   380  		path := paths[id]
   381  		actions := make([]*action, 0, len(actToEdits))
   382  		for act := range actToEdits {
   383  			actions = append(actions, act)
   384  		}
   385  
   386  		// Does any action create conflicting edits?
   387  		for _, act := range actions {
   388  			edits := actToEdits[act]
   389  			if _, invalid := validateEdits(edits); invalid > 0 {
   390  				name, x, y := act.a.Name, edits[invalid-1], edits[invalid]
   391  				return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y})
   392  			}
   393  		}
   394  
   395  		// Does any pair of different actions create edits that conflict?
   396  		for j := range actions {
   397  			for k := range actions[:j] {
   398  				x, y := actions[j], actions[k]
   399  				if x.a.Name > y.a.Name {
   400  					x, y = y, x
   401  				}
   402  				xedits, yedits := actToEdits[x], actToEdits[y]
   403  				combined := append(xedits, yedits...)
   404  				if _, invalid := validateEdits(combined); invalid > 0 {
   405  					// TODO: consider applying each action's consistent list of edits entirely,
   406  					// and then using a three-way merge (such as GNU diff3) on the resulting
   407  					// files to report more precisely the parts that actually conflict.
   408  					return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits)
   409  				}
   410  			}
   411  		}
   412  
   413  		var edits []diff.Edit
   414  		for act := range actToEdits {
   415  			edits = append(edits, actToEdits[act]...)
   416  		}
   417  		editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated.
   418  	}
   419  
   420  	// Now we've got a set of valid edits for each file. Apply them.
   421  	for path, edits := range editsByPath {
   422  		contents, err := ioutil.ReadFile(path)
   423  		if err != nil {
   424  			return err
   425  		}
   426  
   427  		out, err := diff.ApplyBytes(contents, edits)
   428  		if err != nil {
   429  			return err
   430  		}
   431  
   432  		// Try to format the file.
   433  		if formatted, err := format.Source(out); err == nil {
   434  			out = formatted
   435  		}
   436  
   437  		if err := ioutil.WriteFile(path, out, 0644); err != nil {
   438  			return err
   439  		}
   440  	}
   441  	return nil
   442  }
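
// Typical use (illustrative; "mychecker" is a placeholder for a driver
// built on this package): running the driver with the -fix flag applies
// the accumulated edits in place and, where possible, reformats the
// rewritten files with go/format:
//
//	$ mychecker -fix ./...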
   443  
   444  // validateEdits returns a list of edits that is sorted and
   445  // contains no duplicate edits. It also returns the index, within the
   446  // sorted input, of the second edit of an overlapping adjacent pair if
   447  // one exists, or a value < 0 if the edits do not overlap.
   448  func validateEdits(edits []diff.Edit) ([]diff.Edit, int) {
   449  	if len(edits) == 0 {
   450  		return nil, -1
   451  	}
   452  	equivalent := func(x, y diff.Edit) bool {
   453  		return x.Start == y.Start && x.End == y.End && x.New == y.New
   454  	}
   455  	diff.SortEdits(edits)
   456  	unique := []diff.Edit{edits[0]}
   457  	invalid := -1
   458  	for i := 1; i < len(edits); i++ {
   459  		prev, cur := edits[i-1], edits[i]
   460  		// We skip over equivalent edits without considering them
   461  		// an error. This handles identical edits coming from the
   462  		// multiple ways of loading a package into a
   463  		// *go/packages.Package for testing, e.g. packages "p" and "p [p.test]".
   464  		if !equivalent(prev, cur) {
   465  			unique = append(unique, cur)
   466  			if prev.End > cur.Start {
   467  				invalid = i
   468  			}
   469  		}
   470  	}
   471  	return unique, invalid
   472  }
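
// For example (hypothetical values):
//
//	unique, invalid := validateEdits([]diff.Edit{
//		{Start: 0, End: 5, New: "x"},
//		{Start: 0, End: 5, New: "x"}, // exact duplicate of the first: tolerated
//		{Start: 3, End: 8, New: "y"}, // overlaps the first: reported
//	})
//	// unique holds the two distinct edits, and invalid == 2, the index
//	// (within the sorted input) of the edit that overlaps its predecessor.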
   473  
   474  // diff3Conflict returns an error describing two conflicting sets of
   475  // edits on a file at path.
   476  func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error {
   477  	contents, err := ioutil.ReadFile(path)
   478  	if err != nil {
   479  		return err
   480  	}
   481  	oldlabel, old := "base", string(contents)
   482  
   483  	xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits)
   484  	if err != nil {
   485  		return err
   486  	}
   487  	ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits)
   488  	if err != nil {
   489  		return err
   490  	}
   491  
   492  	return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s",
   493  		xlabel, ylabel, path, xdiff, ydiff)
   494  }
   495  
   496  // printDiagnostics prints the diagnostics for the root packages in either
   497  // plain text or JSON format. JSON format also includes errors for any
   498  // dependencies.
   499  //
   500  // It returns the exitcode: in plain mode, 0 for success, 1 for analysis
   501  // errors, and 3 for diagnostics. We avoid 2 since the flag package uses
   502  // it. JSON mode always succeeds at printing errors and diagnostics in a
   503  // structured form to stdout.
   504  func printDiagnostics(roots []*action) (exitcode int) {
   505  	// Print the output.
   506  	//
   507  	// Print diagnostics only for root packages,
   508  	// but errors for all packages.
   509  	printed := make(map[*action]bool)
   510  	var print func(*action)
   511  	var visitAll func(actions []*action)
   512  	visitAll = func(actions []*action) {
   513  		for _, act := range actions {
   514  			if !printed[act] {
   515  				printed[act] = true
   516  				visitAll(act.deps)
   517  				print(act)
   518  			}
   519  		}
   520  	}
   521  
   522  	if analysisflags.JSON {
   523  		// JSON output
   524  		tree := make(analysisflags.JSONTree)
   525  		print = func(act *action) {
   526  			var diags []analysis.Diagnostic
   527  			if act.isroot {
   528  				diags = act.diagnostics
   529  			}
   530  			tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err)
   531  		}
   532  		visitAll(roots)
   533  		tree.Print()
   534  	} else {
   535  		// plain text output
   536  
   537  		// De-duplicate diagnostics by position (not token.Pos) to
   538  		// avoid double-reporting in source files that belong to
   539  		// multiple packages, such as foo and foo.test.
   540  		type key struct {
   541  			pos token.Position
   542  			end token.Position
   543  			*analysis.Analyzer
   544  			message string
   545  		}
   546  		seen := make(map[key]bool)
   547  
   548  		print = func(act *action) {
   549  			if act.err != nil {
   550  				fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
   551  				exitcode = 1 // analysis failed, at least partially
   552  				return
   553  			}
   554  			if act.isroot {
   555  				for _, diag := range act.diagnostics {
   556  					// We don't display a.Name/diag.Category
   557  					// as most users don't care.
   558  
   559  					posn := act.pkg.Fset.Position(diag.Pos)
   560  					end := act.pkg.Fset.Position(diag.End)
   561  					k := key{posn, end, act.a, diag.Message}
   562  					if seen[k] {
   563  						continue // duplicate
   564  					}
   565  					seen[k] = true
   566  
   567  					analysisflags.PrintPlain(act.pkg.Fset, diag)
   568  				}
   569  			}
   570  		}
   571  		visitAll(roots)
   572  
   573  		if exitcode == 0 && len(seen) > 0 {
   574  			exitcode = 3 // successfully produced diagnostics
   575  		}
   576  	}
   577  
   578  	// Print timing info.
   579  	if dbg('t') {
   580  		if !dbg('p') {
   581  			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
   582  		}
   583  		var all []*action
   584  		var total time.Duration
   585  		for act := range printed {
   586  			all = append(all, act)
   587  			total += act.duration
   588  		}
   589  		sort.Slice(all, func(i, j int) bool {
   590  			return all[i].duration > all[j].duration
   591  		})
   592  
   593  		// Print actions accounting for 90% of the total.
   594  		var sum time.Duration
   595  		for _, act := range all {
   596  			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
   597  			sum += act.duration
   598  			if sum >= total*9/10 {
   599  				break
   600  			}
   601  		}
   602  	}
   603  
   604  	return exitcode
   605  }
   606  
   607  // needFacts reports whether any analysis required by the specified set
   608  // needs facts.  If so, we must load the entire program from source.
   609  func needFacts(analyzers []*analysis.Analyzer) bool {
   610  	seen := make(map[*analysis.Analyzer]bool)
   611  	var q []*analysis.Analyzer // for BFS
   612  	q = append(q, analyzers...)
   613  	for len(q) > 0 {
   614  		a := q[0]
   615  		q = q[1:]
   616  		if !seen[a] {
   617  			seen[a] = true
   618  			if len(a.FactTypes) > 0 {
   619  				return true
   620  			}
   621  			q = append(q, a.Requires...)
   622  		}
   623  	}
   624  	return false
   625  }
   626  
   627  // An action represents one unit of analysis work: the application of
   628  // one analysis to one package. Actions form a DAG, both within a
   629  // package (as different analyzers are applied, either in sequence or
   630  // parallel), and across packages (as dependencies are analyzed).
   631  type action struct {
   632  	once         sync.Once
   633  	a            *analysis.Analyzer
   634  	pkg          *packages.Package
   635  	pass         *analysis.Pass
   636  	isroot       bool
   637  	deps         []*action
   638  	objectFacts  map[objectFactKey]analysis.Fact
   639  	packageFacts map[packageFactKey]analysis.Fact
   640  	result       interface{}
   641  	diagnostics  []analysis.Diagnostic
   642  	err          error
   643  	duration     time.Duration
   644  }
   645  
   646  type objectFactKey struct {
   647  	obj types.Object
   648  	typ reflect.Type
   649  }
   650  
   651  type packageFactKey struct {
   652  	pkg *types.Package
   653  	typ reflect.Type
   654  }
   655  
   656  func (act *action) String() string {
   657  	return fmt.Sprintf("%s@%s", act.a, act.pkg)
   658  }
   659  
   660  func execAll(actions []*action) {
   661  	sequential := dbg('p')
   662  	var wg sync.WaitGroup
   663  	for _, act := range actions {
   664  		wg.Add(1)
   665  		work := func(act *action) {
   666  			act.exec()
   667  			wg.Done()
   668  		}
   669  		if sequential {
   670  			work(act)
   671  		} else {
   672  			go work(act)
   673  		}
   674  	}
   675  	wg.Wait()
   676  }
   677  
   678  func (act *action) exec() { act.once.Do(act.execOnce) }
   679  
   680  func (act *action) execOnce() {
   681  	// Analyze dependencies.
   682  	execAll(act.deps)
   683  
   684  	// TODO(adonovan): uncomment this during profiling.
   685  	// It won't build pre-go1.11 but conditional compilation
   686  	// using build tags isn't warranted.
   687  	//
   688  	// ctx, task := trace.NewTask(context.Background(), "exec")
   689  	// trace.Log(ctx, "pass", act.String())
   690  	// defer task.End()
   691  
   692  	// Record time spent in this node but not its dependencies.
   693  	// In parallel mode, due to GC/scheduler contention, the
   694  	// time is 5x higher than in sequential mode, even with a
   695  	// semaphore limiting the number of threads here.
   696  	// So use -debug=tp.
   697  	if dbg('t') {
   698  		t0 := time.Now()
   699  		defer func() { act.duration = time.Since(t0) }()
   700  	}
   701  
   702  	// Report an error if any dependency failed.
   703  	var failed []string
   704  	for _, dep := range act.deps {
   705  		if dep.err != nil {
   706  			failed = append(failed, dep.String())
   707  		}
   708  	}
   709  	if failed != nil {
   710  		sort.Strings(failed)
   711  		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
   712  		return
   713  	}
   714  
   715  	// Plumb the output values of the dependencies
   716  	// into the inputs of this action.  Also facts.
   717  	inputs := make(map[*analysis.Analyzer]interface{})
   718  	act.objectFacts = make(map[objectFactKey]analysis.Fact)
   719  	act.packageFacts = make(map[packageFactKey]analysis.Fact)
   720  	for _, dep := range act.deps {
   721  		if dep.pkg == act.pkg {
   722  			// Same package, different analysis (horizontal edge):
   723  			// in-memory outputs of prerequisite analyzers
   724  			// become inputs to this analysis pass.
   725  			inputs[dep.a] = dep.result
   726  
   727  		} else if dep.a == act.a { // (always true)
   728  			// Same analysis, different package (vertical edge):
   729  			// serialized facts produced by prerequisite analysis
   730  			// become available to this analysis pass.
   731  			inheritFacts(act, dep)
   732  		}
   733  	}
   734  
   735  	// Run the analysis.
   736  	pass := &analysis.Pass{
   737  		Analyzer:     act.a,
   738  		Fset:         act.pkg.Fset,
   739  		Files:        act.pkg.Syntax,
   740  		OtherFiles:   act.pkg.OtherFiles,
   741  		IgnoredFiles: act.pkg.IgnoredFiles,
   742  		Pkg:          act.pkg.Types,
   743  		TypesInfo:    act.pkg.TypesInfo,
   744  		TypesSizes:   act.pkg.TypesSizes,
   745  		TypeErrors:   act.pkg.TypeErrors,
   746  
   747  		ResultOf:          inputs,
   748  		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
   749  		ImportObjectFact:  act.importObjectFact,
   750  		ExportObjectFact:  act.exportObjectFact,
   751  		ImportPackageFact: act.importPackageFact,
   752  		ExportPackageFact: act.exportPackageFact,
   753  		AllObjectFacts:    act.allObjectFacts,
   754  		AllPackageFacts:   act.allPackageFacts,
   755  	}
   756  	act.pass = pass
   757  
   758  	var err error
   759  	if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
   760  		err = fmt.Errorf("analysis skipped due to errors in package")
   761  	} else {
   762  		act.result, err = pass.Analyzer.Run(pass)
   763  		if err == nil {
   764  			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
   765  				err = fmt.Errorf(
   766  					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
   767  					pass.Pkg.Path(), pass.Analyzer, got, want)
   768  			}
   769  		}
   770  	}
   771  	act.err = err
   772  
   773  	// disallow calls after Run
   774  	pass.ExportObjectFact = nil
   775  	pass.ExportPackageFact = nil
   776  }
   777  
   778  // inheritFacts populates act.objectFacts and act.packageFacts with
   779  // the facts it obtains from its dependency, dep.
   780  func inheritFacts(act, dep *action) {
   781  	serialize := dbg('s')
   782  
   783  	for key, fact := range dep.objectFacts {
   784  		// Filter out facts related to objects
   785  		// that are irrelevant downstream
   786  		// (equivalently: not in the compiler export data).
   787  		if !exportedFrom(key.obj, dep.pkg.Types) {
   788  			if false {
   789  				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
   790  			}
   791  			continue
   792  		}
   793  
   794  		// Optionally serialize/deserialize fact
   795  		// to verify that it works across address spaces.
   796  		if serialize {
   797  			encodedFact, err := codeFact(fact)
   798  			if err != nil {
   799  				log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
   800  			}
   801  			fact = encodedFact
   802  		}
   803  
   804  		if false {
   805  			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
   806  		}
   807  		act.objectFacts[key] = fact
   808  	}
   809  
   810  	for key, fact := range dep.packageFacts {
   811  		// TODO: filter out facts that belong to
   812  		// packages not mentioned in the export data
   813  		// to prevent side channels.
   814  
   815  		// Optionally serialize/deserialize fact
   816  		// to verify that it works across address spaces
   817  		// and is deterministic.
   818  		if serialize {
   819  			encodedFact, err := codeFact(fact)
   820  			if err != nil {
   821  				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
   822  			}
   823  			fact = encodedFact
   824  		}
   825  
   826  		if false {
   827  			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
   828  		}
   829  		act.packageFacts[key] = fact
   830  	}
   831  }
   832  
   833  // codeFact encodes then decodes a fact,
   834  // just to exercise that logic.
   835  func codeFact(fact analysis.Fact) (analysis.Fact, error) {
   836  	// We encode facts one at a time.
   837  	// A real modular driver would emit all facts
   838  	// into one encoder to improve gob efficiency.
   839  	var buf bytes.Buffer
   840  	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
   841  		return nil, err
   842  	}
   843  
   844  	// Encode it twice and assert that we get the same bits.
   845  	// This helps detect nondeterministic Gob encoding (e.g. of maps).
   846  	var buf2 bytes.Buffer
   847  	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
   848  		return nil, err
   849  	}
   850  	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
   851  		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
   852  	}
   853  
   854  	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
   855  	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
   856  		return nil, err
   857  	}
   858  	return new, nil
   859  }
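
// A hypothetical fact type that would survive this round trip (real fact
// types are declared by individual analyzers, not here). The fact must be
// a pointer (see factType), and only its exported fields are encoded by gob:
//
//	type wrapperFact struct{ Depth int }
//
//	func (*wrapperFact) AFact() {}
//
//	func (f *wrapperFact) String() string { return fmt.Sprintf("wrapper(depth %d)", f.Depth) }
//
// so codeFact(&wrapperFact{Depth: 2}) returns a distinct *wrapperFact with
// the same Depth, or an error if the encoding were nondeterministic.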
   860  
   861  // exportedFrom reports whether obj may be visible to a package that imports pkg.
   862  // This includes not just the exported members of pkg, but also unexported
   863  // constants, types, fields, and methods, perhaps belonging to other packages,
   864  // that find their way into the API.
   865  // This is an overapproximation of the more accurate approach used by
   866  // gc export data, which walks the type graph, but it's much simpler.
   867  //
   868  // TODO(adonovan): do more accurate filtering by walking the type graph.
   869  func exportedFrom(obj types.Object, pkg *types.Package) bool {
   870  	switch obj := obj.(type) {
   871  	case *types.Func:
   872  		return obj.Exported() && obj.Pkg() == pkg ||
   873  			obj.Type().(*types.Signature).Recv() != nil
   874  	case *types.Var:
   875  		if obj.IsField() {
   876  			return true
   877  		}
   878  		// we can't filter more aggressively than this because we need
   879  		// to consider function parameters exported, but have no way
   880  		// of telling apart function parameters from local variables.
   881  		return obj.Pkg() == pkg
   882  	case *types.TypeName, *types.Const:
   883  		return true
   884  	}
   885  	return false // Nil, Builtin, Label, or PkgName
   886  }
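
// For instance (illustrative declarations), given
//
//	type T struct{ n int }
//
//	func (t T) less(other T) bool { return t.n < other.n }
//
// both the unexported field n and the unexported method less are
// conservatively treated as potentially visible to importers (fields
// always; methods because they have a receiver), so facts about them
// are retained.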
   887  
   888  // importObjectFact implements Pass.ImportObjectFact.
   889  // Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
   890  // importObjectFact copies the fact value to *ptr.
   891  func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
   892  	if obj == nil {
   893  		panic("nil object")
   894  	}
   895  	key := objectFactKey{obj, factType(ptr)}
   896  	if v, ok := act.objectFacts[key]; ok {
   897  		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
   898  		return true
   899  	}
   900  	return false
   901  }
   902  
   903  // exportObjectFact implements Pass.ExportObjectFact.
   904  func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
   905  	if act.pass.ExportObjectFact == nil {
   906  		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
   907  	}
   908  
   909  	if obj.Pkg() != act.pkg.Types {
   910  		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
   911  			act.a, act.pkg, obj, fact)
   912  	}
   913  
   914  	key := objectFactKey{obj, factType(fact)}
   915  	act.objectFacts[key] = fact // clobber any existing entry
   916  	if dbg('f') {
   917  		objstr := types.ObjectString(obj, (*types.Package).Name)
   918  		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
   919  			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
   920  	}
   921  }
   922  
   923  // allObjectFacts implements Pass.AllObjectFacts.
   924  func (act *action) allObjectFacts() []analysis.ObjectFact {
   925  	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
   926  	for k := range act.objectFacts {
   927  		facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
   928  	}
   929  	return facts
   930  }
   931  
   932  // importPackageFact implements Pass.ImportPackageFact.
   933  // Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
   934  // importPackageFact copies the fact value to *ptr.
   935  func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
   936  	if pkg == nil {
   937  		panic("nil package")
   938  	}
   939  	key := packageFactKey{pkg, factType(ptr)}
   940  	if v, ok := act.packageFacts[key]; ok {
   941  		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
   942  		return true
   943  	}
   944  	return false
   945  }
   946  
   947  // exportPackageFact implements Pass.ExportPackageFact.
   948  func (act *action) exportPackageFact(fact analysis.Fact) {
   949  	if act.pass.ExportPackageFact == nil {
   950  		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
   951  	}
   952  
   953  	key := packageFactKey{act.pass.Pkg, factType(fact)}
   954  	act.packageFacts[key] = fact // clobber any existing entry
   955  	if dbg('f') {
   956  		fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
   957  			act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
   958  	}
   959  }
   960  
   961  func factType(fact analysis.Fact) reflect.Type {
   962  	t := reflect.TypeOf(fact)
   963  	if t.Kind() != reflect.Ptr {
   964  		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
   965  	}
   966  	return t
   967  }
   968  
   969  // allPackageFacts implements Pass.AllPackageFacts.
   970  func (act *action) allPackageFacts() []analysis.PackageFact {
   971  	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
   972  	for k := range act.packageFacts {
   973  		facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
   974  	}
   975  	return facts
   976  }
   977  
   978  func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }
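
// For example (hypothetical invocation), with -debug=tp both dbg('t') and
// dbg('p') report true, enabling timing output and sequential execution.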