golang.org/x/tools@v0.21.0/go/analysis/internal/checker/checker.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package checker defines the implementation of the checker commands.
     6  // The same code drives the multi-analysis driver, the single-analysis
     7  // driver that is conventionally provided for convenience along with
     8  // each analysis package, and the test driver.
     9  package checker
    10  
    11  import (
    12  	"bytes"
    13  	"encoding/gob"
    14  	"errors"
    15  	"flag"
    16  	"fmt"
    17  	"go/format"
    18  	"go/token"
    19  	"go/types"
    20  	"log"
    21  	"os"
    22  	"reflect"
    23  	"runtime"
    24  	"runtime/pprof"
    25  	"runtime/trace"
    26  	"sort"
    27  	"strings"
    28  	"sync"
    29  	"time"
    30  
    31  	"golang.org/x/tools/go/analysis"
    32  	"golang.org/x/tools/go/analysis/internal/analysisflags"
    33  	"golang.org/x/tools/go/packages"
    34  	"golang.org/x/tools/internal/analysisinternal"
    35  	"golang.org/x/tools/internal/diff"
    36  	"golang.org/x/tools/internal/robustio"
    37  )
    38  
    39  var (
    40  	// Debug is a set of single-letter flags:
    41  	//
    42  	//	f	show [f]acts as they are created
     43  	//	p	disable [p]arallel execution of analyzers
    44  	//	s	do additional [s]anity checks on fact types and serialization
    45  	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
    46  	//	v	show [v]erbose logging
    47  	//
    48  	Debug = ""
    49  
     50  	// Output file names for optional CPU/memory profiling and execution tracing.
    51  	CPUProfile, MemProfile, Trace string
    52  
    53  	// IncludeTests indicates whether test files should be analyzed too.
    54  	IncludeTests = true
    55  
    56  	// Fix determines whether to apply all suggested fixes.
    57  	Fix bool
    58  )
    59  
    60  // RegisterFlags registers command-line flags used by the analysis driver.
    61  func RegisterFlags() {
    62  	// When adding flags here, remember to update
    63  	// the list of suppressed flags in analysisflags.
    64  
    65  	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)
    66  
    67  	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
    68  	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
    69  	flag.StringVar(&Trace, "trace", "", "write trace log to this file")
    70  	flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too")
    71  
    72  	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
    73  }
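// On the command line of a driver binary built on this package (for
// example via singlechecker or multichecker), these flags are used
// roughly as follows; "mychecker" is a hypothetical binary name:
//
//	$ mychecker -debug=tp -cpuprofile=cpu.out ./...   # sequential timing info plus a CPU profile
//	$ mychecker -fix ./...                            # apply all suggested fixes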
    74  
    75  // Run loads the packages specified by args using go/packages,
    76  // then applies the specified analyzers to them.
    77  // Analysis flags must already have been set.
    78  // Analyzers must be valid according to [analysis.Validate].
    79  // It provides most of the logic for the main functions of both the
    80  // singlechecker and the multi-analysis commands.
    81  // It returns the appropriate exit code.
    82  func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
    83  	if CPUProfile != "" {
    84  		f, err := os.Create(CPUProfile)
    85  		if err != nil {
    86  			log.Fatal(err)
    87  		}
    88  		if err := pprof.StartCPUProfile(f); err != nil {
    89  			log.Fatal(err)
    90  		}
    91  		// NB: profile won't be written in case of error.
    92  		defer pprof.StopCPUProfile()
    93  	}
    94  
    95  	if Trace != "" {
    96  		f, err := os.Create(Trace)
    97  		if err != nil {
    98  			log.Fatal(err)
    99  		}
   100  		if err := trace.Start(f); err != nil {
   101  			log.Fatal(err)
   102  		}
   103  		// NB: trace log won't be written in case of error.
   104  		defer func() {
   105  			trace.Stop()
    106  			log.Printf("To view the trace, run:\n$ go tool trace %s", Trace)
   107  		}()
   108  	}
   109  
   110  	if MemProfile != "" {
   111  		f, err := os.Create(MemProfile)
   112  		if err != nil {
   113  			log.Fatal(err)
   114  		}
   115  		// NB: memprofile won't be written in case of error.
   116  		defer func() {
   117  			runtime.GC() // get up-to-date statistics
   118  			if err := pprof.WriteHeapProfile(f); err != nil {
   119  				log.Fatalf("Writing memory profile: %v", err)
   120  			}
   121  			f.Close()
   122  		}()
   123  	}
   124  
   125  	// Load the packages.
   126  	if dbg('v') {
   127  		log.SetPrefix("")
   128  		log.SetFlags(log.Lmicroseconds) // display timing
   129  		log.Printf("load %s", args)
   130  	}
   131  
   132  	// Optimization: if the selected analyzers don't produce/consume
   133  	// facts, we need source only for the initial packages.
   134  	allSyntax := needFacts(analyzers)
   135  	initial, err := load(args, allSyntax)
   136  	if err != nil {
   137  		if _, ok := err.(typeParseError); !ok {
    138  			// Fail when some of the errors are not
    139  			// related to parsing or typing.
   140  			log.Print(err)
   141  			return 1
   142  		}
    143  		// TODO: filter analyzers based on RunDespiteErrors?
   144  	}
   145  
   146  	// Run the analysis.
   147  	roots := analyze(initial, analyzers)
   148  
   149  	// Apply fixes.
   150  	if Fix {
   151  		if err := applyFixes(roots); err != nil {
   152  			// Fail when applying fixes failed.
   153  			log.Print(err)
   154  			return 1
   155  		}
   156  	}
   157  
   158  	// Print the results.
   159  	return printDiagnostics(roots)
   160  }
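// A minimal sketch of how a driver drives Run; this only approximates
// what the singlechecker and multichecker packages do, the analyzer
// choice is illustrative, and a driver outside this module cannot
// import this internal package directly and should use those packages
// instead:
//
//	package main
//
//	import (
//		"flag"
//		"log"
//		"os"
//
//		"golang.org/x/tools/go/analysis"
//		"golang.org/x/tools/go/analysis/internal/checker"
//		"golang.org/x/tools/go/analysis/passes/printf"
//	)
//
//	func main() {
//		analyzers := []*analysis.Analyzer{printf.Analyzer}
//		if err := analysis.Validate(analyzers); err != nil {
//			log.Fatal(err)
//		}
//		checker.RegisterFlags()
//		flag.Parse()
//		os.Exit(checker.Run(flag.Args(), analyzers))
//	}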
   161  
   162  // typeParseError represents a package load error
   163  // that is related to typing and parsing.
   164  type typeParseError struct {
   165  	error
   166  }
   167  
   168  // load loads the initial packages. If all loading issues are related to
   169  // typing and parsing, the returned error is of type typeParseError.
   170  func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
   171  	mode := packages.LoadSyntax
   172  	if allSyntax {
   173  		mode = packages.LoadAllSyntax
   174  	}
   175  	mode |= packages.NeedModule
   176  	conf := packages.Config{
   177  		Mode:  mode,
   178  		Tests: IncludeTests,
   179  	}
   180  	initial, err := packages.Load(&conf, patterns...)
   181  	if err == nil {
   182  		if len(initial) == 0 {
   183  			err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
   184  		} else {
   185  			err = loadingError(initial)
   186  		}
   187  	}
   188  	return initial, err
   189  }
   190  
   191  // loadingError checks for issues during the loading of initial
    192  // packages. It returns nil if there are no issues, or an error
    193  // of type typeParseError if all errors, including those in
   194  // dependencies, are related to typing or parsing. Otherwise,
   195  // a plain error is returned with an appropriate message.
   196  func loadingError(initial []*packages.Package) error {
   197  	var err error
   198  	if n := packages.PrintErrors(initial); n > 1 {
   199  		err = fmt.Errorf("%d errors during loading", n)
   200  	} else if n == 1 {
   201  		err = errors.New("error during loading")
   202  	} else {
   203  		// no errors
   204  		return nil
   205  	}
   206  	all := true
   207  	packages.Visit(initial, nil, func(pkg *packages.Package) {
   208  		for _, err := range pkg.Errors {
   209  			typeOrParse := err.Kind == packages.TypeError || err.Kind == packages.ParseError
   210  			all = all && typeOrParse
   211  		}
   212  	})
   213  	if all {
   214  		return typeParseError{err}
   215  	}
   216  	return err
   217  }
   218  
   219  // TestAnalyzer applies an analyzer to a set of packages (and their
   220  // dependencies if necessary) and returns the results.
   221  // The analyzer must be valid according to [analysis.Validate].
   222  //
    223  // Facts about each result's package are returned in a map keyed by
    224  // object; package facts have a nil key.
   225  //
   226  // This entry point is used only by analysistest.
   227  func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
   228  	var results []*TestAnalyzerResult
   229  	for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
   230  		facts := make(map[types.Object][]analysis.Fact)
   231  		for key, fact := range act.objectFacts {
   232  			if key.obj.Pkg() == act.pass.Pkg {
   233  				facts[key.obj] = append(facts[key.obj], fact)
   234  			}
   235  		}
   236  		for key, fact := range act.packageFacts {
   237  			if key.pkg == act.pass.Pkg {
   238  				facts[nil] = append(facts[nil], fact)
   239  			}
   240  		}
   241  
   242  		results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
   243  	}
   244  	return results
   245  }
   246  
   247  type TestAnalyzerResult struct {
   248  	Pass        *analysis.Pass
   249  	Diagnostics []analysis.Diagnostic
   250  	Facts       map[types.Object][]analysis.Fact
   251  	Result      interface{}
   252  	Err         error
   253  }
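// A hypothetical analysistest-style caller might consume these results
// along the following lines (a sketch only; t is a *testing.T, and a
// nil Facts key denotes package-level facts):
//
//	for _, res := range TestAnalyzer(a, pkgs) {
//		if res.Err != nil {
//			t.Fatal(res.Err)
//		}
//		for _, d := range res.Diagnostics {
//			_ = d // compare against expected diagnostics
//		}
//		for obj, facts := range res.Facts {
//			_ = obj // nil for package facts
//			_ = facts
//		}
//	}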
   254  
   255  func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
   256  	// Construct the action graph.
   257  	if dbg('v') {
   258  		log.Printf("building graph of analysis passes")
   259  	}
   260  
   261  	// Each graph node (action) is one unit of analysis.
   262  	// Edges express package-to-package (vertical) dependencies,
   263  	// and analysis-to-analysis (horizontal) dependencies.
   264  	type key struct {
   265  		*analysis.Analyzer
   266  		*packages.Package
   267  	}
   268  	actions := make(map[key]*action)
   269  
   270  	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
   271  	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
   272  		k := key{a, pkg}
   273  		act, ok := actions[k]
   274  		if !ok {
   275  			act = &action{a: a, pkg: pkg}
   276  
    277  			// Add a dependency on each required analyzer.
   278  			for _, req := range a.Requires {
   279  				act.deps = append(act.deps, mkAction(req, pkg))
   280  			}
   281  
   282  			// An analysis that consumes/produces facts
   283  			// must run on the package's dependencies too.
   284  			if len(a.FactTypes) > 0 {
   285  				paths := make([]string, 0, len(pkg.Imports))
   286  				for path := range pkg.Imports {
   287  					paths = append(paths, path)
   288  				}
   289  				sort.Strings(paths) // for determinism
   290  				for _, path := range paths {
   291  					dep := mkAction(a, pkg.Imports[path])
   292  					act.deps = append(act.deps, dep)
   293  				}
   294  			}
   295  
   296  			actions[k] = act
   297  		}
   298  		return act
   299  	}
   300  
   301  	// Build nodes for initial packages.
   302  	var roots []*action
   303  	for _, a := range analyzers {
   304  		for _, pkg := range pkgs {
   305  			root := mkAction(a, pkg)
   306  			root.isroot = true
   307  			roots = append(roots, root)
   308  		}
   309  	}
   310  
   311  	// Execute the graph in parallel.
   312  	execAll(roots)
   313  
   314  	return roots
   315  }
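// For example, analyzing an initial package p (which imports q) with
// analyzers A (no FactTypes) and B (with FactTypes) yields the root
// actions A@p and B@p; because B uses facts, B@p also acquires a
// non-root dependency B@q from which facts flow upward (A, B, p, and q
// are hypothetical names):
//
//	A@p    B@p
//	        |
//	       B@q
//
// In addition, every action has horizontal edges to the actions of its
// Requires analyzers on the same package.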
   316  
   317  func applyFixes(roots []*action) error {
    318  	// Visit all of the actions and accumulate the suggested edits.
   319  	paths := make(map[robustio.FileID]string)
   320  	editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit)
   321  	visited := make(map[*action]bool)
   322  	var apply func(*action) error
   323  	var visitAll func(actions []*action) error
   324  	visitAll = func(actions []*action) error {
   325  		for _, act := range actions {
   326  			if !visited[act] {
   327  				visited[act] = true
   328  				if err := visitAll(act.deps); err != nil {
   329  					return err
   330  				}
   331  				if err := apply(act); err != nil {
   332  					return err
   333  				}
   334  			}
   335  		}
   336  		return nil
   337  	}
   338  
   339  	apply = func(act *action) error {
   340  		editsForTokenFile := make(map[*token.File][]diff.Edit)
   341  		for _, diag := range act.diagnostics {
   342  			for _, sf := range diag.SuggestedFixes {
   343  				for _, edit := range sf.TextEdits {
   344  					// Validate the edit.
   345  					// Any error here indicates a bug in the analyzer.
   346  					start, end := edit.Pos, edit.End
   347  					file := act.pkg.Fset.File(start)
   348  					if file == nil {
   349  						return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)",
   350  							act.a.Name, start)
   351  					}
   352  					if !end.IsValid() {
   353  						end = start
   354  					}
   355  					if start > end {
   356  						return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)",
   357  							act.a.Name, start, end)
   358  					}
   359  					if eof := token.Pos(file.Base() + file.Size()); end > eof {
   360  						return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)",
   361  							act.a.Name, end, eof)
   362  					}
   363  					edit := diff.Edit{
   364  						Start: file.Offset(start),
   365  						End:   file.Offset(end),
   366  						New:   string(edit.NewText),
   367  					}
   368  					editsForTokenFile[file] = append(editsForTokenFile[file], edit)
   369  				}
   370  			}
   371  		}
   372  
   373  		for f, edits := range editsForTokenFile {
   374  			id, _, err := robustio.GetFileID(f.Name())
   375  			if err != nil {
   376  				return err
   377  			}
   378  			if _, hasId := paths[id]; !hasId {
   379  				paths[id] = f.Name()
   380  				editsByAction[id] = make(map[*action][]diff.Edit)
   381  			}
   382  			editsByAction[id][act] = edits
   383  		}
   384  		return nil
   385  	}
   386  
   387  	if err := visitAll(roots); err != nil {
   388  		return err
   389  	}
   390  
   391  	// Validate and group the edits to each actual file.
   392  	editsByPath := make(map[string][]diff.Edit)
   393  	for id, actToEdits := range editsByAction {
   394  		path := paths[id]
   395  		actions := make([]*action, 0, len(actToEdits))
   396  		for act := range actToEdits {
   397  			actions = append(actions, act)
   398  		}
   399  
   400  		// Does any action create conflicting edits?
   401  		for _, act := range actions {
   402  			edits := actToEdits[act]
   403  			if _, invalid := validateEdits(edits); invalid > 0 {
   404  				name, x, y := act.a.Name, edits[invalid-1], edits[invalid]
   405  				return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y})
   406  			}
   407  		}
   408  
   409  		// Does any pair of different actions create edits that conflict?
   410  		for j := range actions {
   411  			for k := range actions[:j] {
   412  				x, y := actions[j], actions[k]
   413  				if x.a.Name > y.a.Name {
   414  					x, y = y, x
   415  				}
   416  				xedits, yedits := actToEdits[x], actToEdits[y]
   417  				combined := append(xedits, yedits...)
   418  				if _, invalid := validateEdits(combined); invalid > 0 {
   419  					// TODO: consider applying each action's consistent list of edits entirely,
   420  					// and then using a three-way merge (such as GNU diff3) on the resulting
   421  					// files to report more precisely the parts that actually conflict.
   422  					return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits)
   423  				}
   424  			}
   425  		}
   426  
   427  		var edits []diff.Edit
   428  		for act := range actToEdits {
   429  			edits = append(edits, actToEdits[act]...)
   430  		}
   431  		editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated.
   432  	}
   433  
   434  	// Now we've got a set of valid edits for each file. Apply them.
   435  	for path, edits := range editsByPath {
   436  		// TODO(adonovan): this should really work on the same
   437  		// gulp from the file system that fed the analyzer (see #62292).
   438  		contents, err := os.ReadFile(path)
   439  		if err != nil {
   440  			return err
   441  		}
   442  
   443  		out, err := diff.ApplyBytes(contents, edits)
   444  		if err != nil {
   445  			return err
   446  		}
   447  
   448  		// Try to format the file.
   449  		if formatted, err := format.Source(out); err == nil {
   450  			out = formatted
   451  		}
   452  
   453  		if err := os.WriteFile(path, out, 0644); err != nil {
   454  			return err
   455  		}
   456  	}
   457  	return nil
   458  }
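// The edits consumed above originate from analyzers calling Pass.Report
// with suggested fixes. A sketch of the producer side (the diagnostic
// text is illustrative and call is some ast.Node in the analyzer's Run):
//
//	pass.Report(analysis.Diagnostic{
//		Pos:     call.Pos(),
//		End:     call.End(),
//		Message: "result of call is never used",
//		SuggestedFixes: []analysis.SuggestedFix{{
//			Message: "remove the call",
//			TextEdits: []analysis.TextEdit{{
//				Pos:     call.Pos(),
//				End:     call.End(),
//				NewText: nil, // an empty replacement deletes the range
//			}},
//		}},
//	})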
   459  
    460  // validateEdits returns the list of edits, sorted and with
    461  // duplicates removed. It also returns the index of the second edit
    462  // of a pair of overlapping adjacent edits, if there is one, or a
    463  // value < 0 if the edits are valid.
   464  func validateEdits(edits []diff.Edit) ([]diff.Edit, int) {
   465  	if len(edits) == 0 {
   466  		return nil, -1
   467  	}
   468  	equivalent := func(x, y diff.Edit) bool {
   469  		return x.Start == y.Start && x.End == y.End && x.New == y.New
   470  	}
   471  	diff.SortEdits(edits)
   472  	unique := []diff.Edit{edits[0]}
   473  	invalid := -1
   474  	for i := 1; i < len(edits); i++ {
   475  		prev, cur := edits[i-1], edits[i]
   476  		// We skip over equivalent edits without considering them
   477  		// an error. This handles identical edits coming from the
    478  		// same source file being loaded into multiple *packages.Package
    479  		// variants for testing, e.g. packages "p" and "p [p.test]".
   480  		if !equivalent(prev, cur) {
   481  			unique = append(unique, cur)
   482  			if prev.End > cur.Start {
   483  				invalid = i
   484  			}
   485  		}
   486  	}
   487  	return unique, invalid
   488  }
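// For instance (offsets are illustrative): two identical edits
// {Start: 5, End: 8, New: "x"} are collapsed into one and the result is
// reported as valid, whereas {Start: 5, End: 8, New: "x"} followed by
// {Start: 7, End: 9, New: "y"} is kept but flagged as invalid, because
// the first edit's End (8) exceeds the second edit's Start (7).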
   489  
   490  // diff3Conflict returns an error describing two conflicting sets of
   491  // edits on a file at path.
   492  func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error {
   493  	contents, err := os.ReadFile(path)
   494  	if err != nil {
   495  		return err
   496  	}
   497  	oldlabel, old := "base", string(contents)
   498  
   499  	xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines)
   500  	if err != nil {
   501  		return err
   502  	}
   503  	ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines)
   504  	if err != nil {
   505  		return err
   506  	}
   507  
   508  	return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s",
   509  		xlabel, ylabel, path, xdiff, ydiff)
   510  }
   511  
   512  // printDiagnostics prints the diagnostics for the root packages in either
   513  // plain text or JSON format. JSON format also includes errors for any
   514  // dependencies.
   515  //
   516  // It returns the exitcode: in plain mode, 0 for success, 1 for analysis
   517  // errors, and 3 for diagnostics. We avoid 2 since the flag package uses
   518  // it. JSON mode always succeeds at printing errors and diagnostics in a
   519  // structured form to stdout.
   520  func printDiagnostics(roots []*action) (exitcode int) {
   521  	// Print the output.
   522  	//
   523  	// Print diagnostics only for root packages,
   524  	// but errors for all packages.
   525  	printed := make(map[*action]bool)
   526  	var print func(*action)
   527  	var visitAll func(actions []*action)
   528  	visitAll = func(actions []*action) {
   529  		for _, act := range actions {
   530  			if !printed[act] {
   531  				printed[act] = true
   532  				visitAll(act.deps)
   533  				print(act)
   534  			}
   535  		}
   536  	}
   537  
   538  	if analysisflags.JSON {
   539  		// JSON output
   540  		tree := make(analysisflags.JSONTree)
   541  		print = func(act *action) {
   542  			var diags []analysis.Diagnostic
   543  			if act.isroot {
   544  				diags = act.diagnostics
   545  			}
   546  			tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err)
   547  		}
   548  		visitAll(roots)
   549  		tree.Print()
   550  	} else {
   551  		// plain text output
   552  
   553  		// De-duplicate diagnostics by position (not token.Pos) to
   554  		// avoid double-reporting in source files that belong to
    555  		// multiple packages, such as "foo" and "foo [foo.test]".
   556  		type key struct {
   557  			pos token.Position
   558  			end token.Position
   559  			*analysis.Analyzer
   560  			message string
   561  		}
   562  		seen := make(map[key]bool)
   563  
   564  		print = func(act *action) {
   565  			if act.err != nil {
   566  				fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
   567  				exitcode = 1 // analysis failed, at least partially
   568  				return
   569  			}
   570  			if act.isroot {
   571  				for _, diag := range act.diagnostics {
    572  				// We don't display the analyzer name or the diagnostic
    573  				// category, as most users don't care.
   574  
   575  					posn := act.pkg.Fset.Position(diag.Pos)
   576  					end := act.pkg.Fset.Position(diag.End)
   577  					k := key{posn, end, act.a, diag.Message}
   578  					if seen[k] {
   579  						continue // duplicate
   580  					}
   581  					seen[k] = true
   582  
   583  					analysisflags.PrintPlain(act.pkg.Fset, diag)
   584  				}
   585  			}
   586  		}
   587  		visitAll(roots)
   588  
   589  		if exitcode == 0 && len(seen) > 0 {
   590  			exitcode = 3 // successfully produced diagnostics
   591  		}
   592  	}
   593  
   594  	// Print timing info.
   595  	if dbg('t') {
   596  		if !dbg('p') {
   597  			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
   598  		}
   599  		var all []*action
   600  		var total time.Duration
   601  		for act := range printed {
   602  			all = append(all, act)
   603  			total += act.duration
   604  		}
   605  		sort.Slice(all, func(i, j int) bool {
   606  			return all[i].duration > all[j].duration
   607  		})
   608  
   609  		// Print actions accounting for 90% of the total.
   610  		var sum time.Duration
   611  		for _, act := range all {
   612  			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
   613  			sum += act.duration
   614  			if sum >= total*9/10 {
   615  				break
   616  			}
   617  		}
   618  	}
   619  
   620  	return exitcode
   621  }
   622  
   623  // needFacts reports whether any analysis required by the specified set
   624  // needs facts.  If so, we must load the entire program from source.
   625  func needFacts(analyzers []*analysis.Analyzer) bool {
   626  	seen := make(map[*analysis.Analyzer]bool)
   627  	var q []*analysis.Analyzer // for BFS
   628  	q = append(q, analyzers...)
   629  	for len(q) > 0 {
   630  		a := q[0]
   631  		q = q[1:]
   632  		if !seen[a] {
   633  			seen[a] = true
   634  			if len(a.FactTypes) > 0 {
   635  				return true
   636  			}
   637  			q = append(q, a.Requires...)
   638  		}
   639  	}
   640  	return false
   641  }
   642  
   643  // An action represents one unit of analysis work: the application of
   644  // one analysis to one package. Actions form a DAG, both within a
   645  // package (as different analyzers are applied, either in sequence or
   646  // parallel), and across packages (as dependencies are analyzed).
   647  type action struct {
   648  	once         sync.Once
   649  	a            *analysis.Analyzer
   650  	pkg          *packages.Package
   651  	pass         *analysis.Pass
   652  	isroot       bool
   653  	deps         []*action
   654  	objectFacts  map[objectFactKey]analysis.Fact
   655  	packageFacts map[packageFactKey]analysis.Fact
   656  	result       interface{}
   657  	diagnostics  []analysis.Diagnostic
   658  	err          error
   659  	duration     time.Duration
   660  }
   661  
   662  type objectFactKey struct {
   663  	obj types.Object
   664  	typ reflect.Type
   665  }
   666  
   667  type packageFactKey struct {
   668  	pkg *types.Package
   669  	typ reflect.Type
   670  }
   671  
   672  func (act *action) String() string {
   673  	return fmt.Sprintf("%s@%s", act.a, act.pkg)
   674  }
   675  
   676  func execAll(actions []*action) {
   677  	sequential := dbg('p')
   678  	var wg sync.WaitGroup
   679  	for _, act := range actions {
   680  		wg.Add(1)
   681  		work := func(act *action) {
   682  			act.exec()
   683  			wg.Done()
   684  		}
   685  		if sequential {
   686  			work(act)
   687  		} else {
   688  			go work(act)
   689  		}
   690  	}
   691  	wg.Wait()
   692  }
   693  
   694  func (act *action) exec() { act.once.Do(act.execOnce) }
   695  
   696  func (act *action) execOnce() {
   697  	// Analyze dependencies.
   698  	execAll(act.deps)
   699  
   700  	// TODO(adonovan): uncomment this during profiling.
   701  	// It won't build pre-go1.11 but conditional compilation
   702  	// using build tags isn't warranted.
   703  	//
   704  	// ctx, task := trace.NewTask(context.Background(), "exec")
   705  	// trace.Log(ctx, "pass", act.String())
   706  	// defer task.End()
   707  
   708  	// Record time spent in this node but not its dependencies.
   709  	// In parallel mode, due to GC/scheduler contention, the
   710  	// time is 5x higher than in sequential mode, even with a
   711  	// semaphore limiting the number of threads here.
   712  	// So use -debug=tp.
   713  	if dbg('t') {
   714  		t0 := time.Now()
   715  		defer func() { act.duration = time.Since(t0) }()
   716  	}
   717  
   718  	// Report an error if any dependency failed.
   719  	var failed []string
   720  	for _, dep := range act.deps {
   721  		if dep.err != nil {
   722  			failed = append(failed, dep.String())
   723  		}
   724  	}
   725  	if failed != nil {
   726  		sort.Strings(failed)
   727  		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
   728  		return
   729  	}
   730  
   731  	// Plumb the output values of the dependencies
   732  	// into the inputs of this action.  Also facts.
   733  	inputs := make(map[*analysis.Analyzer]interface{})
   734  	act.objectFacts = make(map[objectFactKey]analysis.Fact)
   735  	act.packageFacts = make(map[packageFactKey]analysis.Fact)
   736  	for _, dep := range act.deps {
   737  		if dep.pkg == act.pkg {
   738  			// Same package, different analysis (horizontal edge):
   739  			// in-memory outputs of prerequisite analyzers
   740  			// become inputs to this analysis pass.
   741  			inputs[dep.a] = dep.result
   742  
   743  		} else if dep.a == act.a { // (always true)
   744  			// Same analysis, different package (vertical edge):
   745  			// serialized facts produced by prerequisite analysis
   746  			// become available to this analysis pass.
   747  			inheritFacts(act, dep)
   748  		}
   749  	}
   750  
   751  	// Run the analysis.
   752  	pass := &analysis.Pass{
   753  		Analyzer:     act.a,
   754  		Fset:         act.pkg.Fset,
   755  		Files:        act.pkg.Syntax,
   756  		OtherFiles:   act.pkg.OtherFiles,
   757  		IgnoredFiles: act.pkg.IgnoredFiles,
   758  		Pkg:          act.pkg.Types,
   759  		TypesInfo:    act.pkg.TypesInfo,
   760  		TypesSizes:   act.pkg.TypesSizes,
   761  		TypeErrors:   act.pkg.TypeErrors,
   762  
   763  		ResultOf:          inputs,
   764  		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
   765  		ImportObjectFact:  act.importObjectFact,
   766  		ExportObjectFact:  act.exportObjectFact,
   767  		ImportPackageFact: act.importPackageFact,
   768  		ExportPackageFact: act.exportPackageFact,
   769  		AllObjectFacts:    act.allObjectFacts,
   770  		AllPackageFacts:   act.allPackageFacts,
   771  	}
   772  	pass.ReadFile = analysisinternal.MakeReadFile(pass)
   773  	act.pass = pass
   774  
   775  	var err error
   776  	if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
    777  		err = errors.New("analysis skipped due to errors in package")
   778  	} else {
   779  		act.result, err = pass.Analyzer.Run(pass)
   780  		if err == nil {
   781  			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
   782  				err = fmt.Errorf(
   783  					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
   784  					pass.Pkg.Path(), pass.Analyzer, got, want)
   785  			}
   786  		}
   787  	}
   788  	if err == nil { // resolve diagnostic URLs
   789  		for i := range act.diagnostics {
   790  			if url, uerr := analysisflags.ResolveURL(act.a, act.diagnostics[i]); uerr == nil {
   791  				act.diagnostics[i].URL = url
   792  			} else {
   793  				err = uerr // keep the last error
   794  			}
   795  		}
   796  	}
   797  	act.err = err
   798  
   799  	// disallow calls after Run
   800  	pass.ExportObjectFact = nil
   801  	pass.ExportPackageFact = nil
   802  }
   803  
    804  // inheritFacts populates act.objectFacts and act.packageFacts
    805  // with the facts it obtains from its dependency, dep.
   806  func inheritFacts(act, dep *action) {
   807  	serialize := dbg('s')
   808  
   809  	for key, fact := range dep.objectFacts {
   810  		// Filter out facts related to objects
   811  		// that are irrelevant downstream
   812  		// (equivalently: not in the compiler export data).
   813  		if !exportedFrom(key.obj, dep.pkg.Types) {
   814  			if false {
   815  				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
   816  			}
   817  			continue
   818  		}
   819  
   820  		// Optionally serialize/deserialize fact
   821  		// to verify that it works across address spaces.
   822  		if serialize {
   823  			encodedFact, err := codeFact(fact)
   824  			if err != nil {
   825  				log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
   826  			}
   827  			fact = encodedFact
   828  		}
   829  
   830  		if false {
   831  			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
   832  		}
   833  		act.objectFacts[key] = fact
   834  	}
   835  
   836  	for key, fact := range dep.packageFacts {
   837  		// TODO: filter out facts that belong to
   838  		// packages not mentioned in the export data
   839  		// to prevent side channels.
   840  
   841  		// Optionally serialize/deserialize fact
   842  		// to verify that it works across address spaces
   843  		// and is deterministic.
   844  		if serialize {
   845  			encodedFact, err := codeFact(fact)
   846  			if err != nil {
    847  				log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
   848  			}
   849  			fact = encodedFact
   850  		}
   851  
   852  		if false {
   853  			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
   854  		}
   855  		act.packageFacts[key] = fact
   856  	}
   857  }
   858  
   859  // codeFact encodes then decodes a fact,
   860  // just to exercise that logic.
   861  func codeFact(fact analysis.Fact) (analysis.Fact, error) {
   862  	// We encode facts one at a time.
   863  	// A real modular driver would emit all facts
   864  	// into one encoder to improve gob efficiency.
   865  	var buf bytes.Buffer
   866  	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
   867  		return nil, err
   868  	}
   869  
   870  	// Encode it twice and assert that we get the same bits.
   871  	// This helps detect nondeterministic Gob encoding (e.g. of maps).
   872  	var buf2 bytes.Buffer
   873  	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
   874  		return nil, err
   875  	}
   876  	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
   877  		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
   878  	}
   879  
   880  	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
   881  	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
   882  		return nil, err
   883  	}
   884  	return new, nil
   885  }
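// For the round trip above to work, a fact must be a gob-encodable
// pointer type that implements analysis.Fact and is listed in the
// analyzer's FactTypes. A typical declaration looks roughly like this
// (the noReturn fact and the analyzer fields shown are illustrative):
//
//	type noReturn struct{} // fact: the function never returns
//
//	func (*noReturn) AFact() {}
//
//	var Analyzer = &analysis.Analyzer{
//		Name:      "noreturn",
//		Doc:       "report functions that never return",
//		Run:       run,
//		FactTypes: []analysis.Fact{new(noReturn)},
//	}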
   886  
   887  // exportedFrom reports whether obj may be visible to a package that imports pkg.
   888  // This includes not just the exported members of pkg, but also unexported
   889  // constants, types, fields, and methods, perhaps belonging to other packages,
    890  // that find their way into the API.
   891  // This is an overapproximation of the more accurate approach used by
   892  // gc export data, which walks the type graph, but it's much simpler.
   893  //
   894  // TODO(adonovan): do more accurate filtering by walking the type graph.
   895  func exportedFrom(obj types.Object, pkg *types.Package) bool {
   896  	switch obj := obj.(type) {
   897  	case *types.Func:
   898  		return obj.Exported() && obj.Pkg() == pkg ||
   899  			obj.Type().(*types.Signature).Recv() != nil
   900  	case *types.Var:
   901  		if obj.IsField() {
   902  			return true
   903  		}
   904  		// we can't filter more aggressively than this because we need
   905  		// to consider function parameters exported, but have no way
   906  		// of telling apart function parameters from local variables.
   907  		return obj.Pkg() == pkg
   908  	case *types.TypeName, *types.Const:
   909  		return true
   910  	}
   911  	return false // Nil, Builtin, Label, or PkgName
   912  }
   913  
   914  // importObjectFact implements Pass.ImportObjectFact.
   915  // Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
   916  // importObjectFact copies the fact value to *ptr.
   917  func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
   918  	if obj == nil {
   919  		panic("nil object")
   920  	}
   921  	key := objectFactKey{obj, factType(ptr)}
   922  	if v, ok := act.objectFacts[key]; ok {
   923  		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
   924  		return true
   925  	}
   926  	return false
   927  }
   928  
   929  // exportObjectFact implements Pass.ExportObjectFact.
   930  func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
   931  	if act.pass.ExportObjectFact == nil {
   932  		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
   933  	}
   934  
   935  	if obj.Pkg() != act.pkg.Types {
    936  		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
   937  			act.a, act.pkg, obj, fact)
   938  	}
   939  
   940  	key := objectFactKey{obj, factType(fact)}
   941  	act.objectFacts[key] = fact // clobber any existing entry
   942  	if dbg('f') {
   943  		objstr := types.ObjectString(obj, (*types.Package).Name)
   944  		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
   945  			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
   946  	}
   947  }
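// From an analyzer's point of view these methods are reached through
// the Pass, typically in a pattern like the following sketch (foundFact
// and obj are illustrative):
//
//	// In the package that discovers the property:
//	pass.ExportObjectFact(obj, &foundFact{})
//
//	// In a later pass over an importing package:
//	var f foundFact
//	if pass.ImportObjectFact(obj, &f) {
//		// obj has the property recorded earlier
//	}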
   948  
   949  // allObjectFacts implements Pass.AllObjectFacts.
   950  func (act *action) allObjectFacts() []analysis.ObjectFact {
   951  	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
   952  	for k := range act.objectFacts {
   953  		facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
   954  	}
   955  	return facts
   956  }
   957  
   958  // importPackageFact implements Pass.ImportPackageFact.
   959  // Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
    960  // importPackageFact copies the fact value to *ptr.
   961  func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
   962  	if pkg == nil {
   963  		panic("nil package")
   964  	}
   965  	key := packageFactKey{pkg, factType(ptr)}
   966  	if v, ok := act.packageFacts[key]; ok {
   967  		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
   968  		return true
   969  	}
   970  	return false
   971  }
   972  
   973  // exportPackageFact implements Pass.ExportPackageFact.
   974  func (act *action) exportPackageFact(fact analysis.Fact) {
   975  	if act.pass.ExportPackageFact == nil {
   976  		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
   977  	}
   978  
   979  	key := packageFactKey{act.pass.Pkg, factType(fact)}
   980  	act.packageFacts[key] = fact // clobber any existing entry
   981  	if dbg('f') {
   982  		fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
   983  			act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
   984  	}
   985  }
   986  
   987  func factType(fact analysis.Fact) reflect.Type {
   988  	t := reflect.TypeOf(fact)
   989  	if t.Kind() != reflect.Ptr {
   990  		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
   991  	}
   992  	return t
   993  }
   994  
   995  // allPackageFacts implements Pass.AllPackageFacts.
   996  func (act *action) allPackageFacts() []analysis.PackageFact {
   997  	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
   998  	for k := range act.packageFacts {
   999  		facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
  1000  	}
  1001  	return facts
  1002  }
  1003  
  1004  func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }