github.com/powerman/golang-tools@v0.1.11-0.20220410185822-5ad214d8d803/go/analysis/internal/checker/checker.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package checker defines the implementation of the checker commands.
// The same code drives the multi-analysis driver, the single-analysis
// driver that is conventionally provided for convenience along with
// each analysis package, and the test driver.
package checker

import (
	"bytes"
	"encoding/gob"
	"errors"
	"flag"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"go/types"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"runtime"
	"runtime/pprof"
	"runtime/trace"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/powerman/golang-tools/go/analysis"
	"github.com/powerman/golang-tools/go/analysis/internal/analysisflags"
	"github.com/powerman/golang-tools/go/packages"
	"github.com/powerman/golang-tools/internal/analysisinternal"
	"github.com/powerman/golang-tools/internal/span"
)

var (
	// Debug is a set of single-letter flags:
	//
	//	f	show [f]acts as they are created
	//	p	disable [p]arallel execution of analyzers
	//	s	do additional [s]anity checks on fact types and serialization
	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
	//	v	show [v]erbose logging
	//
	Debug = ""

	// Log files for optional performance tracing.
	CPUProfile, MemProfile, Trace string

	// Fix determines whether to apply all suggested fixes.
	Fix bool
)

// RegisterFlags registers command-line flags used by the analysis driver.
func RegisterFlags() {
	// When adding flags here, remember to update
	// the list of suppressed flags in analysisflags.

	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)

	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
	flag.StringVar(&Trace, "trace", "", "write trace log to this file")

	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
}

// Run loads the packages specified by args using go/packages,
// then applies the specified analyzers to them.
// Analysis flags must already have been set.
// It provides most of the logic for the main functions of both the
// singlechecker and the multi-analysis commands.
// It returns the appropriate exit code.
func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
	if CPUProfile != "" {
		f, err := os.Create(CPUProfile)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		// NB: profile won't be written in case of error.
		defer pprof.StopCPUProfile()
	}

	if Trace != "" {
		f, err := os.Create(Trace)
		if err != nil {
			log.Fatal(err)
		}
		if err := trace.Start(f); err != nil {
			log.Fatal(err)
		}
		// NB: trace log won't be written in case of error.
		defer func() {
			trace.Stop()
			log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
		}()
	}

	if MemProfile != "" {
		f, err := os.Create(MemProfile)
		if err != nil {
			log.Fatal(err)
		}
		// NB: memprofile won't be written in case of error.
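		// The heap profile is written when Run returns; the resulting file
		// can then be inspected with `go tool pprof`.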
		defer func() {
			runtime.GC() // get up-to-date statistics
			if err := pprof.WriteHeapProfile(f); err != nil {
				log.Fatalf("Writing memory profile: %v", err)
			}
			f.Close()
		}()
	}

	// Load the packages.
	if dbg('v') {
		log.SetPrefix("")
		log.SetFlags(log.Lmicroseconds) // display timing
		log.Printf("load %s", args)
	}

	// Optimization: if the selected analyzers don't produce/consume
	// facts, we need source only for the initial packages.
	allSyntax := needFacts(analyzers)
	initial, err := load(args, allSyntax)
	if err != nil {
		if _, ok := err.(typeParseError); !ok {
			// Fail when some of the errors are not
			// related to parsing or typing.
			log.Print(err)
			return 1
		}
		// TODO: filter analyzers based on RunDespiteError?
	}

	// Print the results.
	roots := analyze(initial, analyzers)

	if Fix {
		applyFixes(roots)
	}
	return printDiagnostics(roots)
}

// typeParseError represents a package load error
// that is related to typing and parsing.
type typeParseError struct {
	error
}

// load loads the initial packages. If all loading issues are related to
// typing and parsing, the returned error is of type typeParseError.
func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
	mode := packages.LoadSyntax
	if allSyntax {
		mode = packages.LoadAllSyntax
	}
	conf := packages.Config{
		Mode:  mode,
		Tests: true,
	}
	initial, err := packages.Load(&conf, patterns...)
	if err == nil {
		if len(initial) == 0 {
			err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
		} else {
			err = loadingError(initial)
		}
	}
	return initial, err
}

// loadingError checks for issues during the loading of initial
// packages. Returns nil if there are no issues. Returns an error
// of type typeParseError if all errors, including those in
// dependencies, are related to typing or parsing. Otherwise,
// a plain error is returned with an appropriate message.
func loadingError(initial []*packages.Package) error {
	var err error
	if n := packages.PrintErrors(initial); n > 1 {
		err = fmt.Errorf("%d errors during loading", n)
	} else if n == 1 {
		err = errors.New("error during loading")
	} else {
		// no errors
		return nil
	}
	all := true
	packages.Visit(initial, nil, func(pkg *packages.Package) {
		for _, err := range pkg.Errors {
			typeOrParse := err.Kind == packages.TypeError || err.Kind == packages.ParseError
			all = all && typeOrParse
		}
	})
	if all {
		return typeParseError{err}
	}
	return err
}

// TestAnalyzer applies an analysis to a set of packages (and their
// dependencies if necessary) and returns the results.
//
// Facts about pkg are returned in a map keyed by object; package facts
// have a nil key.
//
// This entry point is used only by analysistest.
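//
// The returned slice contains one result per package in pkgs, in order.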
func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
	var results []*TestAnalyzerResult
	for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
		facts := make(map[types.Object][]analysis.Fact)
		for key, fact := range act.objectFacts {
			if key.obj.Pkg() == act.pass.Pkg {
				facts[key.obj] = append(facts[key.obj], fact)
			}
		}
		for key, fact := range act.packageFacts {
			if key.pkg == act.pass.Pkg {
				facts[nil] = append(facts[nil], fact)
			}
		}

		results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
	}
	return results
}

type TestAnalyzerResult struct {
	Pass        *analysis.Pass
	Diagnostics []analysis.Diagnostic
	Facts       map[types.Object][]analysis.Fact
	Result      interface{}
	Err         error
}

func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
	// Construct the action graph.
	if dbg('v') {
		log.Printf("building graph of analysis passes")
	}

	// Each graph node (action) is one unit of analysis.
	// Edges express package-to-package (vertical) dependencies,
	// and analysis-to-analysis (horizontal) dependencies.
	type key struct {
		*analysis.Analyzer
		*packages.Package
	}
	actions := make(map[key]*action)

	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
		k := key{a, pkg}
		act, ok := actions[k]
		if !ok {
			act = &action{a: a, pkg: pkg}

			// Add a dependency on each required analyzer.
			for _, req := range a.Requires {
				act.deps = append(act.deps, mkAction(req, pkg))
			}

			// An analysis that consumes/produces facts
			// must run on the package's dependencies too.
			if len(a.FactTypes) > 0 {
				paths := make([]string, 0, len(pkg.Imports))
				for path := range pkg.Imports {
					paths = append(paths, path)
				}
				sort.Strings(paths) // for determinism
				for _, path := range paths {
					dep := mkAction(a, pkg.Imports[path])
					act.deps = append(act.deps, dep)
				}
			}

			actions[k] = act
		}
		return act
	}

	// Build nodes for initial packages.
	var roots []*action
	for _, a := range analyzers {
		for _, pkg := range pkgs {
			root := mkAction(a, pkg)
			root.isroot = true
			roots = append(roots, root)
		}
	}

	// Execute the graph in parallel.
	execAll(roots)

	return roots
}

func applyFixes(roots []*action) {
	visited := make(map[*action]bool)
	var apply func(*action) error
	var visitAll func(actions []*action) error
	visitAll = func(actions []*action) error {
		for _, act := range actions {
			if !visited[act] {
				visited[act] = true
				visitAll(act.deps)
				if err := apply(act); err != nil {
					return err
				}
			}
		}
		return nil
	}

	// TODO(matloob): Is this tree business too complicated? (After all this is Go!)
	// Just create a set (map) of edits, sort by pos and call it a day?
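	// Edits for each file are kept in an (unbalanced) binary search tree
	// ordered by byte offset. Inserting an edit that overlaps an existing
	// one fails, and an in-order walk later applies the edits left to right.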
	type offsetedit struct {
		start, end int
		newText    []byte
	} // TextEdit using byteOffsets instead of pos
	type node struct {
		edit        offsetedit
		left, right *node
	}

	var insert func(tree **node, edit offsetedit) error
	insert = func(treeptr **node, edit offsetedit) error {
		if *treeptr == nil {
			*treeptr = &node{edit, nil, nil}
			return nil
		}
		tree := *treeptr
		if edit.end <= tree.edit.start {
			return insert(&tree.left, edit)
		} else if edit.start >= tree.edit.end {
			return insert(&tree.right, edit)
		}

		// Overlapping text edit.
		return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)",
			edit.start, edit.end, tree.edit.start, tree.edit.end)
	}

	editsForFile := make(map[*token.File]*node)

	apply = func(act *action) error {
		for _, diag := range act.diagnostics {
			for _, sf := range diag.SuggestedFixes {
				for _, edit := range sf.TextEdits {
					// Validate the edit.
					if edit.Pos > edit.End {
						return fmt.Errorf(
							"diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)",
							act.a.Name, edit.Pos, edit.End)
					}
					file, endfile := act.pkg.Fset.File(edit.Pos), act.pkg.Fset.File(edit.End)
					if file == nil || endfile == nil || file != endfile {
						return (fmt.Errorf(
							"diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v",
							act.a.Name, file.Name(), endfile.Name()))
					}
					start, end := file.Offset(edit.Pos), file.Offset(edit.End)

					// TODO(matloob): Validate that edits do not affect other packages.
					root := editsForFile[file]
					if err := insert(&root, offsetedit{start, end, edit.NewText}); err != nil {
						return err
					}
					editsForFile[file] = root // In case the root changed
				}
			}
		}
		return nil
	}

	visitAll(roots)

	fset := token.NewFileSet() // Shared by parse calls below
	// Now we've got a set of valid edits for each file. Get the new file contents.
	for f, tree := range editsForFile {
		contents, err := ioutil.ReadFile(f.Name())
		if err != nil {
			log.Fatal(err)
		}

		cur := 0 // current position in the file

		var out bytes.Buffer

		var recurse func(*node)
		recurse = func(node *node) {
			if node.left != nil {
				recurse(node.left)
			}

			edit := node.edit
			if edit.start > cur {
				out.Write(contents[cur:edit.start])
				out.Write(edit.newText)
			}
			cur = edit.end

			if node.right != nil {
				recurse(node.right)
			}
		}
		recurse(tree)
		// Write out the rest of the file.
		if cur < len(contents) {
			out.Write(contents[cur:])
		}

		// Try to format the file.
		ff, err := parser.ParseFile(fset, f.Name(), out.Bytes(), parser.ParseComments)
		if err == nil {
			var buf bytes.Buffer
			if err = format.Node(&buf, fset, ff); err == nil {
				out = buf
			}
		}

		ioutil.WriteFile(f.Name(), out.Bytes(), 0644)
	}
}

// printDiagnostics prints the diagnostics for the root packages in either
// plain text or JSON format. JSON format also includes errors for any
// dependencies.
//
// It returns the exitcode: in plain mode, 0 for success, 1 for analysis
// errors, and 3 for diagnostics. We avoid 2 since the flag package uses
// it. JSON mode always succeeds at printing errors and diagnostics in a
// structured form to stdout.
func printDiagnostics(roots []*action) (exitcode int) {
	// Print the output.
	//
	// Print diagnostics only for root packages,
	// but errors for all packages.
	printed := make(map[*action]bool)
	var print func(*action)
	var visitAll func(actions []*action)
	visitAll = func(actions []*action) {
		for _, act := range actions {
			if !printed[act] {
				printed[act] = true
				visitAll(act.deps)
				print(act)
			}
		}
	}

	if analysisflags.JSON {
		// JSON output
		tree := make(analysisflags.JSONTree)
		print = func(act *action) {
			var diags []analysis.Diagnostic
			if act.isroot {
				diags = act.diagnostics
			}
			tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err)
		}
		visitAll(roots)
		tree.Print()
	} else {
		// plain text output

		// De-duplicate diagnostics by position (not token.Pos) to
		// avoid double-reporting in source files that belong to
		// multiple packages, such as foo and foo.test.
		type key struct {
			pos token.Position
			end token.Position
			*analysis.Analyzer
			message string
		}
		seen := make(map[key]bool)

		print = func(act *action) {
			if act.err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
				exitcode = 1 // analysis failed, at least partially
				return
			}
			if act.isroot {
				for _, diag := range act.diagnostics {
					// We don't display a.Name/f.Category
					// as most users don't care.

					posn := act.pkg.Fset.Position(diag.Pos)
					end := act.pkg.Fset.Position(diag.End)
					k := key{posn, end, act.a, diag.Message}
					if seen[k] {
						continue // duplicate
					}
					seen[k] = true

					analysisflags.PrintPlain(act.pkg.Fset, diag)
				}
			}
		}
		visitAll(roots)

		if exitcode == 0 && len(seen) > 0 {
			exitcode = 3 // successfully produced diagnostics
		}
	}

	// Print timing info.
	if dbg('t') {
		if !dbg('p') {
			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
		}
		var all []*action
		var total time.Duration
		for act := range printed {
			all = append(all, act)
			total += act.duration
		}
		sort.Slice(all, func(i, j int) bool {
			return all[i].duration > all[j].duration
		})

		// Print actions accounting for 90% of the total.
		var sum time.Duration
		for _, act := range all {
			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
			sum += act.duration
			if sum >= total*9/10 {
				break
			}
		}
	}

	return exitcode
}

// needFacts reports whether any analysis required by the specified set
// needs facts. If so, we must load the entire program from source.
func needFacts(analyzers []*analysis.Analyzer) bool {
	seen := make(map[*analysis.Analyzer]bool)
	var q []*analysis.Analyzer // for BFS
	q = append(q, analyzers...)
	for len(q) > 0 {
		a := q[0]
		q = q[1:]
		if !seen[a] {
			seen[a] = true
			if len(a.FactTypes) > 0 {
				return true
			}
			q = append(q, a.Requires...)
		}
	}
	return false
}

// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
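//
// An action is executed at most once (guarded by once); its result,
// diagnostics, facts, and any error are recorded in its fields.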
type action struct {
	once         sync.Once
	a            *analysis.Analyzer
	pkg          *packages.Package
	pass         *analysis.Pass
	isroot       bool
	deps         []*action
	objectFacts  map[objectFactKey]analysis.Fact
	packageFacts map[packageFactKey]analysis.Fact
	inputs       map[*analysis.Analyzer]interface{}
	result       interface{}
	diagnostics  []analysis.Diagnostic
	err          error
	duration     time.Duration
}

type objectFactKey struct {
	obj types.Object
	typ reflect.Type
}

type packageFactKey struct {
	pkg *types.Package
	typ reflect.Type
}

func (act *action) String() string {
	return fmt.Sprintf("%s@%s", act.a, act.pkg)
}

func execAll(actions []*action) {
	sequential := dbg('p')
	var wg sync.WaitGroup
	for _, act := range actions {
		wg.Add(1)
		work := func(act *action) {
			act.exec()
			wg.Done()
		}
		if sequential {
			work(act)
		} else {
			go work(act)
		}
	}
	wg.Wait()
}

func (act *action) exec() { act.once.Do(act.execOnce) }

func (act *action) execOnce() {
	// Analyze dependencies.
	execAll(act.deps)

	// TODO(adonovan): uncomment this during profiling.
	// It won't build pre-go1.11 but conditional compilation
	// using build tags isn't warranted.
	//
	// ctx, task := trace.NewTask(context.Background(), "exec")
	// trace.Log(ctx, "pass", act.String())
	// defer task.End()

	// Record time spent in this node but not its dependencies.
	// In parallel mode, due to GC/scheduler contention, the
	// time is 5x higher than in sequential mode, even with a
	// semaphore limiting the number of threads here.
	// So use -debug=tp.
	if dbg('t') {
		t0 := time.Now()
		defer func() { act.duration = time.Since(t0) }()
	}

	// Report an error if any dependency failed.
	var failed []string
	for _, dep := range act.deps {
		if dep.err != nil {
			failed = append(failed, dep.String())
		}
	}
	if failed != nil {
		sort.Strings(failed)
		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
		return
	}

	// Plumb the output values of the dependencies
	// into the inputs of this action. Also facts.
	inputs := make(map[*analysis.Analyzer]interface{})
	act.objectFacts = make(map[objectFactKey]analysis.Fact)
	act.packageFacts = make(map[packageFactKey]analysis.Fact)
	for _, dep := range act.deps {
		if dep.pkg == act.pkg {
			// Same package, different analysis (horizontal edge):
			// in-memory outputs of prerequisite analyzers
			// become inputs to this analysis pass.
			inputs[dep.a] = dep.result

		} else if dep.a == act.a { // (always true)
			// Same analysis, different package (vertical edge):
			// serialized facts produced by prerequisite analysis
			// become available to this analysis pass.
			inheritFacts(act, dep)
		}
	}

	// Run the analysis.
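	// A fresh analysis.Pass is constructed for this (analyzer, package) pair:
	// Report accumulates diagnostics on the action, and the fact callbacks
	// read and write this action's own fact maps.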
	pass := &analysis.Pass{
		Analyzer:          act.a,
		Fset:              act.pkg.Fset,
		Files:             act.pkg.Syntax,
		OtherFiles:        act.pkg.OtherFiles,
		IgnoredFiles:      act.pkg.IgnoredFiles,
		Pkg:               act.pkg.Types,
		TypesInfo:         act.pkg.TypesInfo,
		TypesSizes:        act.pkg.TypesSizes,
		ResultOf:          inputs,
		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
		ImportObjectFact:  act.importObjectFact,
		ExportObjectFact:  act.exportObjectFact,
		ImportPackageFact: act.importPackageFact,
		ExportPackageFact: act.exportPackageFact,
		AllObjectFacts:    act.allObjectFacts,
		AllPackageFacts:   act.allPackageFacts,
	}
	act.pass = pass

	var errors []types.Error
	// Get any type errors that are attributed to the pkg.
	// This is necessary to test analyzers that provide
	// suggested fixes for compiler/type errors.
	for _, err := range act.pkg.Errors {
		if err.Kind != packages.TypeError {
			continue
		}
		// err.Pos is a string of form: "file:line:col" or "file:line" or "" or "-"
		spn := span.Parse(err.Pos)
		// Extract the token positions from the error string.
		line, col, offset := spn.Start().Line(), spn.Start().Column(), -1
		act.pkg.Fset.Iterate(func(f *token.File) bool {
			if f.Name() != spn.URI().Filename() {
				return true
			}
			offset = int(f.LineStart(line)) + col - 1
			return false
		})
		if offset == -1 {
			continue
		}
		errors = append(errors, types.Error{
			Fset: act.pkg.Fset,
			Msg:  err.Msg,
			Pos:  token.Pos(offset),
		})
	}
	analysisinternal.SetTypeErrors(pass, errors)

	var err error
	if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
		err = fmt.Errorf("analysis skipped due to errors in package")
	} else {
		act.result, err = pass.Analyzer.Run(pass)
		if err == nil {
			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
				err = fmt.Errorf(
					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
					pass.Pkg.Path(), pass.Analyzer, got, want)
			}
		}
	}
	act.err = err

	// disallow calls after Run
	pass.ExportObjectFact = nil
	pass.ExportPackageFact = nil
}

// inheritFacts populates act's fact maps with
// those it obtains from its dependency, dep.
func inheritFacts(act, dep *action) {
	serialize := dbg('s')

	for key, fact := range dep.objectFacts {
		// Filter out facts related to objects
		// that are irrelevant downstream
		// (equivalently: not in the compiler export data).
		if !exportedFrom(key.obj, dep.pkg.Types) {
			if false {
				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
			}
			continue
		}

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces.
		if serialize {
			encodedFact, err := codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
			fact = encodedFact
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
		}
		act.objectFacts[key] = fact
	}

	for key, fact := range dep.packageFacts {
		// TODO: filter out facts that belong to
		// packages not mentioned in the export data
		// to prevent side channels.

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces
		// and is deterministic.
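		// (This check is enabled by the 's' letter in the -debug flag.)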
		if serialize {
			encodedFact, err := codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
			fact = encodedFact
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
		}
		act.packageFacts[key] = fact
	}
}

// codeFact encodes then decodes a fact,
// just to exercise that logic.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
	// We encode facts one at a time.
	// A real modular driver would emit all facts
	// into one encoder to improve gob efficiency.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
		return nil, err
	}

	// Encode it twice and assert that we get the same bits.
	// This helps detect nondeterministic Gob encoding (e.g. of maps).
	var buf2 bytes.Buffer
	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
		return nil, err
	}
	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
	}

	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
		return nil, err
	}
	return new, nil
}

// exportedFrom reports whether obj may be visible to a package that imports pkg.
// This includes not just the exported members of pkg, but also unexported
// constants, types, fields, and methods, perhaps belonging to other packages,
// that find their way into the API.
// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
//
// TODO(adonovan): do more accurate filtering by walking the type graph.
func exportedFrom(obj types.Object, pkg *types.Package) bool {
	switch obj := obj.(type) {
	case *types.Func:
		return obj.Exported() && obj.Pkg() == pkg ||
			obj.Type().(*types.Signature).Recv() != nil
	case *types.Var:
		if obj.IsField() {
			return true
		}
		// We can't filter more aggressively than this because we need
		// to consider function parameters exported, but have no way
		// of telling apart function parameters from local variables.
		return obj.Pkg() == pkg
	case *types.TypeName, *types.Const:
		return true
	}
	return false // Nil, Builtin, Label, or PkgName
}

// importObjectFact implements Pass.ImportObjectFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importObjectFact copies the fact value to *ptr.
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
	if obj == nil {
		panic("nil object")
	}
	key := objectFactKey{obj, factType(ptr)}
	if v, ok := act.objectFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportObjectFact implements Pass.ExportObjectFact.
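// It panics if called after the pass has completed, or if obj belongs to a
// package other than the one being analyzed.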
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if act.pass.ExportObjectFact == nil {
		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
	}

	if obj.Pkg() != act.pkg.Types {
		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
			act.a, act.pkg, obj, fact)
	}

	key := objectFactKey{obj, factType(fact)}
	act.objectFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		objstr := types.ObjectString(obj, (*types.Package).Name)
		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
	}
}

// allObjectFacts implements Pass.AllObjectFacts.
func (act *action) allObjectFacts() []analysis.ObjectFact {
	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
	for k := range act.objectFacts {
		facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]})
	}
	return facts
}

// importPackageFact implements Pass.ImportPackageFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importPackageFact copies the fact value to *ptr.
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
	if pkg == nil {
		panic("nil package")
	}
	key := packageFactKey{pkg, factType(ptr)}
	if v, ok := act.packageFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportPackageFact implements Pass.ExportPackageFact.
func (act *action) exportPackageFact(fact analysis.Fact) {
	if act.pass.ExportPackageFact == nil {
		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
	}

	key := packageFactKey{act.pass.Pkg, factType(fact)}
	act.packageFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
			act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
	}
}

func factType(fact analysis.Fact) reflect.Type {
	t := reflect.TypeOf(fact)
	if t.Kind() != reflect.Ptr {
		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
	}
	return t
}

// allPackageFacts implements Pass.AllPackageFacts.
func (act *action) allPackageFacts() []analysis.PackageFact {
	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
	for k := range act.packageFacts {
		facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]})
	}
	return facts
}

func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }
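
// For reference, a minimal sketch of how a driver command might wire up the
// exported entry points of this package (RegisterFlags and Run). This is only
// an illustration: the real singlechecker/multichecker commands in this module
// do additional flag, usage, and analyzer validation handling, and the
// command name and myAnalyzer variable below are hypothetical:
//
//	func main() {
//		checker.RegisterFlags()
//		flag.Parse()
//		os.Exit(checker.Run(flag.Args(), []*analysis.Analyzer{myAnalyzer}))
//	}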