github.com/aclements/go-misc@v0.0.0-20240129233631-2f6ede80790c/rtcheck/main.go

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Command rtcheck performs static analysis of the Go runtime.
     6  //
     7  // Note: Currently this requires a small modification to
     8  // golang.org/x/tools/go/pointer:
     9  //
    10  //     --- a/go/pointer/intrinsics.go
    11  //     +++ b/go/pointer/intrinsics.go
    12  //     @@ -180,7 +180,6 @@ func (a *analysis) findIntrinsic(fn *ssa.Function) intrinsic {
    13  //      			// Ignore "runtime" (except SetFinalizer):
    14  //      			// it has few interesting effects on aliasing
    15  //      			// and is full of unsafe code we can't analyze.
    16  //     -			impl = ext۰NoEffect
    17  //      		}
    18  //
    19  //      		a.intrinsics[fn] = impl
    20  //
    21  // rtcheck currently implements one analysis:
    22  //
    23  // Deadlock detection
    24  //
    25  // Static deadlock detection constructs a lock graph and reports
    26  // cycles in that lock graph. These cycles indicate code paths with
    27  // the potential for deadlock.
    28  //
    29  // The report from the deadlock detector indicates all discovered
    30  // cycles in the lock graph and, for each edge L1 -> L2, shows the
    31  // code paths that acquire L2 while holding L1. In the simplest case
    32  // where L1 and L2 are the same lock, this cycle represents a
    33  // potential for self-deadlock within a single thread. More generally,
    34  // it means that if all of the code paths in the cycle execute
    35  // concurrently, the system may deadlock. If one of the edges in a
    36  // cycle is represented by significantly fewer code paths than the
    37  // other edges, fixing those code paths is likely the easiest way to
    38  // fix the deadlock.
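         //
         // For example, here is a minimal sketch (with hypothetical locks
         // a and b) of two code paths that produce a cycle:
         //
         //     // path 1          // path 2
         //     lock(&a)           lock(&b)
         //     lock(&b)           lock(&a)
         //
         // The first path adds the edge a -> b and the second adds
         // b -> a, so the lock graph contains the cycle a -> b -> a and
         // the report shows both code paths.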
    39  //
    40  // This uses an inter-procedural, path-sensitive, and partially
    41  // value-sensitive analysis based on Engler and Ashcroft, "RacerX:
    42  // Effective, static detection of race conditions and deadlocks", SOSP
    43  // 2003. It works by exploring possible code paths and finding paths
    44  // on which two or more locks are held simultaneously. Any such path
    45  // produces one or more edges in the lock graph indicating the order
    46  // in which those locks were acquired.
    47  //
    48  // Like many static analyses, this has limitations. First, it doesn't
    49  // reason about individual locks, but about lock *classes*, which are
    50  // modeled as sets of locks that may alias each other. As a result, if
    51  // the code acquires multiple locks from the same lock class
    52  // simultaneously (such as locks from different instances of the same
    53  // structure), but is careful to ensure a consistent order between
    54  // those locks at runtime (e.g., by sorting them), this analysis will
    55  // consider that a potential deadlock, even though it will not
    56  // deadlock at runtime.
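         //
         // As a sketch of that situation (sortlock is hypothetical, not
         // part of the runtime):
         //
         //     func sortlock(a, b *mutex) {
         //         if uintptr(unsafe.Pointer(b)) < uintptr(unsafe.Pointer(a)) {
         //             a, b = b, a
         //         }
         //         lock(a)
         //         lock(b)
         //     }
         //
         // This never deadlocks at runtime, but a and b belong to the same
         // lock class, so the analysis reports a cycle from that class to
         // itself.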
    57  //
    58  // Second, it may explore code paths that are impossible at runtime.
    59  // The analysis performs very simple intra-procedural value
    60  // propagation to eliminate obviously impossible code paths, but this
    61  // is easily fooled. Consider
    62  //
    63  //     if complex condition 1 {
    64  //         lock(&x)
    65  //     }
    66  //     ...
    67  //     if complex condition 2 {
    68  //         unlock(&x)
    69  //     }
    70  //
    71  // where complex conditions 1 and 2 are equivalent, but beyond the
    72  // reach of the simple value propagation. The analysis will see *four*
    73  // distinct code paths here, rather than the two that are actually
    74  // possible, and think that x can still be held after the second if.
    75  // Similarly,
    76  //
    77  //     lock(&x)
    78  //     ensure !c
    79  //     ...
    80  //     if c {
    81  //         lock(&x)
    82  //     }
    83  //
    84  // If c can't be deduced by value propagation, this will appear as a
    85  // potential self-deadlock. Of course, if it requires complex dynamic
    86  // reasoning to show that a deadlock cannot occur at runtime, it may
    87  // be a good idea to simplify the code anyway.
    88  package main
    89  
    90  import (
    91  	"bytes"
    92  	"flag"
    93  	"fmt"
    94  	"go/ast"
    95  	"go/build"
    96  	"go/constant"
    97  	"go/parser"
    98  	"go/printer"
    99  	"go/token"
   100  	"go/types"
   101  	"io"
   102  	"log"
   103  	"math/big"
   104  	"os"
   105  	"path/filepath"
   106  	"reflect"
   107  	"runtime"
   108  	"strings"
   109  
   110  	"golang.org/x/tools/go/buildutil"
   111  	"golang.org/x/tools/go/callgraph"
   112  	"golang.org/x/tools/go/loader"
   113  	"golang.org/x/tools/go/pointer"
   114  	"golang.org/x/tools/go/ssa"
   115  	"golang.org/x/tools/go/ssa/ssautil"
   116  )
   117  
    118  // debugFunctions is the set of functions for which to enable extra
    119  // debug tracing. Each function in debugFunctions will generate a dot
    120  // file containing the block exploration graph of that function.
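         //
         // For example, assuming the function's printed SSA name is
         // "runtime.lock",
         //
         //     rtcheck -debugfuncs runtime.lock
         //
         // writes its block exploration graph to debug-runtime.lock.dot.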
   121  var debugFunctions = map[string]bool{}
   122  
   123  func main() {
   124  	var (
   125  		outLockGraph string
   126  		outCallGraph string
   127  		outHTML      string
   128  		debugFuncs   string
   129  	)
   130  	flag.StringVar(&outLockGraph, "lockgraph", "", "write lock graph in dot to `file`")
   131  	flag.StringVar(&outCallGraph, "callgraph", "", "write call graph in dot to `file`")
   132  	flag.StringVar(&outHTML, "html", "", "write HTML deadlock report to `file`")
   133  	flag.StringVar(&debugFuncs, "debugfuncs", "", "write debug graphs for `funcs` (comma-separated list)")
   134  	flag.Parse()
   135  	if flag.NArg() > 0 {
   136  		flag.Usage()
   137  		os.Exit(2)
   138  	}
    139  	for _, name := range strings.FieldsFunc(debugFuncs, func(r rune) bool { return r == ',' }) {
   140  		debugFunctions[name] = true
   141  	}
   142  
   143  	roots := getDefaultRoots()
   144  
   145  	var conf loader.Config
   146  
   147  	// TODO: Check all reasonable arch/OS combos.
   148  
   149  	// TODO: This would be so much easier and nicer if I could
   150  	// just plug (path, AST)s into the loader, or at least slip in
   151  	// between when the loader has parsed everything and when it
   152  	// type-checks everything. Currently it's only possible to
   153  	// provide ASTs for non-importable packages to the
   154  	// loader.Config.
   155  
   156  	newSources := make(map[string][]byte)
   157  	for _, pkgName := range []string{"runtime", "runtime/internal/atomic"} {
   158  		buildPkg, err := build.Import(pkgName, "", 0)
   159  		if err != nil {
   160  			log.Fatal(err)
   161  		}
   162  		var pkgRoots []string
   163  		if pkgName == "runtime" {
   164  			pkgRoots = roots
   165  		}
   166  		rewriteSources(buildPkg, pkgRoots, newSources)
   167  	}
   168  
   169  	ctxt := &build.Default
   170  	ctxt = buildutil.OverlayContext(ctxt, newSources)
   171  
   172  	conf.Build = ctxt
   173  	conf.Import("runtime")
   174  
   175  	lprog, err := conf.Load()
   176  	if err != nil {
   177  		log.Fatal("loading runtime: ", err)
   178  	}
   179  	fset := lprog.Fset
   180  
   181  	prog := ssautil.CreateProgram(lprog, 0)
   182  	prog.Build()
   183  	runtimePkg := prog.ImportedPackage("runtime")
   184  	lookupMembers(runtimePkg, runtimeFns)
   185  
   186  	// TODO: Teach it that you can jump to sigprof at any point?
   187  	//
   188  	// TODO: Teach it about implicit write barriers?
   189  
   190  	// Prepare for pointer analysis.
   191  	ptrConfig := pointer.Config{
   192  		Mains:          []*ssa.Package{runtimePkg},
   193  		BuildCallGraph: true,
   194  		//Log:            os.Stderr,
   195  	}
   196  
   197  	// Run pointer analysis.
   198  	pta, err := pointer.Analyze(&ptrConfig)
   199  	if err != nil {
   200  		log.Fatal(err)
   201  	}
   202  	cg := pta.CallGraph
   203  
   204  	cg.DeleteSyntheticNodes() // ?
   205  
   206  	// Output call graph if requested.
   207  	if outCallGraph != "" {
   208  		withWriter(outCallGraph, func(w io.Writer) {
   209  			type edge struct{ a, b *callgraph.Node }
   210  			have := make(map[edge]struct{})
   211  			fmt.Fprintln(w, "digraph callgraph {")
   212  			callgraph.GraphVisitEdges(pta.CallGraph, func(e *callgraph.Edge) error {
   213  				if _, ok := have[edge{e.Caller, e.Callee}]; ok {
   214  					return nil
   215  				}
   216  				have[edge{e.Caller, e.Callee}] = struct{}{}
   217  				fmt.Fprintf(w, "%q -> %q;\n", e.Caller.Func, e.Callee.Func)
   218  				return nil
   219  			})
   220  			fmt.Fprintln(w, "}")
   221  		})
   222  	}
   223  
   224  	s := state{
   225  		fset: fset,
   226  		cg:   cg,
   227  		pta:  pta,
   228  		fns:  make(map[*ssa.Function]*funcInfo),
   229  
   230  		lockOrder: NewLockOrder(fset),
   231  
   232  		roots:   nil,
   233  		rootSet: make(map[*ssa.Function]struct{}),
   234  	}
   235  	s.gscanLock = s.lca.NewLockClass("_Gscan", false)
   236  
   237  	// Create heap objects we care about.
   238  	//
   239  	// TODO: Also track m.preemptoff.
   240  	s.heap.curG = NewHeapObject("curG")
   241  	userG := NewHeapObject("userG")
   242  	userG_m := NewHeapObject("userG.m")
   243  	s.heap.g0 = NewHeapObject("g0")
   244  	g0_m := NewHeapObject("g0.m")
   245  	s.heap.curM = NewHeapObject("curM")
   246  	curM_g0 := NewHeapObject("curM.g0")
   247  	curM_curg := NewHeapObject("curM.curg")
   248  	s.heap.curM_locks = NewHeapObject("curM.locks")
   249  	curM_printlock := NewHeapObject("curM.printlock")
   250  
   251  	// Add roots to state.
   252  	for _, name := range roots {
   253  		m, ok := runtimePkg.Members[name].(*ssa.Function)
   254  		if !ok {
   255  			log.Fatalf("unknown root: %s", name)
   256  		}
   257  		s.addRoot(m)
   258  	}
   259  
   260  	// Analyze each root. Analysis may add more roots.
   261  	for i := 0; i < len(s.roots); i++ {
   262  		root := s.roots[i]
   263  
   264  		// Create initial heap state for entering from user space.
   265  		var vs ValState
   266  		vs = vs.ExtendHeap(s.heap.curG, DynHeapPtr{userG})
   267  		vs = vs.ExtendHeap(userG, DynStruct{"m": userG_m})
   268  		vs = vs.ExtendHeap(userG_m, DynHeapPtr{s.heap.curM})
   269  		vs = vs.ExtendHeap(s.heap.g0, DynStruct{"m": g0_m})
   270  		vs = vs.ExtendHeap(g0_m, DynHeapPtr{s.heap.curM})
   271  		vs = vs.ExtendHeap(s.heap.curM, DynStruct{"curg": curM_curg, "g0": curM_g0, "locks": s.heap.curM_locks, "printlock": curM_printlock})
   272  		vs = vs.ExtendHeap(curM_g0, DynHeapPtr{s.heap.g0})
   273  		// Initially we're on the user stack.
   274  		vs = vs.ExtendHeap(curM_curg, DynHeapPtr{userG})
   275  		// And hold no locks.
   276  		vs = vs.ExtendHeap(s.heap.curM_locks, DynConst{constant.MakeInt64(0)})
   277  		vs = vs.ExtendHeap(curM_printlock, DynConst{constant.MakeInt64(0)})
   278  
   279  		// Create the initial PathState.
   280  		ps := PathState{
   281  			lockSet: NewLockSet(),
   282  			vs:      vs,
   283  		}
   284  
   285  		// Walk the function.
   286  		exitStates := s.walkFunction(root, ps)
   287  
   288  		// Warn if any locks are held at return.
   289  		exitStates.ForEach(func(ps PathState) {
   290  			if len(ps.lockSet.stacks) == 0 {
   291  				return
   292  			}
   293  			s.warnl(root.Pos(), "locks at return from root %s: %s", root, ps.lockSet)
   294  			s.warnl(root.Pos(), "\t(likely analysis failed to match control flow for unlock)")
   295  		})
   296  	}
   297  
   298  	// Dump debug trees.
   299  	if s.debugTree != nil {
   300  		withWriter("debug-functions.dot", s.debugTree.WriteToDot)
   301  	}
   302  	for fn, fInfo := range s.fns {
   303  		if fInfo.debugTree == nil {
   304  			continue
   305  		}
   306  		withWriter(fmt.Sprintf("debug-%s.dot", fn), fInfo.debugTree.WriteToDot)
   307  	}
   308  
   309  	// Output lock graph.
   310  	if outLockGraph != "" {
   311  		withWriter(outLockGraph, s.lockOrder.WriteToDot)
   312  	}
   313  
   314  	// Output HTML report.
   315  	if outHTML != "" {
   316  		withWriter(outHTML, s.lockOrder.WriteToHTML)
   317  	}
   318  
   319  	// Output text lock cycle report.
   320  	fmt.Println()
   321  	fmt.Print("roots:")
   322  	for _, fn := range s.roots {
   323  		fmt.Printf(" %s", fn)
   324  	}
   325  	fmt.Print("\n")
   326  	fmt.Printf("number of lock cycles: %d\n\n", len(s.lockOrder.FindCycles()))
   327  	s.lockOrder.Check(os.Stdout)
   328  }
   329  
    330  // withWriter creates the file named by path, calls f with it, and closes it.
   331  func withWriter(path string, f func(w io.Writer)) {
   332  	file, err := os.Create(path)
   333  	if err != nil {
   334  		log.Fatal(err)
   335  	}
   336  	defer func() {
   337  		if err := file.Close(); err != nil {
   338  			log.Fatal(err)
   339  		}
   340  	}()
   341  	f(file)
   342  }
   343  
   344  // getDefaultRoots returns a list of functions in the runtime package
   345  // to use as roots.
   346  //
   347  // It parses $GOROOT/src/cmd/compile/internal/gc/builtin/runtime.go to
   348  // get this list, since these are the functions the compiler can
   349  // generate calls to.
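         //
         // For illustration, the declarations in that file have this form
         // (the exact set varies by Go version):
         //
         //     func newobject(typ *byte) *any
         //     func chansend1(chanType *byte, hchan chan<- any, elem *any)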
   350  func getDefaultRoots() []string {
   351  	path := filepath.Join(runtime.GOROOT(), "src/cmd/compile/internal/gc/builtin/runtime.go")
   352  	fset := token.NewFileSet()
   353  	f, err := parser.ParseFile(fset, path, nil, 0)
   354  	if err != nil {
   355  		log.Fatalf("%s: %s", path, err)
   356  	}
   357  
   358  	var roots []string
   359  	for _, decl := range f.Decls {
   360  		decl, ok := decl.(*ast.FuncDecl)
   361  		if !ok {
   362  			continue
   363  		}
   364  		switch decl.Name.Name {
   365  		case "cmpstring", "eqstring",
   366  			"int64div", "uint64div", "int64mod", "uint64mod",
   367  			"float64toint64", "float64touint64",
   368  			"int64tofloat64", "uint64tofloat64",
   369  			// Go 1.8:
   370  			"float64touint32", "uint32tofloat64":
   371  			// These are declared only in assembly.
   372  			continue
   373  		}
   374  		if strings.HasPrefix(decl.Name.Name, "race") {
   375  			// These functions are declared by runtime.go,
   376  			// but only exist in race mode.
   377  			continue
   378  		}
   379  		roots = append(roots, decl.Name.Name)
   380  	}
   381  	return roots
   382  }
   383  
    384  // rewriteSources rewrites all of the Go files in pkg to eliminate
    385  // runtime-isms, to make them easier for go/ssa to process, to add
    386  // stubs for assembly functions, and to generate init-time calls to
    387  // analysis root functions. It fills rewritten with path -> new
    388  // source mappings.
   389  func rewriteSources(pkg *build.Package, roots []string, rewritten map[string][]byte) {
   390  	rootSet := make(map[string]struct{})
   391  	for _, root := range roots {
   392  		rootSet[root] = struct{}{}
   393  	}
   394  
   395  	for _, fname := range pkg.GoFiles {
   396  		path := filepath.Join(pkg.Dir, fname)
   397  
   398  		// Parse source.
   399  		fset := token.NewFileSet()
   400  		f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
   401  		if err != nil {
   402  			log.Fatalf("%s: %s", path, err)
   403  		}
   404  
   405  		isNosplit := map[ast.Decl]bool{}
   406  		rewriteStubs(f, isNosplit)
   407  		if pkg.Name == "runtime" {
   408  			addRootCalls(f, rootSet)
   409  			rewriteRuntime(f, isNosplit)
   410  		}
   411  
   412  		// Back to source.
   413  		var buf bytes.Buffer
   414  		if err := (&printer.Config{Mode: printer.SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f); err != nil {
   415  			log.Fatalf("outputting replacement %s: %s", path, err)
   416  		}
   417  
   418  		if pkg.Name == "runtime" && fname == "stubs.go" {
   419  			// Declare functions used during rewriting.
   420  			buf.Write([]byte(`
   421  // systemstack is transformed into a call to presystemstack, then
   422  // the operation, then postsystemstack. These functions are handled
   423  // specially.
   424  func rtcheck۰presystemstack() *g { return nil }
   425  func rtcheck۰postsystemstack(*g) { }
   426  `))
   427  		}
   428  
   429  		rewritten[path] = buf.Bytes()
   430  	}
   431  
   432  	// Check that we found all of the roots.
   433  	if len(rootSet) > 0 {
    434  		fmt.Fprint(os.Stderr, "unknown roots:")
    435  		for root := range rootSet {
    436  			fmt.Fprintf(os.Stderr, " %s", root)
    437  		}
    438  		fmt.Fprint(os.Stderr, "\n")
   439  		os.Exit(1)
   440  	}
   441  }
   442  
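         // newStubs maps package name -> function name -> replacement
         // declaration. It is populated from the stub sources in init below
         // and consumed by rewriteStubs.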
   443  var newStubs = make(map[string]map[string]*ast.FuncDecl)
   444  
   445  func init() {
   446  	// TODO: Perhaps I should do most of these as "special"
   447  	// functions, and do the few that affect pointers (like
   448  	// noescape) as call rewrites.
   449  
   450  	// Stubs provide implementations for assembly functions that
   451  	// are not declared in the Go source code. All of these are
   452  	// automatically marked go:nosplit.
   453  	var runtimeStubs = `
   454  package runtime
   455  
   456  // stubs.go
   457  // getg is handled specially.
   458  // mcall and systemstack are eliminated during rewriting.
   459  func memclr() { }
   460  func memmove() { }
   461  func fastrand1() uint32 { return 0 }
   462  func memequal() bool { return false }
   463  func noescape(p unsafe.Pointer) unsafe.Pointer { return p }
   464  func cgocallback() { }
   465  func gogo() { for { } }
   466  func gosave() { }
   467  func mincore() int32 { return 0 }
   468  func jmpdefer() { for { } }
   469  func exit1() { for { } }
   470  func setg() { }
   471  func breakpoint() { }
   472  func reflectcall() { }
   473  func procyield() { }
   474  func cgocallback_gofunc() { }
   475  func publicationBarrier() { }
   476  func setcallerpc() { }
   477  func getcallerpc() uintptr { return 0 }
   478  func getcallersp() uintptr { return 0 }
   479  func asmcgocall() int32 { return 0 }
   480  // morestack is handled specially.
   481  func time_now() (int64, int32) { return 0, 0 }
   482  
   483  // os_linux.go
   484  func futex() int32 { return 0 }
   485  func clone() int32 { return 0 }
   486  func gettid() uint32 { return 0 }
   487  func sigreturn() { for { } }
   488  func rt_sigaction() int32 { return 0 }
   489  func sigaltstack() { }
   490  func setitimer() { }
   491  func rtsigprocmask() { }
   492  func getrlimit() int32 { return 0 }
   493  func raise() { for { } }
   494  func raiseproc() { for { } }
   495  func sched_getaffinity() int32 { return 0 }
   496  func osyield() { }
   497  
   498  // stubs2.go
    499  func read() int32 { return 0 }
    500  func closefd() int32 { return 0 }
    501  func exit() { for {} }
    502  func nanotime() int64 { return 0 }
   503  func usleep() {}
   504  func munmap() {}
   505  func write() int32 { return 0 }
   506  func open() int32 { return 0 }
   507  func madvise() {}
   508  
   509  // cputicks.go
    510  func cputicks() int64 { return 0 }
   511  
   512  // cgo_mmap.go
   513  func sysMmap() unsafe.Pointer { return nil }
   514  func callCgoMmap() uintptr { return 0 }
   515  
   516  // alg.go
   517  func aeshash(p unsafe.Pointer, h, s uintptr) uintptr { return 0 }
   518  func aeshash32(p unsafe.Pointer, h uintptr) uintptr { return 0 }
   519  func aeshash64(p unsafe.Pointer, h uintptr) uintptr { return 0 }
   520  func aeshashstr(p unsafe.Pointer, h uintptr) uintptr { return 0 }
   521  
   522  // netpoll_epoll.go
   523  func epollcreate(size int32) int32 { return 0 }
   524  func epollcreate1(flags int32) int32 { return 0 }
   525  func epollctl(epfd, op, fd int32, ev *epollevent) int32 { return 0 }
   526  func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32 { return 0 }
   527  func closeonexec(fd int32) {}
   528  `
   529  	var atomicStubs = `
   530  package atomic
   531  
   532  // stubs.go
   533  func Cas(ptr *uint32, old, new uint32) bool {
   534  	if *ptr == old { *ptr = new; return true }
   535  	return false
   536  }
   537  func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
   538  	if *ptr == old { *ptr = new; return true }
   539  	return false
   540  }
   541  func Casuintptr(ptr *uintptr, old, new uintptr) bool {
   542  	if *ptr == old { *ptr = new; return true }
   543  	return false
   544  }
   545  func Storeuintptr(ptr *uintptr, new uintptr) { *ptr = new }
   546  func Loaduintptr(ptr *uintptr) uintptr { return *ptr }
   547  func Loaduint(ptr *uint) uint { return *ptr }
   548  func Loadint64(ptr *int64) int64 { return *ptr }
   549  func Xaddint64(ptr *int64, delta int64) int64 {
   550  	*ptr += delta
   551  	return *ptr
   552  }
   553  
   554  // atomic_*.go
   555  func Load(ptr *uint32) uint32 { return *ptr }
   556  func Loadp(ptr unsafe.Pointer) unsafe.Pointer { return *(*unsafe.Pointer)(ptr) }
   557  func Load64(ptr *uint64) uint64 { return *ptr }
   558  func Xadd(ptr *uint32, delta int32) uint32 {
   559  	*ptr += uint32(delta)
   560  	return *ptr
   561  }
   562  func Xadd64(ptr *uint64, delta int64) uint64 {
   563  	*ptr += uint64(delta)
   564  	return *ptr
   565  }
   566  func Xadduintptr(ptr *uintptr, delta uintptr) uintptr {
   567  	*ptr += delta
   568  	return *ptr
   569  }
   570  func Xchg(ptr *uint32, new uint32) uint32 {
   571  	old := *ptr
   572  	*ptr = new
   573  	return old
   574  }
   575  func Xchg64(ptr *uint64, new uint64) uint64 {
   576  	old := *ptr
   577  	*ptr = new
   578  	return old
   579  }
   580  func Xchguintptr(ptr *uintptr, new uintptr) uintptr {
   581  	old := *ptr
   582  	*ptr = new
   583  	return old
   584  }
   585  func And8(ptr *uint8, val uint8) { *ptr &= val }
   586  func Or8(ptr *uint8, val uint8) { *ptr |= val }
   587  func Cas64(ptr *uint64, old, new uint64) bool {
   588  	if *ptr == old { *ptr = new; return true }
   589  	return false
   590  }
   591  func Store(ptr *uint32, val uint32) { *ptr = val }
   592  func Store64(ptr *uint64, val uint64) { *ptr = val }
   593  func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) {
   594  	*(*unsafe.Pointer)(ptr) = val
   595  }
   596  `
   597  
   598  	for _, stubs := range []string{runtimeStubs, atomicStubs} {
   599  		f, err := parser.ParseFile(token.NewFileSet(), "<newStubs>", stubs, 0)
   600  		if err != nil {
   601  			log.Fatal("parsing replacement stubs: ", err)
   602  		}
   603  
   604  		// Strip token.Pos information from stubs. It confuses
   605  		// the printer, which winds up producing invalid Go code.
   606  		ast.Inspect(f, func(n ast.Node) bool {
   607  			if n == nil {
   608  				return true
   609  			}
   610  			rn := reflect.ValueOf(n).Elem()
   611  			for i := 0; i < rn.NumField(); i++ {
   612  				f := rn.Field(i)
   613  				if _, ok := f.Interface().(token.Pos); ok {
   614  					f.Set(reflect.Zero(f.Type()))
   615  				}
   616  			}
   617  			return true
   618  		})
   619  
   620  		newMap := make(map[string]*ast.FuncDecl)
   621  		for _, decl := range f.Decls {
   622  			newMap[decl.(*ast.FuncDecl).Name.Name] = decl.(*ast.FuncDecl)
   623  		}
   624  		newStubs[f.Name.Name] = newMap
   625  	}
   626  }
   627  
   628  func rewriteStubs(f *ast.File, isNosplit map[ast.Decl]bool) {
   629  	// Replace declaration bodies.
   630  	for _, decl := range f.Decls {
   631  		switch decl := decl.(type) {
   632  		case *ast.FuncDecl:
   633  			if decl.Body != nil {
   634  				continue
   635  			}
   636  			newDecl, ok := newStubs[f.Name.Name][decl.Name.Name]
   637  			if !ok {
   638  				continue
   639  			}
   640  			decl.Body = newDecl.Body
   641  			isNosplit[decl] = true
   642  		}
   643  	}
   644  }
   645  
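         // addRootCalls appends an init function to f that calls each
         // function in rootSet, making the analysis roots reachable from
         // package initialization. Pointer-like arguments are passed nil;
         // value-typed arguments get fresh zero-valued variables. A sketch
         // of the generated shape (someRoot is hypothetical):
         //
         //	func init() {
         //		var x0 int32
         //		someRoot(nil, x0)
         //	}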
   646  func addRootCalls(f *ast.File, rootSet map[string]struct{}) {
   647  	var body []ast.Stmt
   648  	for _, decl := range f.Decls {
   649  		decl, ok := decl.(*ast.FuncDecl)
   650  		if !ok || decl.Recv != nil {
   651  			continue
   652  		}
   653  		if _, ok := rootSet[decl.Name.Name]; !ok {
   654  			continue
   655  		}
   656  		delete(rootSet, decl.Name.Name)
   657  
   658  		// Construct a valid call.
   659  		args := []ast.Expr{}
   660  		for _, aspec := range decl.Type.Params.List {
   661  			n := len(aspec.Names)
   662  			if aspec.Names == nil {
   663  				n = 1
   664  			}
   665  			for i := 0; i < n; i++ {
   666  				switch atype := aspec.Type.(type) {
   667  				case *ast.ChanType, *ast.FuncType,
   668  					*ast.InterfaceType, *ast.MapType,
   669  					*ast.StarExpr:
   670  					args = append(args, &ast.Ident{Name: "nil"})
   671  				case *ast.StructType:
   672  					log.Fatal("not implemented: struct args")
   673  				case *ast.ArrayType, *ast.Ident, *ast.SelectorExpr:
   674  					name := fmt.Sprintf("x%d", len(body))
   675  					adecl := &ast.DeclStmt{
   676  						&ast.GenDecl{
   677  							Tok: token.VAR,
   678  							Specs: []ast.Spec{
   679  								&ast.ValueSpec{
   680  									Names: []*ast.Ident{{Name: name}},
   681  									Type:  atype,
   682  								},
   683  							},
   684  						},
   685  					}
   686  					body = append(body, adecl)
   687  					args = append(args, &ast.Ident{Name: name})
   688  				default:
   689  					log.Fatalf("unexpected function argument type: %s", aspec)
   690  				}
   691  			}
   692  		}
   693  		body = append(body, &ast.ExprStmt{&ast.CallExpr{
   694  			Fun:  &ast.Ident{Name: decl.Name.Name},
   695  			Args: args,
   696  		}})
   697  	}
   698  	if len(body) > 0 {
   699  		f.Decls = append(f.Decls,
   700  			&ast.FuncDecl{
   701  				Name: &ast.Ident{Name: "init"},
   702  				Type: &ast.FuncType{Params: &ast.FieldList{}},
   703  				Body: &ast.BlockStmt{List: body},
   704  			})
   705  	}
   706  }
   707  
   708  func rewriteRuntime(f *ast.File, isNosplit map[ast.Decl]bool) {
   709  	// Attach go:nosplit directives to top-level declarations. We
   710  	// have to do this before the Rewrite walk because go/ast
   711  	// drops comments separated by newlines from the AST, leaving
   712  	// them only in File.Comments. But to agree with the
   713  	// compiler's interpretation of these comments, we need all of
   714  	// the comments.
   715  	cgs := f.Comments
   716  	for _, decl := range f.Decls {
   717  		// Process comments before decl.
   718  		for len(cgs) > 0 && cgs[0].Pos() < decl.Pos() {
   719  			for _, c := range cgs[0].List {
   720  				if c.Text == "//go:nosplit" {
   721  					isNosplit[decl] = true
   722  				}
   723  			}
   724  			cgs = cgs[1:]
   725  		}
   726  		// Ignore comments in decl.
   727  		for len(cgs) > 0 && cgs[0].Pos() < decl.End() {
   728  			cgs = cgs[1:]
   729  		}
   730  	}
   731  
   732  	// TODO: Do identifier resolution so I know I'm actually
   733  	// getting the runtime globals.
   734  	id := func(name string) *ast.Ident {
   735  		return &ast.Ident{Name: name}
   736  	}
   737  	Rewrite(func(node ast.Node) ast.Node {
   738  		switch node := node.(type) {
   739  		case *ast.CallExpr:
   740  			id, ok := node.Fun.(*ast.Ident)
   741  			if !ok {
   742  				break
   743  			}
   744  			switch id.Name {
   745  			case "systemstack":
   746  				log.Fatal("systemstack not at statement level")
   747  			case "mcall":
   748  				// mcall(f) -> f(nil)
   749  				return &ast.CallExpr{Fun: node.Args[0], Args: []ast.Expr{&ast.Ident{Name: "nil"}}}
   750  			case "gopark":
   751  				if cb, ok := node.Args[0].(*ast.Ident); ok && cb.Name == "nil" {
   752  					break
   753  				}
   754  				// gopark(fn, arg, ...) -> fn(nil, arg)
   755  				return &ast.CallExpr{
   756  					Fun: node.Args[0],
   757  					Args: []ast.Expr{
   758  						&ast.Ident{Name: "nil"},
   759  						node.Args[1],
   760  					},
   761  				}
   762  			case "goparkunlock":
   763  				// goparkunlock(x, ...) -> unlock(x)
   764  				return &ast.CallExpr{
   765  					Fun:  &ast.Ident{Name: "unlock"},
   766  					Args: []ast.Expr{node.Args[0]},
   767  				}
   768  			}
   769  
   770  		case *ast.ExprStmt:
   771  			// Rewrite:
   772  			//   systemstack(f) -> {g := presystemstack(); f(); postsystemstack(g) }
   773  			//   systemstack(func() { x }) -> {g := presystemstack(); x; postsystemstack(g) }
   774  			expr, ok := node.X.(*ast.CallExpr)
   775  			if !ok {
   776  				break
   777  			}
   778  			fnid, ok := expr.Fun.(*ast.Ident)
   779  			if !ok || fnid.Name != "systemstack" {
   780  				break
   781  			}
   782  			var x ast.Stmt
   783  			if arg, ok := expr.Args[0].(*ast.FuncLit); ok {
   784  				x = arg.Body
   785  			} else {
   786  				x = &ast.ExprStmt{&ast.CallExpr{Fun: expr.Args[0]}}
   787  			}
   788  			pre := &ast.AssignStmt{
   789  				Lhs: []ast.Expr{id("rtcheck۰g")},
   790  				Tok: token.DEFINE,
   791  				Rhs: []ast.Expr{&ast.CallExpr{Fun: id("rtcheck۰presystemstack")}},
   792  			}
   793  			post := &ast.ExprStmt{&ast.CallExpr{Fun: id("rtcheck۰postsystemstack"), Args: []ast.Expr{id("rtcheck۰g")}}}
   794  			return &ast.BlockStmt{List: []ast.Stmt{pre, x, post}}
   795  
   796  		case *ast.FuncDecl:
   797  			// TODO: Some functions are just too hairy for
   798  			// the analysis right now.
   799  			switch node.Name.Name {
   800  			case "throw":
   801  				node.Body = &ast.BlockStmt{
   802  					List: []ast.Stmt{
   803  						&ast.ForStmt{
   804  							Body: &ast.BlockStmt{},
   805  						},
   806  					},
   807  				}
   808  
   809  			case "traceEvent", "cgoContextPCs", "callCgoSymbolizer":
   810  				// TODO: If we handle traceEvent, we
   811  				// still can't handle inter-procedural
   812  				// correlated control flow between
   813  				// traceAcquireBuffer and
   814  				// traceReleaseBuffer, so hard-code
   815  				// that traceReleaseBuffer releases
   816  				// runtime.trace.bufLock.
   817  				//
   818  				// TODO: A bunch of false positives
   819  				// come from callCgoSymbolizer and
   820  				// cgoContextPCs, which dynamically
   821  				// call either cgocall or asmcgocall
   822  				// depending on whether we're on the
   823  				// system stack. We don't flow enough
   824  				// information through to tell, so we
   825  				// assume it can always call cgocall,
   826  				// which leads to all sorts of bad
   827  				// lock edges.
   828  				node.Body = &ast.BlockStmt{}
   829  			}
   830  
   831  			// Insert morestack() prologue.
   832  			//
   833  			// TODO: This only happens in the runtime
   834  			// package right now. It should happen in all
   835  			// packages.
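         			//
         			// Roughly, the rewrite is (a sketch):
         			//
         			//	func f() { body }  ->  func f() { morestack(); body }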
   836  			if node.Body == nil || len(node.Body.List) == 0 || isNosplit[node] {
   837  				break
   838  			}
   839  			call := &ast.ExprStmt{&ast.CallExpr{Fun: id("morestack"), Args: []ast.Expr{}, Lparen: node.Body.Pos()}}
   840  			node.Body.List = append([]ast.Stmt{call}, node.Body.List...)
   841  		}
   842  		return node
   843  	}, f)
   844  }
   845  
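         // fns holds the runtime functions that the analysis handles
         // specially. Its fields are resolved by lookupMembers via the
         // runtimeFns table below.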
   846  var fns struct {
   847  	// Locking functions.
   848  	lock, unlock *ssa.Function
   849  
   850  	// Allocation functions.
   851  	newobject, newarray, makemap, makechan *ssa.Function
   852  
   853  	// Slice functions.
   854  	growslice, slicecopy, slicestringcopy *ssa.Function
   855  
   856  	// Map functions.
   857  	mapaccess1, mapaccess2, mapassign1, mapassign, mapdelete *ssa.Function
   858  
   859  	// Channel functions.
   860  	chansend1, closechan *ssa.Function
   861  
   862  	// Misc.
   863  	gopanic *ssa.Function
   864  }
   865  
   866  var runtimeFns = map[string]interface{}{
   867  	"lock": &fns.lock, "unlock": &fns.unlock,
   868  	"newobject": &fns.newobject, "newarray": &fns.newarray,
   869  	"makemap": &fns.makemap, "makechan": &fns.makechan,
   870  	"growslice": &fns.growslice, "slicecopy": &fns.slicecopy,
   871  	"slicestringcopy": &fns.slicestringcopy,
   872  	"mapaccess1":      &fns.mapaccess1, "mapaccess2": &fns.mapaccess2,
   873  	//"mapassign1": &fns.mapassign1, // Pre-1.8
   874  	"mapassign": &fns.mapassign, // Go 1.8
   875  	"mapdelete": &fns.mapdelete,
   876  	"chansend1": &fns.chansend1, "closechan": &fns.closechan,
   877  	"gopanic": &fns.gopanic,
   878  }
   879  
   880  func lookupMembers(pkg *ssa.Package, out map[string]interface{}) {
   881  	for name, ptr := range out {
   882  		member, ok := pkg.Members[name]
   883  		if !ok {
   884  			log.Fatalf("%s.%s not found", pkg, name)
   885  		}
   886  		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(member))
   887  	}
   888  }
   889  
   890  // StringSpace interns strings into small integers.
   891  type StringSpace struct {
   892  	m map[string]int
   893  	s []string
   894  }
   895  
   896  // NewStringSpace returns a new, empty StringSpace.
   897  func NewStringSpace() *StringSpace {
   898  	return &StringSpace{m: make(map[string]int)}
   899  }
   900  
   901  // Intern turns str into a small integer where Intern(x) == Intern(y)
   902  // iff x == y.
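         //
         // For example (a sketch):
         //
         //	sp := NewStringSpace()
         //	a := sp.Intern("lock")   // 0
         //	b := sp.Intern("unlock") // 1
         //	c := sp.Intern("lock")   // 0 again, so a == c && a != b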
   903  func (sp *StringSpace) Intern(str string) int {
   904  	if id, ok := sp.m[str]; ok {
   905  		return id
   906  	}
   907  	id := len(sp.s)
   908  	sp.s = append(sp.s, str)
   909  	sp.m[str] = id
   910  	return id
   911  }
   912  
    913  // TryIntern returns the identifier for str if str has been interned
    914  // before. Otherwise, it does not intern str and returns 0, false.
   915  func (sp *StringSpace) TryIntern(str string) (int, bool) {
   916  	id, ok := sp.m[str]
   917  	return id, ok
   918  }
   919  
   920  // LockSet represents a set of locks and where they were acquired.
   921  type LockSet struct {
   922  	lca    *LockClassAnalysis
   923  	bits   big.Int
   924  	stacks map[int]*StackFrame
   925  }
   926  
   927  type LockSetKey string
   928  
   929  func NewLockSet() *LockSet {
   930  	return &LockSet{}
   931  }
   932  
   933  func (set *LockSet) clone() *LockSet {
   934  	out := &LockSet{lca: set.lca, stacks: map[int]*StackFrame{}}
   935  	out.bits.Set(&set.bits)
   936  	for k, v := range set.stacks {
   937  		out.stacks[k] = v
   938  	}
   939  	return out
   940  }
   941  
   942  func (set *LockSet) withLCA(lca *LockClassAnalysis) *LockSet {
   943  	if set.lca == nil {
   944  		set.lca = lca
   945  	} else if set.lca != lca {
   946  		panic("cannot mix locks from different LockClassAnalyses")
   947  	}
   948  	return set
   949  }
   950  
    951  // Key returns a string such that two LockSets' Keys are == iff both
    952  // LockSets have the same locks acquired at the same stacks.
   953  func (set *LockSet) Key() LockSetKey {
   954  	// TODO: This is complex enough now that maybe I just want a
   955  	// hash function and an equality function.
   956  	k := set.bits.Text(16)
   957  	for i := 0; i < set.bits.BitLen(); i++ {
   958  		if set.bits.Bit(i) != 0 {
   959  			k += ":"
   960  			for sf := set.stacks[i]; sf != nil; sf = sf.parent {
   961  				k += fmt.Sprintf("%v,", sf.call.Pos())
   962  			}
   963  		}
   964  	}
   965  	return LockSetKey(k)
   966  }
   967  
   968  // HashKey returns a key such that set1.Equal(set2) implies
   969  // set1.HashKey() == set2.HashKey().
   970  func (set *LockSet) HashKey() string {
   971  	return set.bits.Text(16)
   972  }
   973  
   974  // Equal returns whether set and set2 contain the same locks acquired
   975  // at the same stacks.
   976  func (set *LockSet) Equal(set2 *LockSet) bool {
   977  	if set.lca != set2.lca {
   978  		return false
   979  	}
   980  	if set.bits.Cmp(&set2.bits) != 0 {
   981  		return false
   982  	}
   983  	for k, v := range set.stacks {
   984  		if set2.stacks[k] != v {
   985  			return false
   986  		}
   987  	}
   988  	return true
   989  }
   990  
   991  // Contains returns true if set contains lock class lc.
   992  func (set *LockSet) Contains(lc *LockClass) bool {
   993  	return set.lca == lc.Analysis() && set.bits.Bit(lc.Id()) != 0
   994  }
   995  
   996  // Plus returns a LockSet that extends set with lock class lc,
   997  // acquired at stack. If lc is already in set, it does not get
   998  // re-added and Plus returns set.
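         //
         // Like the other LockSet operations, Plus is persistent: it never
         // mutates its receiver. A sketch (lc and stack from elsewhere):
         //
         //	ls2 := ls1.Plus(lc, stack)
         //	// ls1 is unchanged; ls2 additionally contains lc.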
   999  func (set *LockSet) Plus(lc *LockClass, stack *StackFrame) *LockSet {
  1000  	if set.bits.Bit(lc.Id()) != 0 {
  1001  		return set
  1002  	}
  1003  	out := set.clone().withLCA(lc.Analysis())
  1004  	out.bits.SetBit(&out.bits, lc.Id(), 1)
  1005  	out.stacks[lc.Id()] = stack
  1006  	return out
  1007  }
  1008  
  1009  // Union returns a LockSet that is the union of set and o. If both set
  1010  // and o contain the same lock, the stack from set is preferred.
  1011  func (set *LockSet) Union(o *LockSet) *LockSet {
  1012  	var new big.Int
  1013  	new.AndNot(&o.bits, &set.bits)
  1014  	if new.Sign() == 0 {
  1015  		// Nothing to add.
  1016  		return set
  1017  	}
  1018  
  1019  	out := set.clone().withLCA(o.lca)
  1020  	out.bits.Or(&out.bits, &o.bits)
  1021  	for k, v := range o.stacks {
  1022  		if out.stacks[k] == nil {
  1023  			out.stacks[k] = v
  1024  		}
  1025  	}
  1026  	return out
  1027  }
  1028  
  1029  // Minus returns a LockSet that is like set, but does not contain lock
  1030  // class lc.
  1031  func (set *LockSet) Minus(lc *LockClass) *LockSet {
  1032  	if set.bits.Bit(lc.Id()) == 0 {
  1033  		return set
  1034  	}
  1035  	out := set.clone().withLCA(lc.Analysis())
  1036  	out.bits.SetBit(&out.bits, lc.Id(), 0)
  1037  	delete(out.stacks, lc.Id())
  1038  	return out
  1039  }
  1040  
  1041  func (set *LockSet) String() string {
  1042  	b := []byte("{")
  1043  	first := true
  1044  	for i := 0; i < set.bits.BitLen(); i++ {
  1045  		if set.bits.Bit(i) != 0 {
  1046  			if !first {
  1047  				b = append(b, ',')
  1048  			}
  1049  			first = false
  1050  			b = append(b, set.lca.Lookup(i).String()...)
  1051  		}
  1052  	}
  1053  	return string(append(b, '}'))
  1054  }
  1055  
  1056  // A LockSetSet is a set of LockSets.
  1057  type LockSetSet struct {
  1058  	M map[LockSetKey]*LockSet
  1059  }
  1060  
  1061  func NewLockSetSet() *LockSetSet {
  1062  	return &LockSetSet{make(map[LockSetKey]*LockSet)}
  1063  }
  1064  
  1065  func (lss *LockSetSet) Add(ss *LockSet) {
  1066  	lss.M[ss.Key()] = ss
  1067  }
  1068  
  1069  func (lss *LockSetSet) Union(lss2 *LockSetSet) {
  1070  	if lss2 == nil {
  1071  		return
  1072  	}
  1073  	for k, ss := range lss2.M {
  1074  		lss.M[k] = ss
  1075  	}
  1076  }
  1077  
  1078  func (lss *LockSetSet) ToSlice() []*LockSet {
  1079  	// TODO: Make deterministic?
  1080  	slice := make([]*LockSet, 0, len(lss.M))
  1081  	for _, ss := range lss.M {
  1082  		slice = append(slice, ss)
  1083  	}
  1084  	return slice
  1085  }
  1086  
  1087  func (lss *LockSetSet) String() string {
  1088  	b := []byte("{")
  1089  	first := true
  1090  	for _, ss := range lss.M {
  1091  		if !first {
  1092  			b = append(b, ',')
  1093  		}
  1094  		first = false
  1095  		b = append(b, ss.String()...)
  1096  	}
  1097  	return string(append(b, '}'))
  1098  }
  1099  
  1100  // funcInfo contains analysis state for a single function.
  1101  type funcInfo struct {
  1102  	// exitStates is a memoization cache that maps from the enter
  1103  	// PathState of state.walkFunction to its exit *PathStateSet.
  1104  	exitStates *PathStateMap
  1105  
  1106  	// ifDeps records the set of control-flow dependencies for
  1107  	// each ssa.BasicBlock of this function. These are the values
  1108  	// at entry to each block that may affect future control flow
  1109  	// decisions.
  1110  	ifDeps []map[ssa.Value]struct{}
  1111  
  1112  	// debugTree is the block trace debug tree for this function.
  1113  	// If nil, this function is not being debug traced.
  1114  	debugTree *DebugTree
  1115  }
  1116  
  1117  // StackFrame is a stack of call sites. A nil *StackFrame represents
  1118  // an empty stack.
  1119  type StackFrame struct {
  1120  	parent *StackFrame
  1121  	call   ssa.Instruction
  1122  }
  1123  
  1124  var internedStackFrames = make(map[StackFrame]*StackFrame)
  1125  
   1126  // Flatten turns sf into a list of calls where the outermost call is
   1127  // first.
  1128  func (sf *StackFrame) Flatten(into []ssa.Instruction) []ssa.Instruction {
  1129  	if sf == nil {
  1130  		if into == nil {
  1131  			return nil
  1132  		}
  1133  		return into[:0]
  1134  	}
  1135  	return append(sf.parent.Flatten(into), sf.call)
  1136  }
  1137  
  1138  // Extend returns a new StackFrame that extends sf with call. call is
  1139  // typically an *ssa.Call, but other instructions can invoke runtime
  1140  // function calls as well.
  1141  func (sf *StackFrame) Extend(call ssa.Instruction) *StackFrame {
  1142  	return &StackFrame{sf, call}
  1143  }
  1144  
  1145  // Intern returns a canonical *StackFrame such that a.Intern() ==
  1146  // b.Intern() iff a and b have the same sequence of calls.
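         //
         // For example (a sketch; c1 and c2 are call instructions):
         //
         //	var empty *StackFrame
         //	a := empty.Extend(c1).Extend(c2)
         //	b := empty.Extend(c1).Extend(c2)
         //	// a != b, but a.Intern() == b.Intern()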
  1147  func (sf *StackFrame) Intern() *StackFrame {
  1148  	if sf == nil {
  1149  		return nil
  1150  	}
  1151  	if sf, ok := internedStackFrames[*sf]; ok {
  1152  		return sf
  1153  	}
  1154  	nsf := sf.parent.Intern().Extend(sf.call)
  1155  	if nsf, ok := internedStackFrames[*nsf]; ok {
  1156  		return nsf
  1157  	}
  1158  	internedStackFrames[*nsf] = nsf
  1159  	return nsf
  1160  }
  1161  
   1162  // TrimCommonPrefix eliminates the outermost frames that sf and other
   1163  // have in common and returns their distinct suffixes, keeping at least minLen frames in each.
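         //
         // For example (a sketch), if sf flattens to [a b c] and other
         // flattens to [a b d], then with minLen 0 the results flatten to
         // [c] and [d].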
  1164  func (sf *StackFrame) TrimCommonPrefix(other *StackFrame, minLen int) (*StackFrame, *StackFrame) {
  1165  	var buf [64]ssa.Instruction
  1166  	f1 := sf.Flatten(buf[:])
  1167  	f2 := other.Flatten(f1[len(f1):cap(f1)])
  1168  
  1169  	// Find the common prefix.
  1170  	var common int
  1171  	for common < len(f1)-minLen && common < len(f2)-minLen && f1[common] == f2[common] {
  1172  		common++
  1173  	}
  1174  
  1175  	// Reconstitute.
  1176  	if common == 0 {
  1177  		return sf, other
  1178  	}
  1179  	var nsf1, nsf2 *StackFrame
  1180  	for _, call := range f1[common:] {
  1181  		nsf1 = nsf1.Extend(call)
  1182  	}
  1183  	for _, call := range f2[common:] {
  1184  		nsf2 = nsf2.Extend(call)
  1185  	}
  1186  	return nsf1, nsf2
  1187  }
  1188  
  1189  type state struct {
  1190  	fset  *token.FileSet
  1191  	cg    *callgraph.Graph
  1192  	pta   *pointer.Result
  1193  	fns   map[*ssa.Function]*funcInfo
  1194  	stack *StackFrame
  1195  
  1196  	// heap contains handles to heap objects that are needed by
  1197  	// specially handled functions.
  1198  	heap struct {
  1199  		curG       *HeapObject
  1200  		g0         *HeapObject
  1201  		curM       *HeapObject
  1202  		curM_locks *HeapObject
  1203  	}
  1204  
  1205  	lca       LockClassAnalysis
  1206  	gscanLock *LockClass
  1207  
  1208  	lockOrder *LockOrder
  1209  
  1210  	// messages is the set of warning strings that have been
  1211  	// emitted.
  1212  	messages map[string]struct{}
  1213  
  1214  	// roots is the list of root functions to visit.
  1215  	roots   []*ssa.Function
  1216  	rootSet map[*ssa.Function]struct{}
  1217  
   1218  	// debugTree, if non-nil, is the function CFG debug tree.
  1219  	debugTree *DebugTree
  1220  	// debugging indicates that we're debugging this subgraph of
  1221  	// the CFG.
  1222  	debugging bool
  1223  }
  1224  
  1225  func (s *state) warnl(pos token.Pos, format string, args ...interface{}) {
  1226  	// TODO: Have a different message for path terminating conditions.
  1227  	var msg bytes.Buffer
  1228  	if pos.IsValid() {
  1229  		fmt.Fprintf(&msg, "%s: ", s.fset.Position(pos))
  1230  	}
  1231  	fmt.Fprintf(&msg, format+"\n", args...)
  1232  	if _, ok := s.messages[msg.String()]; ok {
  1233  		return
  1234  	}
  1235  	if s.messages == nil {
  1236  		s.messages = make(map[string]struct{})
  1237  	}
  1238  	s.messages[msg.String()] = struct{}{}
  1239  	fmt.Print(msg.String())
  1240  }
  1241  
  1242  func (s *state) warnp(pos token.Pos, format string, args ...interface{}) {
  1243  	var buf bytes.Buffer
  1244  	for stack := s.stack; stack != nil; stack = stack.parent {
  1245  		fmt.Fprintf(&buf, "    %s\n", stack.call.Parent().String())
  1246  		fmt.Fprintf(&buf, "        %s\n", s.fset.Position(stack.call.Pos()))
  1247  	}
  1248  	tb := strings.TrimSuffix(buf.String(), "\n")
  1249  	args = append(args, tb)
  1250  	s.warnl(pos, format+" at\n%s", args...)
  1251  }
  1252  
  1253  // addRoot adds fn as a root of the control flow graph to visit.
  1254  func (s *state) addRoot(fn *ssa.Function) {
  1255  	if _, ok := s.rootSet[fn]; ok {
  1256  		return
  1257  	}
  1258  	s.roots = append(s.roots, fn)
  1259  	s.rootSet[fn] = struct{}{}
  1260  }
  1261  
  1262  // callees returns the set of functions that call could possibly
  1263  // invoke. It returns nil for built-in functions or if pointer
  1264  // analysis failed.
  1265  func (s *state) callees(call ssa.CallInstruction) []*ssa.Function {
  1266  	if builtin, ok := call.Common().Value.(*ssa.Builtin); ok {
  1267  		// TODO: cap, len for map and channel
  1268  		switch builtin.Name() {
  1269  		case "append":
  1270  			return []*ssa.Function{fns.growslice}
  1271  		case "close":
  1272  			return []*ssa.Function{fns.closechan}
  1273  		case "copy":
  1274  			arg0 := builtin.Type().(*types.Signature).Params().At(0).Type().Underlying()
  1275  			if b, ok := arg0.(*types.Basic); ok && b.Kind() == types.String {
  1276  				return []*ssa.Function{fns.slicestringcopy}
  1277  			}
  1278  			return []*ssa.Function{fns.slicecopy}
  1279  		case "delete":
  1280  			return []*ssa.Function{fns.mapdelete}
  1281  		}
  1282  
  1283  		// Ignore others.
  1284  		return nil
  1285  	}
  1286  
  1287  	if fn := call.Common().StaticCallee(); fn != nil {
  1288  		return []*ssa.Function{fn}
  1289  	} else if cnode := s.cg.Nodes[call.Parent()]; cnode != nil {
  1290  		var callees []*ssa.Function
  1291  		// TODO: Build an index in walkFunction?
  1292  		for _, o := range cnode.Out {
  1293  			if o.Site != call {
  1294  				continue
  1295  			}
  1296  			callees = append(callees, o.Callee.Func)
  1297  		}
  1298  		return callees
  1299  	}
  1300  
  1301  	s.warnl(call.Pos(), "no call graph for %v", call)
  1302  	return nil
  1303  }
  1304  
  1305  // walkFunction explores f, starting at the given path state. It
  1306  // returns the set of path states possible on exit from f.
  1307  //
  1308  // ps should have block and mask set to nil, and ps.vs may contain
  1309  // heap values and parameter/free variable values for this function.
  1310  // ps.vs should not contain anything else.
  1311  //
  1312  // Path states returned from walkFunction will likewise have block and
  1313  // mask set to nil and ps.vs will be restricted to just heap values.
  1314  //
  1315  // This implements the lockset algorithm from Engler and Ashcroft,
  1316  // SOSP 2003, plus simple path sensitivity to reduce mistakes from
  1317  // correlated control flow.
  1318  //
  1319  // TODO: This totally fails with multi-use higher-order functions,
  1320  // since the flow computed by the pointer analysis is not segregated
  1321  // by PathState.
  1322  //
  1323  // TODO: A lot of call trees simply don't take locks. We could record
  1324  // that fact and fast-path the entry locks to the exit locks.
  1325  func (s *state) walkFunction(f *ssa.Function, ps PathState) *PathStateSet {
  1326  	fInfo := s.fns[f]
  1327  	if fInfo == nil {
  1328  		// First visit of this function.
  1329  
  1330  		// Compute control-flow dependencies.
  1331  		//
  1332  		// TODO: Figure out which control flow decisions
  1333  		// actually affect locking and only track those. Right
  1334  		// now we hit a lot of simple increment loops that
  1335  		// cause path aborts, but don't involve any locking.
  1336  		// Find all of the branches that could lead to a
  1337  		// lock/unlock (the may-precede set) and eliminate
  1338  		// those where both directions will always lead to the
  1339  		// lock/unlock anyway (where the lock/unlock is in the
  1340  		// must-succeed set). This can be answered with the
  1341  		// post-dominator tree. This is basically the same
  1342  		// computation we need to propagate liveness over
  1343  		// control flow.
  1344  		var ifInstrs []ssa.Instruction
  1345  		for _, b := range f.Blocks {
  1346  			if len(b.Instrs) == 0 {
  1347  				continue
  1348  			}
  1349  			instr, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If)
  1350  			if !ok {
  1351  				continue
  1352  			}
  1353  			ifInstrs = append(ifInstrs, instr)
  1354  		}
  1355  		ifDeps := livenessFor(f, ifInstrs)
  1356  		if debugFunctions[f.String()] {
  1357  			f.WriteTo(os.Stderr)
  1358  			fmt.Fprintf(os.Stderr, "if deps:\n")
  1359  			for bid, vals := range ifDeps {
  1360  				fmt.Fprintf(os.Stderr, "  %d: ", bid)
  1361  				for dep := range vals {
  1362  					fmt.Fprintf(os.Stderr, " %s", dep.(ssa.Value).Name())
  1363  				}
  1364  				fmt.Fprintf(os.Stderr, "\n")
  1365  			}
  1366  		}
  1367  
  1368  		fInfo = &funcInfo{
  1369  			exitStates: NewPathStateMap(),
  1370  			ifDeps:     ifDeps,
  1371  		}
  1372  		s.fns[f] = fInfo
  1373  
  1374  		if f.Blocks == nil {
  1375  			s.warnl(f.Pos(), "external function %s", f)
  1376  		}
  1377  
  1378  		if debugFunctions[f.String()] {
  1379  			fInfo.debugTree = new(DebugTree)
  1380  		}
  1381  	}
  1382  
  1383  	if f.Blocks == nil {
  1384  		// External function. Assume it doesn't affect locks
  1385  		// or heap state.
  1386  		pss1 := NewPathStateSet()
  1387  		pss1.Add(ps)
  1388  		return pss1
  1389  	}
  1390  
   1391  	if debugFunctions[f.String()] && !s.debugging {
  1392  		// Turn on debugging of this subtree.
  1393  		if s.debugTree == nil {
  1394  			s.debugTree = new(DebugTree)
  1395  		}
  1396  		s.debugging = true
  1397  		defer func() { s.debugging = false }()
  1398  	}
  1399  
  1400  	if s.debugging {
  1401  		var buf bytes.Buffer
  1402  		fmt.Fprintf(&buf, "%s\n- enter -\n", f)
  1403  		ps.WriteTo(&buf)
  1404  		s.debugTree.Push(buf.String())
  1405  		defer s.debugTree.Pop()
  1406  	}
  1407  
  1408  	// Check memoization cache.
  1409  	//
  1410  	// TODO: Our lockset can differ from a cached lockset by only
  1411  	// the stacks of the locks. Can we do something smarter than
  1412  	// recomputing the entire sub-graph in that situation? It's
  1413  	// rather complex because we may alter the lock order graph
  1414  	// with new stacks in the process. One could imagine tracking
  1415  	// a "predicate" and a compressed "delta" for the computation
  1416  	// and caching that.
  1417  	if memo := fInfo.exitStates.Get(ps); memo != nil {
  1418  		if s.debugging {
  1419  			s.debugTree.Appendf("\n- cached exit -\n%v", memo)
  1420  		}
  1421  		return memo.(*PathStateSet)
  1422  	}
  1423  
  1424  	if fInfo.debugTree != nil {
  1425  		var buf bytes.Buffer
  1426  		fmt.Fprintf(&buf, "%s\n- enter -\n", f)
  1427  		ps.WriteTo(&buf)
  1428  		fInfo.debugTree.Push(buf.String())
  1429  		defer fInfo.debugTree.Pop()
  1430  	}
  1431  
  1432  	// Resolve function cycles by returning an empty set of
  1433  	// locksets, which terminates this code path.
  1434  	//
  1435  	// TODO: RacerX detects cycles *without* regard to the entry
  1436  	// lock set. We could do that, but it doesn't seem to be an
  1437  	// issue to include the lock set. However, since we have the
  1438  	// lock set, maybe if we have a cycle with a non-empty lock
  1439  	// set we should report a self-deadlock.
  1440  	fInfo.exitStates.Set(ps, emptyPathStateSet)
  1441  
  1442  	blockCache := NewPathStateSet()
  1443  	enterPathState := PathState{f.Blocks[0], ps.lockSet, ps.vs, nil}
  1444  	exitStates := NewPathStateSet()
  1445  	s.walkBlock(blockCache, enterPathState, exitStates)
  1446  	fInfo.exitStates.Set(ps, exitStates)
  1447  	//log.Printf("%s: %s -> %s", f.Name(), locks, exitStates)
  1448  	if s.debugging {
  1449  		s.debugTree.Appendf("\n- exit -\n%v", exitStates)
  1450  	}
  1451  	return exitStates
  1452  }
  1453  
  1454  // PathState is the state during execution of a particular function.
  1455  type PathState struct {
  1456  	block   *ssa.BasicBlock
  1457  	lockSet *LockSet
  1458  	vs      ValState
  1459  	mask    map[ssa.Value]struct{}
  1460  }
  1461  
  1462  type pathStateKey struct {
  1463  	block   *ssa.BasicBlock
  1464  	lockSet string
  1465  }
  1466  
  1467  // HashKey returns a key such that ps1.Equal(ps2) implies
  1468  // ps1.HashKey() == ps2.HashKey().
  1469  func (ps *PathState) HashKey() pathStateKey {
  1470  	// Note that PathStateSet.Contains depends on this capturing
  1471  	// everything except the stacks and value state.
  1472  	return pathStateKey{ps.block, ps.lockSet.HashKey()}
  1473  }
  1474  
   1475  // Equal returns whether ps and ps2 represent the same program
   1476  // state.
  1477  func (ps *PathState) Equal(ps2 *PathState) bool {
  1478  	// ps.block == ps2.block implies ps.mask == ps2.mask, so this
  1479  	// is symmetric. Maybe we should just keep pre-masked
  1480  	// ValStates.
  1481  	return ps.block == ps2.block && ps.lockSet.Equal(ps2.lockSet) && ps.vs.EqualAt(ps2.vs, ps.mask)
  1482  }
  1483  
  1484  // ExitState returns ps narrowed to the path state tracked across a
  1485  // function return.
  1486  func (ps *PathState) ExitState() PathState {
  1487  	return PathState{
  1488  		lockSet: ps.lockSet,
  1489  		vs:      ps.vs.LimitToHeap(),
  1490  	}
  1491  }
  1492  
  1493  func (ps *PathState) WriteTo(w io.Writer) {
  1494  	if ps.block == nil {
  1495  		fmt.Fprintf(w, "PathState for function:\n")
  1496  	} else {
  1497  		fmt.Fprintf(w, "PathState for %s block %d:\n", ps.block.Parent(), ps.block.Index)
  1498  	}
  1499  	fmt.Fprintf(w, "  locks: %v\n", ps.lockSet)
  1500  	fmt.Fprintf(w, "  values:\n")
  1501  	ps.vs.WriteTo(&IndentWriter{W: w, Indent: []byte("    ")})
  1502  }
  1503  
  1504  // PathStateSet is a mutable set of PathStates.
  1505  type PathStateSet struct {
  1506  	m map[pathStateKey][]PathState
  1507  }
  1508  
  1509  // NewPathStateSet returns a new, empty PathStateSet.
  1510  func NewPathStateSet() *PathStateSet {
  1511  	return &PathStateSet{make(map[pathStateKey][]PathState)}
  1512  }
  1513  
  1514  var emptyPathStateSet = NewPathStateSet()
  1515  
  1516  func (set *PathStateSet) Empty() bool {
  1517  	return len(set.m) == 0
  1518  }
  1519  
  1520  // Add adds PathState ps to set.
  1521  func (set *PathStateSet) Add(ps PathState) {
  1522  	key := ps.HashKey()
  1523  	slice := set.m[key]
  1524  	for i := range slice {
  1525  		if slice[i].Equal(&ps) {
  1526  			return
  1527  		}
  1528  	}
  1529  	set.m[key] = append(slice, ps)
  1530  }
  1531  
  1532  // Contains returns whether set contains ps and the number of
  1533  // PathStates that differ only in value state and lock stacks.
  1534  func (set *PathStateSet) Contains(ps PathState) (bool, int) {
  1535  	// The "similar" count depends on the implementation of
  1536  	// PathState.HashKey.
  1537  	key := ps.HashKey()
  1538  	slice := set.m[key]
  1539  	for i := range slice {
  1540  		if slice[i].Equal(&ps) {
  1541  			return true, len(slice)
  1542  		}
  1543  	}
  1544  	return false, len(slice)
  1545  }
  1546  
  1547  // MapInPlace applies f to each PathState in set and replaces that
  1548  // PathState with f's result. This is optimized for the case where f
  1549  // returns the same PathState.
  1550  func (set *PathStateSet) MapInPlace(f func(ps PathState) PathState) {
  1551  	var toAdd []PathState
  1552  	for hashKey, slice := range set.m {
  1553  		for i := 0; i < len(slice); i++ {
  1554  			ps2 := f(slice[i])
  1555  			if slice[i].Equal(&ps2) {
  1556  				continue
  1557  			}
  1558  			// Remove ps from the set and queue ps2 to add.
  1559  			slice[i] = slice[len(slice)-1]
  1560  			slice = slice[:len(slice)-1]
  1561  			if len(slice) == 0 {
  1562  				delete(set.m, hashKey)
  1563  			} else {
  1564  				set.m[hashKey] = slice
  1565  			}
  1566  			toAdd = append(toAdd, ps2)
  1567  		}
  1568  	}
  1569  	for _, ps := range toAdd {
  1570  		set.Add(ps)
  1571  	}
  1572  }
  1573  
  1574  // ForEach applies f to each PathState in set.
  1575  func (set *PathStateSet) ForEach(f func(ps PathState)) {
  1576  	for _, slice := range set.m {
  1577  		for i := range slice {
  1578  			f(slice[i])
  1579  		}
  1580  	}
  1581  }
  1582  
// FlatMap applies f to each PathState in set and returns a new
// PathStateSet consisting of the union of f's results. f may use
// scratch as temporary space and may return it; scratch will always
// be a slice with length 0.
func (set *PathStateSet) FlatMap(f func(ps PathState, scratch []PathState) []PathState) *PathStateSet {
  1588  	var scratch [16]PathState
  1589  	out := NewPathStateSet()
  1590  	for _, slice := range set.m {
  1591  		for _, ps := range slice {
  1592  			for _, nps := range f(ps, scratch[:0]) {
  1593  				out.Add(nps)
  1594  			}
  1595  		}
  1596  	}
  1597  	return out
  1598  }
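
// For example (a hypothetical transfer function), forking one path
// state into one per successor block can reuse scratch to avoid
// allocating in the common case:
//
//     out := set.FlatMap(func(ps PathState, scratch []PathState) []PathState {
//         for _, succ := range ps.block.Succs {
//             ps2 := ps
//             ps2.block = succ
//             scratch = append(scratch, ps2)
//         }
//         return scratch
//     })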
  1599  
  1600  // PathStateMap is a mutable map keyed by PathState.
  1601  type PathStateMap struct {
  1602  	m map[pathStateKey][]pathStateMapEntry
  1603  }
  1604  
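// pathStateMapEntry is one key/value pair in a PathStateMap bucket.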
  1605  type pathStateMapEntry struct {
  1606  	ps  PathState
  1607  	val interface{}
  1608  }
  1609  
// NewPathStateMap returns a new, empty PathStateMap.
  1611  func NewPathStateMap() *PathStateMap {
  1612  	return &PathStateMap{make(map[pathStateKey][]pathStateMapEntry)}
  1613  }
  1614  
  1615  // Set sets the value associated with ps to val in psm.
  1616  func (psm *PathStateMap) Set(ps PathState, val interface{}) {
  1617  	key := ps.HashKey()
  1618  	slice := psm.m[key]
  1619  	for i := range slice {
  1620  		if slice[i].ps.Equal(&ps) {
  1621  			slice[i].val = val
  1622  			return
  1623  		}
  1624  	}
  1625  	psm.m[key] = append(slice, pathStateMapEntry{ps, val})
  1626  }
  1627  
// Get returns the value associated with ps in psm, or nil if there is
// none.
  1629  func (psm *PathStateMap) Get(ps PathState) interface{} {
  1630  	slice := psm.m[ps.HashKey()]
  1631  	for i := range slice {
  1632  		if slice[i].ps.Equal(&ps) {
  1633  			return slice[i].val
  1634  		}
  1635  	}
  1636  	return nil
  1637  }
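
// For example (a hypothetical sketch), a PathStateMap can memoize a
// result per path state, with the usual type assertion on Get:
//
//     psm := NewPathStateMap()
//     psm.Set(ps, exitStates)
//     if v := psm.Get(ps); v != nil {
//         exitStates = v.(*PathStateSet)
//     }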
  1638  
// walkBlock visits a block and all blocks reachable from it, starting
// from the path state enterPathState. When walkBlock reaches the
// return point of the function, it adds the possible path states at
// that point to exitStates. blockCache records the path states with
// which each block of this function has already been entered; it is
// used to terminate recursion.
  1645  func (s *state) walkBlock(blockCache *PathStateSet, enterPathState PathState, exitStates *PathStateSet) {
  1646  	b := enterPathState.block
  1647  	f := b.Parent()
	// Mask the value state to the values that are live at this
	// block. Note that the live set includes phis at the beginning
	// of this block if they participate in control flow decisions,
	// so we'll pick up any phi values assigned by our caller.
  1654  	enterPathState.mask = s.fns[f].ifDeps[b.Index]
  1655  
  1656  	debugTree := s.fns[f].debugTree
  1657  	if debugTree != nil {
  1658  		var buf bytes.Buffer
  1659  		fmt.Fprintf(&buf, "block %v\n", b.Index)
  1660  		enterPathState.WriteTo(&buf)
  1661  		debugTree.Push(buf.String())
  1662  		defer debugTree.Pop()
  1663  	}
  1664  
  1665  	if cached, similar := blockCache.Contains(enterPathState); cached {
  1666  		// Terminate recursion. Some other path has already
  1667  		// visited here with this lock set and value state.
  1668  		if debugTree != nil {
  1669  			debugTree.Leaf("cached")
  1670  		}
  1671  		return
  1672  	} else if similar > 10 {
  1673  		s.warnl(blockPos(b), "too many states, trimming path (block %d)", b.Index)
  1674  		if debugTree != nil {
  1675  			debugTree.Leaf("too many states")
  1676  		}
  1677  		return
  1678  	}
  1679  	blockCache.Add(enterPathState)
  1680  
  1681  	// Upon block entry there's just the one entry path state.
  1682  	pathStates := NewPathStateSet()
  1683  	pathStates.Add(enterPathState)
  1684  
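	// doCall simulates a call at instr to each possible callee in
	// fns. Each current path state's lock set and heap (plus any
	// tracked argument values) flow into the callee, and each
	// callee exit state's lock set and heap flow back into the
	// caller's state. Functions with registered call handlers are
	// simulated by their handlers instead.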
  1685  	doCall := func(instr ssa.Instruction, fns []*ssa.Function) {
  1686  		s.stack = s.stack.Extend(instr)
  1687  		pathStates = pathStates.FlatMap(func(ps PathState, newps []PathState) []PathState {
  1688  			psEntry := PathState{
  1689  				lockSet: ps.lockSet,
  1690  				vs:      ps.vs.LimitToHeap(),
  1691  			}
  1692  			for _, fn := range fns {
  1693  				if handler, ok := callHandlers[fn.String()]; ok {
  1694  					// TODO: Instead of using
  1695  					// FlatMap, I could just pass
  1696  					// the PathStateSet to add new
  1697  					// states to.
  1698  					newps = handler(s, ps, instr, newps)
  1699  				} else {
					// Bind argument values if
					// this function is marked for
					// argument tracking.
					psEntry := psEntry // copy, so argument bindings stay per-callee
  1704  					if trackArgs[fn.String()] {
  1705  						for i, arg := range instr.(*ssa.Call).Call.Args {
  1706  							aval := ps.vs.Get(arg)
  1707  							if aval != nil {
  1708  								psEntry.vs = psEntry.vs.Extend(fn.Params[i], aval)
  1709  							}
  1710  						}
  1711  					}
  1712  
  1713  					s.walkFunction(fn, psEntry).ForEach(func(ps2 PathState) {
  1714  						ps.lockSet = ps2.lockSet
  1715  						ps.vs.heap = ps2.vs.heap
  1716  						newps = append(newps, ps)
  1717  					})
  1718  				}
  1719  			}
  1720  			return newps
  1721  		})
  1722  		s.stack = s.stack.parent
  1723  	}
  1724  
  1725  	// For each instruction, compute the effect of that
  1726  	// instruction on all possible path states at that point.
  1727  	var ifCond ssa.Value
  1728  	for _, instr := range b.Instrs {
  1729  		// Update value state with the effect of this
  1730  		// instruction.
  1731  		pathStates.MapInPlace(func(ps PathState) PathState {
  1732  			ps.vs = ps.vs.Do(instr)
  1733  			return ps
  1734  		})
  1735  
  1736  		switch instr := instr.(type) {
  1737  		case *ssa.If:
  1738  			// We'll bind ifCond to true or false when we
  1739  			// visit successors.
  1740  			ifCond = instr.Cond
  1741  
  1742  		case *ssa.Call:
  1743  			// TODO: There are other types of
  1744  			// ssa.CallInstructions, but they have different
  1745  			// control flow.
  1746  			outs := s.callees(instr)
  1747  			if len(outs) == 0 {
  1748  				// This is a built-in like print or
  1749  				// len. Assume it doesn't affect the
  1750  				// locksets.
  1751  				break
  1752  			}
  1753  			doCall(instr, outs)
  1754  
  1755  		// TODO: runtime calls for ssa.ChangeInterface,
  1756  		// ssa.Convert, ssa.Defer, ssa.MakeInterface,
  1757  		// ssa.Next, ssa.Range, ssa.Select, ssa.TypeAssert.
  1758  
  1759  		// Unfortunately, we can't turn ssa.Alloc into a
  1760  		// newobject call because ssa turns any variable
  1761  		// captured by a closure into an Alloc. There's no way
  1762  		// to tell if it was actually a new() expression or
  1763  		// not.
  1764  		// case *ssa.Alloc:
  1765  		// 	if instr.Heap {
  1766  		// 		doCall(instr, []*ssa.Function{fns.newobject})
  1767  		// 	}
  1768  
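		// Each of the following instructions is compiled to one
		// or more runtime calls, so simulate it as a call to a
		// corresponding runtime function.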
  1769  		case *ssa.Lookup:
  1770  			if _, ok := instr.X.Type().Underlying().(*types.Map); !ok {
  1771  				break
  1772  			}
  1773  			if instr.CommaOk {
  1774  				doCall(instr, []*ssa.Function{fns.mapaccess2})
  1775  			} else {
  1776  				doCall(instr, []*ssa.Function{fns.mapaccess1})
  1777  			}
  1778  
  1779  		case *ssa.MakeChan:
  1780  			doCall(instr, []*ssa.Function{fns.makechan})
  1781  
  1782  		case *ssa.MakeMap:
  1783  			doCall(instr, []*ssa.Function{fns.makemap})
  1784  
  1785  		case *ssa.MakeSlice:
  1786  			doCall(instr, []*ssa.Function{fns.newarray})
  1787  
  1788  		case *ssa.MapUpdate:
  1789  			fn := fns.mapassign // Go 1.8
  1790  			if fn == nil {
  1791  				fn = fns.mapassign1
  1792  			}
  1793  			doCall(instr, []*ssa.Function{fn})
  1794  
  1795  		case *ssa.Panic:
  1796  			doCall(instr, []*ssa.Function{fns.gopanic})
  1797  
  1798  		case *ssa.Send:
  1799  			doCall(instr, []*ssa.Function{fns.chansend1})
  1800  
  1801  		case *ssa.Go:
  1802  			for _, o := range s.callees(instr) {
  1803  				//log.Printf("found go %s; adding to roots", o)
  1804  				s.addRoot(o)
  1805  			}
  1806  
  1807  		case *ssa.Return:
			// We've reached function exit. Add the
			// current path states to exitStates.
  1810  			//
  1811  			// TODO: Handle defers.
  1812  
  1813  			pathStates.ForEach(func(ps PathState) {
  1814  				exitStates.Add(ps.ExitState())
  1815  				if debugTree != nil {
  1816  					var buf bytes.Buffer
  1817  					ps.WriteTo(&buf)
					debugTree.Leaff("exit:\n%s", buf.String())
  1819  				}
  1820  			})
  1821  		}
  1822  	}
  1823  
  1824  	// Annoyingly, the last instruction in an ssa.BasicBlock
  1825  	// doesn't have a location, even if it obviously corresponds
  1826  	// to a source statement. exitPos guesses one.
  1827  	exitPos := func(b *ssa.BasicBlock) token.Pos {
  1828  		for b != nil {
  1829  			for i := len(b.Instrs) - 1; i >= 0; i-- {
  1830  				if pos := b.Instrs[i].Pos(); pos != 0 {
  1831  					return pos
  1832  				}
  1833  			}
  1834  			if len(b.Preds) == 0 {
  1835  				break
  1836  			}
  1837  			b = b.Preds[0]
  1838  		}
  1839  		return 0
  1840  	}
	_ = exitPos // Only used by commented-out logging below.
  1842  
	if pathStates.Empty() && debugTree != nil {
  1844  		// This happens after functions that don't return.
  1845  		debugTree.Leaf("no path states")
  1846  	}
  1847  
  1848  	// Process successor blocks.
  1849  	pathStates.ForEach(func(ps PathState) {
  1850  		// If this is an "if", see if we have enough
  1851  		// information to determine its direction.
  1852  		succs := b.Succs
  1853  		if ifCond != nil {
  1854  			x := ps.vs.Get(ifCond)
  1855  			if x != nil {
  1856  				//log.Printf("determined control flow at %s: %v", s.fset.Position(exitPos(b)), x)
  1857  				if constant.BoolVal(x.(DynConst).c) {
  1858  					// Take true path.
  1859  					succs = succs[:1]
  1860  				} else {
  1861  					// Take false path.
  1862  					succs = succs[1:]
  1863  				}
  1864  			}
  1865  		}
  1866  
  1867  		// Process block successors.
  1868  		for i, b2 := range succs {
  1869  			ps2 := ps
  1870  			ps2.block = b2
  1871  			if ifCond != nil {
  1872  				// TODO: We could back-propagate this
  1873  				// in simple cases, like when ifCond
  1874  				// is a == BinOp. (And we could
  1875  				// forward-propagate that! Hmm.)
  1876  				ps2.vs = ps2.vs.Extend(ifCond, DynConst{constant.MakeBool(i == 0)})
  1877  			}
  1878  
  1879  			// Propagate values over phis at the beginning
  1880  			// of b2.
  1881  			for _, instr := range b2.Instrs {
  1882  				instr, ok := instr.(*ssa.Phi)
  1883  				if !ok {
  1884  					break
  1885  				}
  1886  				for i, inval := range instr.Edges {
  1887  					if b2.Preds[i] == b {
  1888  						x := ps2.vs.Get(inval)
  1889  						if x != nil {
  1890  							ps2.vs = ps2.vs.Extend(instr, x)
  1891  						}
  1892  					}
  1893  				}
  1894  			}
  1895  
  1896  			if debugTree != nil && len(b.Succs) > 1 {
  1897  				if b2 == b.Succs[0] {
  1898  					debugTree.SetEdge("T")
  1899  				} else if b2 == b.Succs[1] {
  1900  					debugTree.SetEdge("F")
  1901  				}
  1902  			}
  1903  			s.walkBlock(blockCache, ps2, exitStates)
  1904  		}
  1905  	})
  1906  }
  1907  
  1908  // blockPos returns the best position it can for b.
  1909  func blockPos(b *ssa.BasicBlock) token.Pos {
  1910  	var visited []bool
  1911  	for {
  1912  		if visited != nil {
  1913  			if visited[b.Index] {
  1914  				// Give up.
  1915  				return b.Parent().Pos()
  1916  			}
  1917  			visited[b.Index] = true
  1918  		}
  1919  		// Phis have useless line numbers. Find the first
  1920  		// "real" instruction.
  1921  		for _, i := range b.Instrs {
  1922  			if _, ok := i.(*ssa.Phi); ok || !i.Pos().IsValid() {
  1923  				continue
  1924  			}
  1925  			return i.Pos()
  1926  		}
  1927  		if len(b.Preds) == 0 {
  1928  			return b.Parent().Pos()
  1929  		}
  1930  		// Try b's predecessor.
  1931  		if visited == nil {
  1932  			// Delayed allocation of visited.
  1933  			visited = make([]bool, len(b.Parent().Blocks))
  1934  			visited[b.Index] = true
  1935  		}
  1936  		b = b.Preds[0]
  1937  	}
  1938  }