github.com/bir3/gocompiler@v0.9.2202/src/cmd/compile/internal/escape/solve.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package escape

import (
	"fmt"
	"strings"

	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/logopt"
	"github.com/bir3/gocompiler/src/cmd/internal/src"
)

// walkAll computes the minimal dereferences between all pairs of
// locations.
func (b *batch) walkAll() {
	// We use a work queue to keep track of locations that we need
	// to visit, and repeatedly walk until we reach a fixed point.
	//
	// We walk once from each location (including the heap), and
	// then re-enqueue each location on its transition from
	// !persists->persists and !escapes->escapes, which can each
	// happen at most once. So we take Θ(len(b.allLocs)) walks.

	// LIFO queue with room for b.allLocs plus the mutator, callee,
	// and heap pseudo-locations.
	todo := make([]*location, 0, len(b.allLocs)+3)
	enqueue := func(loc *location) {
		if !loc.queued {
			todo = append(todo, loc)
			loc.queued = true
		}
	}

	for _, loc := range b.allLocs {
		enqueue(loc)
	}
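	// Also walk from the pseudo-locations (see outlives): heapLoc
	// stands for the heap, while mutatorLoc and calleeLoc stand for
	// "is mutated" and "is called" flows rather than real locations.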
	enqueue(&b.mutatorLoc)
	enqueue(&b.calleeLoc)
	enqueue(&b.heapLoc)

	var walkgen uint32
	for len(todo) > 0 {
		root := todo[len(todo)-1]
		todo = todo[:len(todo)-1]
		root.queued = false

		walkgen++
		b.walkOne(root, walkgen, enqueue)
	}
}

// walkOne computes the minimal number of dereferences from root to
// all other locations.
func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
	// The data flow graph has negative edges (from addressing
	// operations), so we use the Bellman-Ford algorithm. However,
	// we don't have to worry about infinite negative cycles since
	// we bound intermediate dereference counts to 0.
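	//
	// Edge weights follow this package's derefs convention:
	// informally, an assignment "p = &q" contributes an edge with
	// derefs = -1 and "p = *q" one with derefs = +1, so a negative
	// distance from root means a location's address reaches root.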

	root.walkgen = walkgen
	root.derefs = 0
	root.dst = nil

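	// attrCalls on root means root may be called somewhere we don't
	// track precisely. If root is a closure from the batch currently
	// being analyzed, we therefore lose track of where its results
	// flow; record that (once) and re-walk from the result locations.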
	if root.hasAttr(attrCalls) {
		if clo, ok := root.n.(*ir.ClosureExpr); ok {
			if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
				fn.SetClosureResultsLost(true)

				// Re-flow from the closure's results, now that we're aware
				// we lost track of them.
				for _, result := range fn.Type().Results() {
					enqueue(b.oldLoc(result.Nname.(*ir.Name)))
				}
			}
		}
	}

	todo := []*location{root} // LIFO queue
	for len(todo) > 0 {
		l := todo[len(todo)-1]
		todo = todo[:len(todo)-1]

		derefs := l.derefs
		var newAttrs locAttr

		// If l.derefs < 0, then l's address flows to root.
		addressOf := derefs < 0
		if addressOf {
			// For a flow path like "root = &l; l = x",
			// l's address flows to root, but x's does
			// not. We recognize this by lower bounding
			// derefs at 0.
			derefs = 0

			// If l's address flows somewhere that
			// outlives it, then l needs to be heap
			// allocated.
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
						fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
					}
					explanation := b.explainPath(root, l)
					if logopt.Enabled() {
						var e_curfn *ir.Func // TODO(mdempsky): Fix.
						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
					}
				}
				newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
			} else if root.hasAttr(attrPersists) {
				// If l's address flows to a persistent location, then l
				// needs to persist too.
				newAttrs |= attrPersists
			}
		}

		if derefs == 0 {
			newAttrs |= root.attrs & (attrMutates | attrCalls)
		}

		// l's value flows to root. If l is a function
		// parameter and root is the heap or a
		// corresponding result parameter, then record
		// that value flow for tagging the function
		// later.
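		// For example, given "func f(p *int) *int { return p }", p's
		// value reaches the result location with derefs=0; leakTo
		// records that flow so it can later be encoded into f's
		// parameter tags for use at f's call sites.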
		if l.isName(ir.PPARAM) {
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
					}
					explanation := b.explainPath(root, l)
					if logopt.Enabled() {
						var e_curfn *ir.Func // TODO(mdempsky): Fix.
						logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
					}
				}
				l.leakTo(root, derefs)
			}
			if root.hasAttr(attrMutates) {
				l.paramEsc.AddMutator(derefs)
			}
			if root.hasAttr(attrCalls) {
				l.paramEsc.AddCallee(derefs)
			}
		}

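		// If this walk discovered new attributes for l, enqueue l so a
		// later walk propagates them to the locations flowing into l.
		// Once l escapes, stop relaxing its incoming edges here: the
		// re-walk rooted at l, which now outlives everything, takes
		// over from this point.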
		if newAttrs&^l.attrs != 0 {
			l.attrs |= newAttrs
			enqueue(l)
			if l.attrs&attrEscapes != 0 {
				continue
			}
		}

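		// Bellman-Ford relaxation step: for each edge into l, shorten
		// edge.src's distance from root if the path through l improves
		// it, and record the predecessor (dst, dstEdgeIdx) so that
		// explainPath can later reconstruct the path.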
		for i, edge := range l.edges {
			if edge.src.hasAttr(attrEscapes) {
				continue
			}
			d := derefs + edge.derefs
			if edge.src.walkgen != walkgen || edge.src.derefs > d {
				edge.src.walkgen = walkgen
				edge.src.derefs = d
				edge.src.dst = l
				edge.src.dstEdgeIdx = i
				todo = append(todo, edge.src)
			}
		}
	}
}

// explainPath prints an explanation of how src flows to the walk root.
func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
	visited := make(map[*location]bool)
	pos := base.FmtPos(src.n.Pos())
	var explanation []*logopt.LoggedOpt
	for {
		// Prevent infinite loop.
		if visited[src] {
			if base.Flag.LowerM >= 2 {
				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
			}
			break
		}
		visited[src] = true
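		// Step one edge along the predecessor chain recorded by
		// walkOne, moving from src toward root.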
		dst := src.dst
		edge := &dst.edges[src.dstEdgeIdx]
		if edge.src != src {
			base.Fatalf("path inconsistency: %v != %v", edge.src, src)
		}

		explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)

		if dst == root {
			break
		}
		src = dst
	}

	return explanation
}

func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
	ops := "&"
	if derefs >= 0 {
		ops = strings.Repeat("*", derefs)
	}
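	// The flow renders as pseudo-Go: derefs=-1 prints as
	// "dst = &src", derefs=2 as "dst = **src", and so on.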
	print := base.Flag.LowerM >= 2

	flow := fmt.Sprintf("   flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
	if print {
		fmt.Printf("%s:%s\n", pos, flow)
	}
	if logopt.Enabled() {
		var epos src.XPos
		if notes != nil {
			epos = notes.where.Pos()
		} else if srcloc != nil && srcloc.n != nil {
			epos = srcloc.n.Pos()
		}
		var e_curfn *ir.Func // TODO(mdempsky): Fix.
		explanation = append(explanation, logopt.NewLoggedOpt(epos, epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
	}

	for note := notes; note != nil; note = note.next {
		if print {
			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
		}
		if logopt.Enabled() {
			var e_curfn *ir.Func // TODO(mdempsky): Fix.
			notePos := note.where.Pos()
			explanation = append(explanation, logopt.NewLoggedOpt(notePos, notePos, "escflow", "escape", ir.FuncName(e_curfn),
				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
		}
	}
	return explanation
}

func (b *batch) explainLoc(l *location) string {
	if l == &b.heapLoc {
		return "{heap}"
	}
	if l.n == nil {
		// TODO(mdempsky): Omit entirely.
		return "{temp}"
	}
	if l.n.Op() == ir.ONAME {
		return fmt.Sprintf("%v", l.n)
	}
	return fmt.Sprintf("{storage for %v}", l.n)
}

// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
func (b *batch) outlives(l, other *location) bool {
	// The heap outlives everything.
	if l.hasAttr(attrEscapes) {
		return true
	}

	// Pseudo-locations that don't really exist.
	if l == &b.mutatorLoc || l == &b.calleeLoc {
		return false
	}

	// We don't know what callers do with returned values, so
	// pessimistically we need to assume they flow to the heap and
	// outlive everything too.
	if l.isName(ir.PPARAMOUT) {
		// Exception: Closures can return locations allocated outside of
		// them without forcing them to the heap, if we can statically
		// identify all call sites. For example:
		//
		//	var u int // okay to stack allocate
		//	fn := func() *int { return &u }
		//	*fn() = 42
		if containsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
			return false
		}

		return true
	}

	// If l and other are within the same function, then l
	// outlives other if it was declared outside other's loop
	// scope. For example:
	//
	//	var l *int
	//	for {
	//		l = new(int) // must heap allocate: outlives for loop
	//	}
	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
		return true
	}

	// If other is declared within a child closure of where l is
	// declared, then l outlives it. For example:
	//
	//	var l *int
	//	func() {
	//		l = new(int) // must heap allocate: outlives call frame (if not inlined)
	//	}()
	if containsClosure(l.curfn, other.curfn) {
		return true
	}

	return false
}

// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *ir.Func) bool {
	// Common cases.
	if f == c || c.OClosure == nil {
		return false
	}

	// Closures within function Foo are named like "Foo.funcN..."
	// TODO(mdempsky): Better way to recognize this.
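	// For example, closures in a function Foo are named "Foo.func1",
	// "Foo.func2", etc., with deeper nesting adding further
	// dot-separated suffixes, so a closure contained in f has f's
	// name followed by "." as a prefix of its own name.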
	fn := f.Sym().Name
	cn := c.Sym().Name
	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}