github.com/gagliardetto/golang-go@v0.0.0-20201020153340-53909ea70814/cmd/compile/internal/gc/escape.go (about)

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
     8  	"github.com/gagliardetto/golang-go/cmd/compile/internal/logopt"
     9  	"github.com/gagliardetto/golang-go/cmd/compile/internal/types"
    10  	"fmt"
    11  	"math"
    12  	"strings"
    13  )
    14  
    15  // Escape analysis.
    16  //
    17  // Here we analyze functions to determine which Go variables
    18  // (including implicit allocations such as calls to "new" or "make",
    19  // composite literals, etc.) can be allocated on the stack. The two
    20  // key invariants we have to ensure are: (1) pointers to stack objects
    21  // cannot be stored in the heap, and (2) pointers to a stack object
    22  // cannot outlive that object (e.g., because the declaring function
    23  // returned and destroyed the object's stack frame, or its space is
    24  // reused across loop iterations for logically distinct variables).
    25  //
    26  // We implement this with a static data-flow analysis of the AST.
    27  // First, we construct a directed weighted graph where vertices
    28  // (termed "locations") represent variables allocated by statements
    29  // and expressions, and edges represent assignments between variables
    30  // (with weights representing addressing/dereference counts).
    31  //
    32  // Next we walk the graph looking for assignment paths that might
    33  // violate the invariants stated above. If a variable v's address is
    34  // stored in the heap or elsewhere that may outlive it, then v is
    35  // marked as requiring heap allocation.
    36  //
    37  // To support interprocedural analysis, we also record data-flow from
    38  // each function's parameters to the heap and to its result
    39  // parameters. This information is summarized as "parameter tags",
    40  // which are used at static call sites to improve escape analysis of
    41  // function arguments.
    42  
    43  // Constructing the location graph.
    44  //
    45  // Every allocating statement (e.g., variable declaration) or
    46  // expression (e.g., "new" or "make") is first mapped to a unique
    47  // "location."
    48  //
    49  // We also model every Go assignment as a directed edge between
    50  // locations. The number of dereference operations minus the number of
    51  // addressing operations is recorded as the edge's weight (termed
    52  // "derefs"). For example:
    53  //
    54  //     p = &q    // -1
    55  //     p = q     //  0
    56  //     p = *q    //  1
    57  //     p = **q   //  2
    58  //
    59  //     p = **&**&q  // 2
    60  //
    61  // Note that the & operator can only be applied to addressable
    62  // expressions, and the expression &x itself is not addressable, so
    63  // derefs cannot go below -1.
    64  //
    65  // Every Go language construct is lowered into this representation,
    66  // generally without sensitivity to flow, path, or context; and
    67  // without distinguishing elements within a compound variable. For
    68  // example:
    69  //
    70  //     var x struct { f, g *int }
    71  //     var u []*int
    72  //
    73  //     x.f = u[0]
    74  //
    75  // is modeled simply as
    76  //
    77  //     x = *u
    78  //
    79  // That is, we don't distinguish x.f from x.g, or u[0] from u[1],
    80  // u[2], etc. However, we do record the implicit dereference involved
    81  // in indexing a slice.
    82  
// An Escape holds state for a single batch of escape analysis.
type Escape struct {
	allLocs []*EscLocation // all locations allocated during this batch

	curfn *Node // function currently being analyzed

	// loopDepth counts the current loop nesting depth within
	// curfn. It increments within each "for" loop and at each
	// label with a corresponding backwards "goto" (i.e.,
	// unstructured loop).
	loopDepth int

	heapLoc  EscLocation // pseudo-location representing the heap
	blankLoc EscLocation // pseudo-location for values assigned to the blank identifier
}
    97  
// An EscLocation represents an abstract location that stores a Go
// variable.
type EscLocation struct {
	n         *Node     // represented variable or expression, if any
	curfn     *Node     // enclosing function
	edges     []EscEdge // incoming edges
	loopDepth int       // loopDepth at declaration

	// derefs and walkgen are used during walkOne to track the
	// minimal dereferences from the walk root.
	derefs  int // >= -1
	walkgen uint32

	// dst and dstEdgeIdx track the next immediate assignment
	// destination location during walkOne, along with the index
	// of the edge pointing back to this location.
	dst        *EscLocation
	dstEdgeIdx int

	// queued is used by walkAll to track whether this location is
	// in the walk queue.
	queued bool

	// escapes reports whether the represented variable's address
	// escapes; that is, whether the variable must be heap
	// allocated.
	escapes bool

	// transient reports whether the represented expression's
	// address does not outlive the statement; that is, whether
	// its storage can be immediately reused.
	transient bool

	// paramEsc records the represented parameter's leak set.
	paramEsc EscLeaks
}
   134  
// An EscEdge represents an assignment edge between two Go variables.
type EscEdge struct {
	src    *EscLocation // source location of the assignment
	derefs int          // >= -1; dereference count minus addressing count (see package comment)
	notes  *EscNote     // diagnostic annotations, populated only with -m >= 2
}
   141  
   142  // escapeFuncs performs escape analysis on a minimal batch of
   143  // functions.
   144  func escapeFuncs(fns []*Node, recursive bool) {
   145  	for _, fn := range fns {
   146  		if fn.Op != ODCLFUNC {
   147  			Fatalf("unexpected node: %v", fn)
   148  		}
   149  	}
   150  
   151  	var e Escape
   152  	e.heapLoc.escapes = true
   153  
   154  	// Construct data-flow graph from syntax trees.
   155  	for _, fn := range fns {
   156  		e.initFunc(fn)
   157  	}
   158  	for _, fn := range fns {
   159  		e.walkFunc(fn)
   160  	}
   161  	e.curfn = nil
   162  
   163  	e.walkAll()
   164  	e.finish(fns)
   165  }
   166  
   167  func (e *Escape) initFunc(fn *Node) {
   168  	if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
   169  		Fatalf("unexpected node: %v", fn)
   170  	}
   171  	fn.Esc = EscFuncPlanned
   172  	if Debug['m'] > 3 {
   173  		Dump("escAnalyze", fn)
   174  	}
   175  
   176  	e.curfn = fn
   177  	e.loopDepth = 1
   178  
   179  	// Allocate locations for local variables.
   180  	for _, dcl := range fn.Func.Dcl {
   181  		if dcl.Op == ONAME {
   182  			e.newLoc(dcl, false)
   183  		}
   184  	}
   185  }
   186  
// walkFunc analyzes the body of fn, recording data-flow edges for
// every statement and expression.
func (e *Escape) walkFunc(fn *Node) {
	fn.Esc = EscFuncStarted

	// Identify labels that mark the head of an unstructured loop.
	inspectList(fn.Nbody, func(n *Node) bool {
		switch n.Op {
		case OLABEL:
			// Tentatively assume the label is non-looping.
			n.Sym.Label = asTypesNode(&nonlooping)

		case OGOTO:
			// If we visited the label before the goto,
			// then this is a looping label.
			if n.Sym.Label == asTypesNode(&nonlooping) {
				n.Sym.Label = asTypesNode(&looping)
			}
		}

		return true
	})

	e.curfn = fn
	e.loopDepth = 1
	e.block(fn.Nbody)
}
   211  
   212  // Below we implement the methods for walking the AST and recording
   213  // data flow edges. Note that because a sub-expression might have
   214  // side-effects, it's important to always visit the entire AST.
   215  //
   216  // For example, write either:
   217  //
   218  //     if x {
   219  //         e.discard(n.Left)
   220  //     } else {
   221  //         e.value(k, n.Left)
   222  //     }
   223  //
   224  // or
   225  //
   226  //     if x {
   227  //         k = e.discardHole()
   228  //     }
   229  //     e.value(k, n.Left)
   230  //
   231  // Do NOT write:
   232  //
   233  //    // BAD: possibly loses side-effects within n.Left
   234  //    if !x {
   235  //        e.value(k, n.Left)
   236  //    }
   237  
// stmt evaluates a single Go statement.
func (e *Escape) stmt(n *Node) {
	if n == nil {
		return
	}

	// Point diagnostics at this statement; restore on exit.
	lno := setlineno(n)
	defer func() {
		lineno = lno
	}()

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
	}

	e.stmts(n.Ninit)

	switch n.Op {
	default:
		Fatalf("unexpected stmt: %v", n)

	case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
		// nop

	case OBREAK, OCONTINUE, OGOTO:
		// TODO(mdempsky): Handle dead code?

	case OBLOCK:
		e.stmts(n.List)

	case ODCL:
		// Record loop depth at declaration.
		if !n.Left.isBlank() {
			e.dcl(n.Left)
		}

	case OLABEL:
		// Labels were tagged looping/non-looping by walkFunc.
		switch asNode(n.Sym.Label) {
		case &nonlooping:
			if Debug['m'] > 2 {
				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
			}
		case &looping:
			if Debug['m'] > 2 {
				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
			}
			// A label with a backwards goto behaves like
			// entering a "for" loop.
			e.loopDepth++
		default:
			Fatalf("label missing tag")
		}
		n.Sym.Label = nil

	case OIF:
		e.discard(n.Left)
		e.block(n.Nbody)
		e.block(n.Rlist)

	case OFOR, OFORUNTIL:
		e.loopDepth++
		e.discard(n.Left)
		e.stmt(n.Right)
		e.block(n.Nbody)
		e.loopDepth--

	case ORANGE:
		// for List = range Right { Nbody }
		e.loopDepth++
		ks := e.addrs(n.List)
		e.block(n.Nbody)
		e.loopDepth--

		// Right is evaluated outside the loop.
		// The second iteration variable receives an element of
		// Right: a direct flow for arrays, a dereference
		// otherwise.
		k := e.discardHole()
		if len(ks) >= 2 {
			if n.Right.Type.IsArray() {
				k = ks[1].note(n, "range")
			} else {
				k = ks[1].deref(n, "range-deref")
			}
		}
		e.expr(e.later(k), n.Right)

	case OSWITCH:
		typesw := n.Left != nil && n.Left.Op == OTYPESW

		// For a type switch with a declared variable, collect a
		// hole per pointerful case variable; the switched value
		// flows to all of them.
		var ks []EscHole
		for _, cas := range n.List.Slice() { // cases
			if typesw && n.Left.Left != nil {
				cv := cas.Rlist.First()
				k := e.dcl(cv) // type switch variables have no ODCL.
				if types.Haspointers(cv.Type) {
					ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
				}
			}

			e.discards(cas.List)
			e.block(cas.Nbody)
		}

		if typesw {
			e.expr(e.teeHole(ks...), n.Left.Right)
		} else {
			e.discard(n.Left)
		}

	case OSELECT:
		for _, cas := range n.List.Slice() {
			e.stmt(cas.Left)
			e.block(cas.Nbody)
		}
	case OSELRECV:
		e.assign(n.Left, n.Right, "selrecv", n)
	case OSELRECV2:
		e.assign(n.Left, n.Right, "selrecv", n)
		e.assign(n.List.First(), nil, "selrecv", n)
	case ORECV:
		// TODO(mdempsky): Consider e.discard(n.Left).
		e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
	case OSEND:
		// Values sent on a channel flow to the heap.
		e.discard(n.Left)
		e.assignHeap(n.Right, "send", n)

	case OAS, OASOP:
		e.assign(n.Left, n.Right, "assign", n)

	case OAS2:
		for i, nl := range n.List.Slice() {
			e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
		}

	case OAS2DOTTYPE: // v, ok = x.(type)
		e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
		e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
	case OAS2MAPR: // v, ok = m[k]
		e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
		e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
	case OAS2RECV: // v, ok = <-ch
		e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
		e.assign(n.List.Second(), nil, "assign-pair-receive", n)

	case OAS2FUNC:
		// The call's results flow into the assignment targets.
		e.stmts(n.Right.Ninit)
		e.call(e.addrs(n.List), n.Right, nil)
	case ORETURN:
		// Returned values flow into the result parameters.
		results := e.curfn.Type.Results().FieldSlice()
		for i, v := range n.List.Slice() {
			e.assign(asNode(results[i].Nname), v, "return", n)
		}
	case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
		e.call(nil, n, nil)
	case OGO, ODEFER:
		// The call itself is passed along with its go/defer
		// context so arguments can be handled specially.
		e.stmts(n.Left.Ninit)
		e.call(nil, n.Left, n)

	case ORETJMP:
		// TODO(mdempsky): What do? esc.go just ignores it.
	}
}
   396  
   397  func (e *Escape) stmts(l Nodes) {
   398  	for _, n := range l.Slice() {
   399  		e.stmt(n)
   400  	}
   401  }
   402  
   403  // block is like stmts, but preserves loopDepth.
   404  func (e *Escape) block(l Nodes) {
   405  	old := e.loopDepth
   406  	e.stmts(l)
   407  	e.loopDepth = old
   408  }
   409  
   410  // expr models evaluating an expression n and flowing the result into
   411  // hole k.
   412  func (e *Escape) expr(k EscHole, n *Node) {
   413  	if n == nil {
   414  		return
   415  	}
   416  	e.stmts(n.Ninit)
   417  	e.exprSkipInit(k, n)
   418  }
   419  
// exprSkipInit is like expr, but assumes n's initialization list
// (n.Ninit) has already been visited by the caller.
func (e *Escape) exprSkipInit(k EscHole, n *Node) {
	if n == nil {
		return
	}

	lno := setlineno(n)
	defer func() {
		lineno = lno
	}()

	// Evaluations of pointer-free values can't cause escapes, so
	// discard the destination hole.
	if k.derefs >= 0 && !types.Haspointers(n.Type) {
		k = e.discardHole()
	}

	switch n.Op {
	default:
		Fatalf("unexpected expr: %v", n)

	case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
		// nop

	case ONAME:
		if n.Class() == PFUNC || n.Class() == PEXTERN {
			// Functions and globals aren't stack
			// allocated; nothing to track.
			return
		}
		e.flow(k, e.oldLoc(n))

	case OPLUS, ONEG, OBITNOT, ONOT:
		e.discard(n.Left)
	case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
		e.discard(n.Left)
		e.discard(n.Right)

	case OADDR:
		e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
	case ODEREF:
		e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
	case ODOT, ODOTMETH, ODOTINTER:
		e.expr(k.note(n, "dot"), n.Left)
	case ODOTPTR:
		e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
	case ODOTTYPE, ODOTTYPE2:
		e.expr(k.dotType(n.Type, n, "dot"), n.Left)
	case OINDEX:
		if n.Left.Type.IsArray() {
			e.expr(k.note(n, "fixed-array-index-of"), n.Left)
		} else {
			// TODO(mdempsky): Fix why reason text.
			e.expr(k.deref(n, "dot of pointer"), n.Left)
		}
		e.discard(n.Right)
	case OINDEXMAP:
		// Map and key are evaluated for side effects only;
		// their values don't flow into the result hole.
		e.discard(n.Left)
		e.discard(n.Right)
	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
		e.expr(k.note(n, "slice"), n.Left)
		low, high, max := n.SliceBounds()
		e.discard(low)
		e.discard(high)
		e.discard(max)

	case OCONV, OCONVNOP:
		if checkPtr(e.curfn, 2) && n.Type.Etype == TUNSAFEPTR && n.Left.Type.IsPtr() {
			// When -d=checkptr=2 is enabled, treat
			// conversions to unsafe.Pointer as an
			// escaping operation. This allows better
			// runtime instrumentation, since we can more
			// easily detect object boundaries on the heap
			// than the stack.
			e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
		} else if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR {
			// uintptr->unsafe.Pointer conversion: track
			// pointer bits hidden in the uintptr operand.
			e.unsafeValue(k, n.Left)
		} else {
			e.expr(k, n.Left)
		}
	case OCONVIFACE:
		if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
			// Boxing a non-pointer-shaped value into an
			// interface requires an allocation.
			k = e.spill(k, n)
		}
		e.expr(k.note(n, "interface-converted"), n.Left)

	case ORECV:
		e.discard(n.Left)

	case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
		e.call([]EscHole{k}, n, nil)

	case ONEW:
		e.spill(k, n)

	case OMAKESLICE:
		e.spill(k, n)
		e.discard(n.Left)
		e.discard(n.Right)
	case OMAKECHAN:
		e.discard(n.Left)
	case OMAKEMAP:
		e.spill(k, n)
		e.discard(n.Left)

	case ORECOVER:
		// nop

	case OCALLPART:
		e.spill(k, n)

		// TODO(mdempsky): We can do better here. See #27557.
		e.assignHeap(n.Left, "call part", n)

	case OPTRLIT:
		// &T{...}: spill the composite literal, then evaluate
		// its elements into the spilled location.
		e.expr(e.spill(k, n), n.Left)

	case OARRAYLIT:
		for _, elt := range n.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.expr(k.note(n, "array literal element"), elt)
		}

	case OSLICELIT:
		k = e.spill(k, n)

		for _, elt := range n.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.expr(k.note(n, "slice-literal-element"), elt)
		}

	case OSTRUCTLIT:
		for _, elt := range n.List.Slice() {
			e.expr(k.note(n, "struct literal element"), elt.Left)
		}

	case OMAPLIT:
		e.spill(k, n)

		// Map keys and values are always stored in the heap.
		for _, elt := range n.List.Slice() {
			e.assignHeap(elt.Left, "map literal key", n)
			e.assignHeap(elt.Right, "map literal value", n)
		}

	case OCLOSURE:
		k = e.spill(k, n)

		// Link addresses of captured variables to closure.
		for _, v := range n.Func.Closure.Func.Cvars.Slice() {
			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
				continue
			}

			// Variables captured by reference flow as an
			// address; by-value captures flow directly.
			k := k
			if !v.Name.Byval() {
				k = k.addr(v, "reference")
			}

			e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
		}

	case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
		e.spill(k, n)
		e.discard(n.Left)

	case OADDSTR:
		e.spill(k, n)

		// Arguments of OADDSTR never escape;
		// runtime.concatstrings makes sure of that.
		e.discards(n.List)
	}
}
   593  
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
func (e *Escape) unsafeValue(k EscHole, n *Node) {
	if n.Type.Etype != TUINTPTR {
		Fatalf("unexpected type %v for %v", n.Type, n)
	}

	e.stmts(n.Ninit)

	switch n.Op {
	case OCONV, OCONVNOP:
		if n.Left.Type.Etype == TUNSAFEPTR {
			// unsafe.Pointer->uintptr: the pointer value
			// flows into k.
			e.expr(k, n.Left)
		} else {
			e.discard(n.Left)
		}
	case ODOTPTR:
		if isReflectHeaderDataField(n) {
			e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
		} else {
			e.discard(n.Left)
		}
	case OPLUS, ONEG, OBITNOT:
		// Unary arithmetic may preserve pointer bits.
		e.unsafeValue(k, n.Left)
	case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
		// Either operand of binary arithmetic may carry
		// pointer bits.
		e.unsafeValue(k, n.Left)
		e.unsafeValue(k, n.Right)
	case OLSH, ORSH:
		e.unsafeValue(k, n.Left)
		// RHS need not be uintptr-typed (#32959) and can't meaningfully
		// flow pointers anyway.
		e.discard(n.Right)
	default:
		// Not a recognized pointer-carrying form; evaluate for
		// side effects only.
		e.exprSkipInit(e.discardHole(), n)
	}
}
   630  
   631  // discard evaluates an expression n for side-effects, but discards
   632  // its value.
   633  func (e *Escape) discard(n *Node) {
   634  	e.expr(e.discardHole(), n)
   635  }
   636  
   637  func (e *Escape) discards(l Nodes) {
   638  	for _, n := range l.Slice() {
   639  		e.discard(n)
   640  	}
   641  }
   642  
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
func (e *Escape) addr(n *Node) EscHole {
	if n == nil || n.isBlank() {
		// Can happen at least in OSELRECV.
		// TODO(mdempsky): Anywhere else?
		return e.discardHole()
	}

	// Conservative default: an unresolvable store target flows
	// to the heap.
	k := e.heapHole()

	switch n.Op {
	default:
		Fatalf("unexpected addr: %v", n)
	case ONAME:
		if n.Class() == PEXTERN {
			// Globals keep the conservative heap hole.
			break
		}
		k = e.oldLoc(n).asHole()
	case ODOT:
		// Storing to a field is modeled as storing to the
		// whole variable.
		k = e.addr(n.Left)
	case OINDEX:
		e.discard(n.Right)
		if n.Left.Type.IsArray() {
			k = e.addr(n.Left)
		} else {
			// Slice element stores go through a pointer;
			// evaluate the slice for side effects only.
			e.discard(n.Left)
		}
	case ODEREF, ODOTPTR:
		e.discard(n)
	case OINDEXMAP:
		e.discard(n.Left)
		e.assignHeap(n.Right, "key of map put", n)
	}

	if !types.Haspointers(n.Type) {
		// Stores of pointer-free values can't cause escapes.
		k = e.discardHole()
	}

	return k
}
   684  
   685  func (e *Escape) addrs(l Nodes) []EscHole {
   686  	var ks []EscHole
   687  	for _, n := range l.Slice() {
   688  		ks = append(ks, e.addr(n))
   689  	}
   690  	return ks
   691  }
   692  
   693  // assign evaluates the assignment dst = src.
   694  func (e *Escape) assign(dst, src *Node, why string, where *Node) {
   695  	// Filter out some no-op assignments for escape analysis.
   696  	ignore := dst != nil && src != nil && isSelfAssign(dst, src)
   697  	if ignore && Debug['m'] != 0 {
   698  		Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
   699  	}
   700  
   701  	k := e.addr(dst)
   702  	if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
   703  		e.unsafeValue(e.heapHole().note(where, why), src)
   704  	} else {
   705  		if ignore {
   706  			k = e.discardHole()
   707  		}
   708  		e.expr(k.note(where, why), src)
   709  	}
   710  }
   711  
   712  func (e *Escape) assignHeap(src *Node, why string, where *Node) {
   713  	e.expr(e.heapHole().note(where, why), src)
   714  }
   715  
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow; where is the OGO/ODEFER context of the call, if any.
func (e *Escape) call(ks []EscHole, call, where *Node) {
	// First, pick out the function callee, its type, and receiver
	// (if any) and normal arguments list.
	var fn, recv *Node
	var fntype *types.Type
	args := call.List.Slice()
	switch call.Op {
	case OCALLFUNC:
		fn = call.Left
		if fn.Op == OCLOSURE {
			fn = fn.Func.Closure.Func.Nname
		}
		fntype = fn.Type
	case OCALLMETH:
		fn = asNode(call.Left.Type.FuncType().Nname)
		fntype = fn.Type
		recv = call.Left.Left
	case OCALLINTER:
		fntype = call.Left.Type
		recv = call.Left.Left
	case OAPPEND, ODELETE, OPRINT, OPRINTN, ORECOVER:
		// ok
	case OLEN, OCAP, OREAL, OIMAG, OCLOSE, OPANIC:
		args = []*Node{call.Left}
	case OCOMPLEX, OCOPY:
		args = []*Node{call.Left, call.Right}
	default:
		Fatalf("unexpected call op: %v", call.Op)
	}

	// static reports whether the callee is resolved to a known
	// function at compile time.
	static := fn != nil && fn.Op == ONAME && fn.Class() == PFUNC

	// Setup evaluation holes for each receiver/argument.
	var recvK EscHole
	var paramKs []EscHole

	if static && fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
		// Static call to function in same mutually recursive
		// group; incorporate into data flow graph.

		if fn.Name.Defn.Esc == EscFuncUnknown {
			Fatalf("graph inconsistency")
		}

		// Wire results directly to the callee's result
		// parameters, and arguments to its parameters.
		if ks != nil {
			for i, result := range fntype.Results().FieldSlice() {
				e.expr(ks[i], asNode(result.Nname))
			}
		}

		if r := fntype.Recv(); r != nil {
			recvK = e.addr(asNode(r.Nname))
		}
		for _, param := range fntype.Params().FieldSlice() {
			paramKs = append(paramKs, e.addr(asNode(param.Nname)))
		}
	} else if call.Op == OCALLFUNC || call.Op == OCALLMETH || call.Op == OCALLINTER {
		// Dynamic call, or call to previously tagged
		// function. Setup flows to heap and/or ks according
		// to parameter tags.
		if r := fntype.Recv(); r != nil {
			recvK = e.tagHole(ks, r, static)
		}
		for _, param := range fntype.Params().FieldSlice() {
			paramKs = append(paramKs, e.tagHole(ks, param, static))
		}
	} else {
		// Handle escape analysis for builtins.
		// By default, we just discard everything.
		for range args {
			paramKs = append(paramKs, e.discardHole())
		}

		switch call.Op {
		case OAPPEND:
			// Appendee slice may flow directly to the
			// result, if it has enough capacity.
			// Alternatively, a new heap slice might be
			// allocated, and all slice elements might
			// flow to heap.
			paramKs[0] = e.teeHole(paramKs[0], ks[0])
			if types.Haspointers(args[0].Type.Elem()) {
				paramKs[0] = e.teeHole(paramKs[0], e.heapHole().deref(call, "appendee slice"))
			}

			if call.IsDDD() {
				if args[1].Type.IsSlice() && types.Haspointers(args[1].Type.Elem()) {
					paramKs[1] = e.teeHole(paramKs[1], e.heapHole().deref(call, "appended slice..."))
				}
			} else {
				// Individual appended values may be
				// stored into the heap slice.
				for i := 1; i < len(args); i++ {
					paramKs[i] = e.heapHole()
				}
			}

		case OCOPY:
			if call.Right.Type.IsSlice() && types.Haspointers(call.Right.Type.Elem()) {
				paramKs[1] = e.teeHole(paramKs[1], e.heapHole().deref(call, "copied slice"))
			}

		case OPANIC:
			paramKs[0] = e.heapHole()
		}
	}

	if call.Op == OCALLFUNC {
		// Evaluate callee function expression.
		e.expr(e.augmentParamHole(e.discardHole(), call, where), call.Left)
	}

	if recv != nil {
		// TODO(mdempsky): Handle go:uintptrescapes here too?
		e.expr(e.augmentParamHole(recvK, call, where), recv)
	}

	// Apply augmentParamHole before ODDDARG so that it affects
	// the implicit slice allocation for variadic calls, if any.
	for i, paramK := range paramKs {
		paramKs[i] = e.augmentParamHole(paramK, call, where)
	}

	// TODO(mdempsky): Remove after early ddd-ification.
	if fntype != nil && fntype.IsVariadic() && !call.IsDDD() {
		vi := fntype.NumParams() - 1

		elt := fntype.Params().Field(vi).Type.Elem()
		nva := call.List.Len()
		nva -= vi

		// Introduce ODDDARG node to represent ... allocation.
		ddd := nodl(call.Pos, ODDDARG, nil, nil)
		ddd.Type = types.NewPtr(types.NewArray(elt, int64(nva)))
		call.Right = ddd

		// All variadic arguments flow into the ODDDARG
		// allocation, which itself spills to the final
		// parameter's hole.
		dddK := e.spill(paramKs[vi], ddd)
		paramKs = paramKs[:vi]
		for i := 0; i < nva; i++ {
			paramKs = append(paramKs, dddK)
		}
	}

	for i, arg := range args {
		// For arguments to go:uintptrescapes, peel
		// away an unsafe.Pointer->uintptr conversion,
		// if present.
		if static && arg.Op == OCONVNOP && arg.Type.Etype == TUINTPTR && arg.Left.Type.Etype == TUNSAFEPTR {
			x := i
			if fntype.IsVariadic() && x >= fntype.NumParams() {
				x = fntype.NumParams() - 1
			}
			if fntype.Params().Field(x).Note == uintptrEscapesTag {
				arg = arg.Left
			}
		}

		// no augmentParamHole here; handled in loop before ODDDARG
		e.expr(paramKs[i], arg)
	}
}
   878  
   879  // augmentParamHole augments parameter holes as necessary for use in
   880  // go/defer statements.
   881  func (e *Escape) augmentParamHole(k EscHole, call, where *Node) EscHole {
   882  	k = k.note(call, "call parameter")
   883  	if where == nil {
   884  		return k
   885  	}
   886  
   887  	// Top level defers arguments don't escape to heap, but they
   888  	// do need to last until end of function. Tee with a
   889  	// non-transient location to avoid arguments from being
   890  	// transiently allocated.
   891  	if where.Op == ODEFER && e.loopDepth == 1 {
   892  		// force stack allocation of defer record, unless open-coded
   893  		// defers are used (see ssa.go)
   894  		where.Esc = EscNever
   895  		return e.later(k)
   896  	}
   897  
   898  	return e.heapHole().note(where, "call parameter")
   899  }
   900  
   901  // tagHole returns a hole for evaluating an argument passed to param.
   902  // ks should contain the holes representing where the function
   903  // callee's results flows; static indicates whether this is a static
   904  // call.
   905  func (e *Escape) tagHole(ks []EscHole, param *types.Field, static bool) EscHole {
   906  	// If this is a dynamic call, we can't rely on param.Note.
   907  	if !static {
   908  		return e.heapHole()
   909  	}
   910  
   911  	var tagKs []EscHole
   912  
   913  	esc := ParseLeaks(param.Note)
   914  	if x := esc.Heap(); x >= 0 {
   915  		tagKs = append(tagKs, e.heapHole().shift(x))
   916  	}
   917  
   918  	if ks != nil {
   919  		for i := 0; i < numEscResults; i++ {
   920  			if x := esc.Result(i); x >= 0 {
   921  				tagKs = append(tagKs, ks[i].shift(x))
   922  			}
   923  		}
   924  	}
   925  
   926  	return e.teeHole(tagKs...)
   927  }
   928  
// An EscHole represents a context for evaluating a Go
// expression. E.g., when evaluating p in "x = **p", we'd have a hole
// with dst==x and derefs==2.
type EscHole struct {
	dst    *EscLocation // destination location
	derefs int          // >= -1; dereferences applied before reaching dst
	notes  *EscNote     // diagnostic annotations, populated only with -m >= 2
}
   937  
// An EscNote is one entry in a linked list of diagnostic annotations
// explaining why a flow edge exists, used for -m output.
type EscNote struct {
	next  *EscNote // earlier annotations on the same hole
	where *Node    // the node responsible for the flow
	why   string   // human-readable reason
}
   943  
   944  func (k EscHole) note(where *Node, why string) EscHole {
   945  	if where == nil || why == "" {
   946  		Fatalf("note: missing where/why")
   947  	}
   948  	if Debug['m'] >= 2 {
   949  		k.notes = &EscNote{
   950  			next:  k.notes,
   951  			where: where,
   952  			why:   why,
   953  		}
   954  	}
   955  	return k
   956  }
   957  
   958  func (k EscHole) shift(delta int) EscHole {
   959  	k.derefs += delta
   960  	if k.derefs < -1 {
   961  		Fatalf("derefs underflow: %v", k.derefs)
   962  	}
   963  	return k
   964  }
   965  
   966  func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
   967  func (k EscHole) addr(where *Node, why string) EscHole  { return k.shift(-1).note(where, why) }
   968  
   969  func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
   970  	if !t.IsInterface() && !isdirectiface(t) {
   971  		k = k.shift(1)
   972  	}
   973  	return k.note(where, why)
   974  }
   975  
   976  // teeHole returns a new hole that flows into each hole of ks,
   977  // similar to the Unix tee(1) command.
   978  func (e *Escape) teeHole(ks ...EscHole) EscHole {
   979  	if len(ks) == 0 {
   980  		return e.discardHole()
   981  	}
   982  	if len(ks) == 1 {
   983  		return ks[0]
   984  	}
   985  	// TODO(mdempsky): Optimize if there's only one non-discard hole?
   986  
   987  	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
   988  	// new temporary location ltmp, wire it into place, and return
   989  	// a hole for "ltmp = _".
   990  	loc := e.newLoc(nil, true)
   991  	for _, k := range ks {
   992  		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
   993  		// semantically equivalent. To combine holes like "l1
   994  		// = _" and "l2 = &_", we'd need to wire them as "l1 =
   995  		// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
   996  		// instead.
   997  		if k.derefs < 0 {
   998  			Fatalf("teeHole: negative derefs")
   999  		}
  1000  
  1001  		e.flow(k, loc)
  1002  	}
  1003  	return loc.asHole()
  1004  }
  1005  
  1006  func (e *Escape) dcl(n *Node) EscHole {
  1007  	loc := e.oldLoc(n)
  1008  	loc.loopDepth = e.loopDepth
  1009  	return loc.asHole()
  1010  }
  1011  
  1012  // spill allocates a new location associated with expression n, flows
  1013  // its address to k, and returns a hole that flows values to it. It's
  1014  // intended for use with most expressions that allocate storage.
  1015  func (e *Escape) spill(k EscHole, n *Node) EscHole {
  1016  	loc := e.newLoc(n, true)
  1017  	e.flow(k.addr(n, "spill"), loc)
  1018  	return loc.asHole()
  1019  }
  1020  
  1021  // later returns a new hole that flows into k, but some time later.
  1022  // Its main effect is to prevent immediate reuse of temporary
  1023  // variables introduced during Order.
  1024  func (e *Escape) later(k EscHole) EscHole {
  1025  	loc := e.newLoc(nil, false)
  1026  	e.flow(k, loc)
  1027  	return loc.asHole()
  1028  }
  1029  
  1030  // canonicalNode returns the canonical *Node that n logically
  1031  // represents.
  1032  func canonicalNode(n *Node) *Node {
  1033  	if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
  1034  		n = n.Name.Defn
  1035  		if n.Name.IsClosureVar() {
  1036  			Fatalf("still closure var")
  1037  		}
  1038  	}
  1039  
  1040  	return n
  1041  }
  1042  
  1043  func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
  1044  	if e.curfn == nil {
  1045  		Fatalf("e.curfn isn't set")
  1046  	}
  1047  
  1048  	n = canonicalNode(n)
  1049  	loc := &EscLocation{
  1050  		n:         n,
  1051  		curfn:     e.curfn,
  1052  		loopDepth: e.loopDepth,
  1053  		transient: transient,
  1054  	}
  1055  	e.allLocs = append(e.allLocs, loc)
  1056  	if n != nil {
  1057  		if n.Op == ONAME && n.Name.Curfn != e.curfn {
  1058  			Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
  1059  		}
  1060  
  1061  		if n.HasOpt() {
  1062  			Fatalf("%v already has a location", n)
  1063  		}
  1064  		n.SetOpt(loc)
  1065  
  1066  		if mustHeapAlloc(n) {
  1067  			why := "too large for stack"
  1068  			if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || !Isconst(n.Right, CTINT)) {
  1069  				why = "non-constant size"
  1070  			}
  1071  			e.flow(e.heapHole().addr(n, why), loc)
  1072  		}
  1073  	}
  1074  	return loc
  1075  }
  1076  
  1077  func (e *Escape) oldLoc(n *Node) *EscLocation {
  1078  	n = canonicalNode(n)
  1079  	return n.Opt().(*EscLocation)
  1080  }
  1081  
// asHole returns a hole that flows directly into l (zero derefs).
func (l *EscLocation) asHole() EscHole {
	return EscHole{dst: l}
}
  1085  
// flow adds an assignment flow edge "dst = ...src" (with k.derefs
// dereferences) to the graph, or resolves the flow immediately when
// its outcome is already determined.
func (e *Escape) flow(k EscHole, src *EscLocation) {
	dst := k.dst
	// Flows into the blank location are discarded entirely.
	if dst == &e.blankLoc {
		return
	}
	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
		return
	}
	if dst.escapes && k.derefs < 0 { // dst = &src
		// Storing src's address in an already-escaping location
		// forces src to the heap right away; no edge needed.
		if Debug['m'] >= 2 {
			pos := linestr(src.n.Pos)
			fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
			e.explainFlow(pos, dst, src, k.derefs, k.notes)
		}
		src.escapes = true
		return
	}

	// TODO(mdempsky): Deduplicate edges?
	dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes})
}
  1107  
// heapHole returns a hole representing assignment to the heap.
func (e *Escape) heapHole() EscHole    { return e.heapLoc.asHole() }
// discardHole returns a hole whose flows are discarded (see flow).
func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
  1110  
  1111  // walkAll computes the minimal dereferences between all pairs of
  1112  // locations.
  1113  func (e *Escape) walkAll() {
  1114  	// We use a work queue to keep track of locations that we need
  1115  	// to visit, and repeatedly walk until we reach a fixed point.
  1116  	//
  1117  	// We walk once from each location (including the heap), and
  1118  	// then re-enqueue each location on its transition from
  1119  	// transient->!transient and !escapes->escapes, which can each
  1120  	// happen at most once. So we take Θ(len(e.allLocs)) walks.
  1121  
  1122  	var todo []*EscLocation // LIFO queue
  1123  	enqueue := func(loc *EscLocation) {
  1124  		if !loc.queued {
  1125  			todo = append(todo, loc)
  1126  			loc.queued = true
  1127  		}
  1128  	}
  1129  
  1130  	for _, loc := range e.allLocs {
  1131  		enqueue(loc)
  1132  	}
  1133  	enqueue(&e.heapLoc)
  1134  
  1135  	var walkgen uint32
  1136  	for len(todo) > 0 {
  1137  		root := todo[len(todo)-1]
  1138  		todo = todo[:len(todo)-1]
  1139  		root.queued = false
  1140  
  1141  		walkgen++
  1142  		e.walkOne(root, walkgen, enqueue)
  1143  	}
  1144  }
  1145  
// walkOne computes the minimal number of dereferences from root to
// all other locations.
func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) {
	// The data flow graph has negative edges (from addressing
	// operations), so we use the Bellman-Ford algorithm. However,
	// we don't have to worry about infinite negative cycles since
	// we bound intermediate dereference counts to 0.

	// walkgen stamps distinguish this walk's distances from stale
	// ones left by earlier walks, avoiding a reset pass.
	root.walkgen = walkgen
	root.derefs = 0
	root.dst = nil

	todo := []*EscLocation{root} // LIFO queue
	for len(todo) > 0 {
		l := todo[len(todo)-1]
		todo = todo[:len(todo)-1]

		base := l.derefs

		// If l.derefs < 0, then l's address flows to root.
		addressOf := base < 0
		if addressOf {
			// For a flow path like "root = &l; l = x",
			// l's address flows to root, but x's does
			// not. We recognize this by lower bounding
			// base at 0.
			base = 0

			// If l's address flows to a non-transient
			// location, then l can't be transiently
			// allocated.
			if !root.transient && l.transient {
				l.transient = false
				enqueue(l)
			}
		}

		if e.outlives(root, l) {
			// l's value flows to root. If l is a function
			// parameter and root is the heap or a
			// corresponding result parameter, then record
			// that value flow for tagging the function
			// later.
			if l.isName(PPARAM) {
				if Debug['m'] >= 2 && !l.escapes {
					fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
					e.explainPath(root, l)
				}
				l.leakTo(root, base)
			}

			// If l's address flows somewhere that
			// outlives it, then l needs to be heap
			// allocated.
			if addressOf && !l.escapes {
				if Debug['m'] >= 2 {
					fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
					e.explainPath(root, l)
				}
				l.escapes = true
				enqueue(l)
				// Once escaped, l's outgoing edges no
				// longer matter for this walk.
				continue
			}
		}

		// Relax outgoing edges (the Bellman-Ford step): revisit
		// edge.src when this walk found a strictly shorter path,
		// recording dst/dstEdgeIdx so explainPath can retrace it.
		for i, edge := range l.edges {
			if edge.src.escapes {
				continue
			}
			derefs := base + edge.derefs
			if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
				edge.src.walkgen = walkgen
				edge.src.derefs = derefs
				edge.src.dst = l
				edge.src.dstEdgeIdx = i
				todo = append(todo, edge.src)
			}
		}
	}
}
  1226  
  1227  // explainPath prints an explanation of how src flows to the walk root.
  1228  func (e *Escape) explainPath(root, src *EscLocation) {
  1229  	visited := make(map[*EscLocation]bool)
  1230  
  1231  	pos := linestr(src.n.Pos)
  1232  	for {
  1233  		// Prevent infinite loop.
  1234  		if visited[src] {
  1235  			fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
  1236  			break
  1237  		}
  1238  		visited[src] = true
  1239  
  1240  		dst := src.dst
  1241  		edge := &dst.edges[src.dstEdgeIdx]
  1242  		if edge.src != src {
  1243  			Fatalf("path inconsistency: %v != %v", edge.src, src)
  1244  		}
  1245  
  1246  		e.explainFlow(pos, dst, src, edge.derefs, edge.notes)
  1247  
  1248  		if dst == root {
  1249  			break
  1250  		}
  1251  		src = dst
  1252  	}
  1253  }
  1254  
  1255  func (e *Escape) explainFlow(pos string, dst, src *EscLocation, derefs int, notes *EscNote) {
  1256  	ops := "&"
  1257  	if derefs >= 0 {
  1258  		ops = strings.Repeat("*", derefs)
  1259  	}
  1260  
  1261  	fmt.Printf("%s:   flow: %s = %s%v:\n", pos, e.explainLoc(dst), ops, e.explainLoc(src))
  1262  	for note := notes; note != nil; note = note.next {
  1263  		fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
  1264  	}
  1265  }
  1266  
  1267  func (e *Escape) explainLoc(l *EscLocation) string {
  1268  	if l == &e.heapLoc {
  1269  		return "{heap}"
  1270  	}
  1271  	if l.n == nil {
  1272  		// TODO(mdempsky): Omit entirely.
  1273  		return "{temp}"
  1274  	}
  1275  	if l.n.Op == ONAME {
  1276  		return fmt.Sprintf("%v", l.n)
  1277  	}
  1278  	return fmt.Sprintf("{storage for %v}", l.n)
  1279  }
  1280  
// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
func (e *Escape) outlives(l, other *EscLocation) bool {
	// The heap outlives everything.
	if l.escapes {
		return true
	}

	// We don't know what callers do with returned values, so
	// pessimistically we need to assume they flow to the heap and
	// outlive everything too.
	if l.isName(PPARAMOUT) {
		// Exception: Directly called closures can return
		// locations allocated outside of them without forcing
		// them to the heap. For example:
		//
		//    var u int  // okay to stack allocate
		//    *(func() *int { return &u }()) = 42
		//
		// Here l is the closure's result parameter and other was
		// declared in a function enclosing the closure; ctxCallee
		// means the closure is only ever called directly.
		if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
			return false
		}

		return true
	}

	// If l and other are within the same function, then l
	// outlives other if it was declared outside other's loop
	// scope. For example:
	//
	//    var l *int
	//    for {
	//        l = new(int)
	//    }
	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
		return true
	}

	// If other is declared within a child closure of where l is
	// declared, then l outlives it. For example:
	//
	//    var l *int
	//    func() {
	//        l = new(int)
	//    }
	if containsClosure(l.curfn, other.curfn) {
		return true
	}

	return false
}
  1331  
  1332  // containsClosure reports whether c is a closure contained within f.
  1333  func containsClosure(f, c *Node) bool {
  1334  	if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
  1335  		Fatalf("bad containsClosure: %v, %v", f, c)
  1336  	}
  1337  
  1338  	// Common case.
  1339  	if f == c {
  1340  		return false
  1341  	}
  1342  
  1343  	// Closures within function Foo are named like "Foo.funcN..."
  1344  	// TODO(mdempsky): Better way to recognize this.
  1345  	fn := f.Func.Nname.Sym.Name
  1346  	cn := c.Func.Nname.Sym.Name
  1347  	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
  1348  }
  1349  
  1350  // leak records that parameter l leaks to sink.
  1351  func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
  1352  	// If sink is a result parameter and we can fit return bits
  1353  	// into the escape analysis tag, then record a return leak.
  1354  	if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
  1355  		// TODO(mdempsky): Eliminate dependency on Vargen here.
  1356  		ri := int(sink.n.Name.Vargen) - 1
  1357  		if ri < numEscResults {
  1358  			// Leak to result parameter.
  1359  			l.paramEsc.AddResult(ri, derefs)
  1360  			return
  1361  		}
  1362  	}
  1363  
  1364  	// Otherwise, record as heap leak.
  1365  	l.paramEsc.AddHeap(derefs)
  1366  }
  1367  
// finish records parameter escape tags for export data and writes the
// analysis results back onto each analyzed node's Esc field.
func (e *Escape) finish(fns []*Node) {
	// Record parameter tags for package export data.
	for _, fn := range fns {
		fn.Esc = EscFuncTagged

		// narg numbers the receiver and parameters consecutively
		// across both field lists.
		narg := 0
		for _, fs := range types.RecvsParams {
			for _, f := range fs(fn.Type).Fields().Slice() {
				narg++
				f.Note = e.paramTag(fn, narg, f)
			}
		}
	}

	for _, loc := range e.allLocs {
		n := loc.n
		if n == nil {
			continue
		}
		// Clear the node->location link installed by newLoc.
		n.SetOpt(nil)

		// Update n.Esc based on escape analysis results.

		if loc.escapes {
			// Escape diagnostics are only printed for
			// allocations (non-ONAME nodes).
			if n.Op != ONAME {
				if Debug['m'] != 0 {
					Warnl(n.Pos, "%S escapes to heap", n)
				}
				if logopt.Enabled() {
					logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
				}
			}
			n.Esc = EscHeap
			addrescapes(n)
		} else {
			if Debug['m'] != 0 && n.Op != ONAME {
				Warnl(n.Pos, "%S does not escape", n)
			}
			n.Esc = EscNone
			if loc.transient {
				n.SetTransient(true)
			}
		}
	}
}
  1413  
  1414  func (l *EscLocation) isName(c Class) bool {
  1415  	return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
  1416  }
  1417  
// numEscResults is the maximum number of result parameters tracked
// individually in an EscLeaks; leaks to later results are recorded
// as heap leaks instead (see leakTo).
const numEscResults = 7

// An EscLeaks represents a set of assignment flows from a parameter
// to the heap or to any of its function's (first numEscResults)
// result parameters. Byte 0 encodes the heap flow; bytes 1..7 encode
// result flows. Each byte stores derefs+1, with 0 meaning "no flow".
type EscLeaks [1 + numEscResults]uint8
  1424  
// Empty reports whether l is an empty set (i.e., no assignment flows).
func (l EscLeaks) Empty() bool { return l == EscLeaks{} }

// Heap returns the minimum deref count of any assignment flow from l
// to the heap. If no such flows exist, Heap returns -1.
func (l EscLeaks) Heap() int { return l.get(0) }

// Result returns the minimum deref count of any assignment flow from
// l to its function's i'th result parameter. If no such flows exist,
// Result returns -1.
func (l EscLeaks) Result(i int) int { return l.get(1 + i) }

// AddHeap adds an assignment flow from l to the heap.
func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }

// AddResult adds an assignment flow from l to its function's i'th
// result parameter.
func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }

// setResult unconditionally stores derefs for the i'th result, even
// if it is larger than the previously recorded count (used by
// Optimize to erase subsumed flows).
func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }

// get decodes the byte at index i back to a deref count; the zero
// byte means "no flow" and decodes to -1.
func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
  1447  
  1448  func (l *EscLeaks) add(i, derefs int) {
  1449  	if old := l.get(i); old < 0 || derefs < old {
  1450  		l.set(i, derefs)
  1451  	}
  1452  }
  1453  
  1454  func (l *EscLeaks) set(i, derefs int) {
  1455  	v := derefs + 1
  1456  	if v < 0 {
  1457  		Fatalf("invalid derefs count: %v", derefs)
  1458  	}
  1459  	if v > math.MaxUint8 {
  1460  		v = math.MaxUint8
  1461  	}
  1462  
  1463  	l[i] = uint8(v)
  1464  }
  1465  
  1466  // Optimize removes result flow paths that are equal in length or
  1467  // longer than the shortest heap flow path.
  1468  func (l *EscLeaks) Optimize() {
  1469  	// If we have a path to the heap, then there's no use in
  1470  	// keeping equal or longer paths elsewhere.
  1471  	if x := l.Heap(); x >= 0 {
  1472  		for i := 0; i < numEscResults; i++ {
  1473  			if l.Result(i) >= x {
  1474  				l.setResult(i, -1)
  1475  			}
  1476  		}
  1477  	}
  1478  }
  1479  
// leakTagCache interns encoded leak tags so identical EscLeaks values
// share one string (see Encode).
var leakTagCache = map[EscLeaks]string{}
  1481  
  1482  // Encode converts l into a binary string for export data.
  1483  func (l EscLeaks) Encode() string {
  1484  	if l.Heap() == 0 {
  1485  		// Space optimization: empty string encodes more
  1486  		// efficiently in export data.
  1487  		return ""
  1488  	}
  1489  	if s, ok := leakTagCache[l]; ok {
  1490  		return s
  1491  	}
  1492  
  1493  	n := len(l)
  1494  	for n > 0 && l[n-1] == 0 {
  1495  		n--
  1496  	}
  1497  	s := "esc:" + string(l[:n])
  1498  	leakTagCache[l] = s
  1499  	return s
  1500  }
  1501  
  1502  // ParseLeaks parses a binary string representing an EscLeaks.
  1503  func ParseLeaks(s string) EscLeaks {
  1504  	var l EscLeaks
  1505  	if !strings.HasPrefix(s, "esc:") {
  1506  		l.AddHeap(0)
  1507  		return l
  1508  	}
  1509  	copy(l[:], s[4:])
  1510  	return l
  1511  }