github.com/Filosottile/go@v0.0.0-20170906193555-dbed9972d994/src/cmd/compile/internal/gc/plive.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector liveness bitmap generation.
     6  
     7  // The command line flag -live causes this code to print debug information.
     8  // The levels are:
     9  //
    10  //	-live (aka -live=1): print liveness lists as code warnings at safe points
    11  //	-live=2: print an assembly listing with liveness annotations
    12  //
    13  // Each level includes the earlier output as well.
    14  
    15  package gc
    16  
    17  import (
    18  	"cmd/compile/internal/ssa"
    19  	"cmd/compile/internal/types"
    20  	"cmd/internal/obj"
    21  	"cmd/internal/objabi"
    22  	"cmd/internal/src"
    23  	"crypto/md5"
    24  	"crypto/sha1"
    25  	"fmt"
    26  	"os"
    27  	"strings"
    28  )
    29  
    30  // TODO(mdempsky): Update to reference OpVar{Def,Kill,Live} instead.
    31  
    32  // VARDEF is an annotation for the liveness analysis, marking a place
    33  // where a complete initialization (definition) of a variable begins.
    34  // Since the liveness analysis can see initialization of single-word
    35  // variables quite easily, gvardef is usually only called for multi-word
    36  // or 'fat' variables, those satisfying isfat(n->type).
    37  // However, gvardef is also called when a non-fat variable is initialized
    38  // via a block move; the only time this happens is when you have
    39  //	return f()
    40  // for a function with multiple return values exactly matching the return
    41  // types of the current function.
    42  //
    43  // A 'VARDEF x' annotation in the instruction stream tells the liveness
    44  // analysis to behave as though the variable x is being initialized at that
    45  // point in the instruction stream. The VARDEF must appear before the
    46  // actual (multi-instruction) initialization, and it must also appear after
    47  // any uses of the previous value, if any. For example, if compiling:
    48  //
    49  //	x = x[1:]
    50  //
    51  // it is important to generate code like:
    52  //
    53  //	base, len, cap = pieces of x[1:]
    54  //	VARDEF x
    55  //	x = {base, len, cap}
    56  //
    57  // If instead the generated code looked like:
    58  //
    59  //	VARDEF x
    60  //	base, len, cap = pieces of x[1:]
    61  //	x = {base, len, cap}
    62  //
    63  // then the liveness analysis would decide the previous value of x was
    64  // unnecessary even though it is about to be used by the x[1:] computation.
    65  // Similarly, if the generated code looked like:
    66  //
    67  //	base, len, cap = pieces of x[1:]
    68  //	x = {base, len, cap}
    69  //	VARDEF x
    70  //
    71  // then the liveness analysis will not preserve the new value of x, because
    72  // the VARDEF appears to have "overwritten" it.
    73  //
    74  // VARDEF is a bit of a kludge to work around the fact that the instruction
    75  // stream is working on single-word values but the liveness analysis
    76  // wants to work on individual variables, which might be multi-word
    77  // aggregates. It might make sense at some point to look into letting
    78  // the liveness analysis work on single-word values as well, although
    79  // there are complications around interface values, slices, and strings,
    80  // all of which cannot be treated as individual words.
    81  //
    82  // VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
    83  // even if its address has been taken. That is, a VARKILL annotation asserts
    84  // that its argument is certainly dead, for use when the liveness analysis
    85  // would not otherwise be able to deduce that fact.
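        //
        // To make the ordering constraints concrete, here is a hypothetical
        // (hand-written, not compiler output) annotation sequence for a fat
        // variable x, covering both its definition and its death:
        //
        //	base, len, cap = pieces of x[1:]  // reads the old x
        //	VARDEF x                          // definition of x begins here
        //	x = {base, len, cap}
        //	... last use of x ...
        //	VARKILL x                         // x is certainly dead below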
    86  
    87  // BlockEffects summarizes the liveness effects on an SSA block.
    88  type BlockEffects struct {
    89  	lastbitmapindex int // for livenessepilogue
    90  
    91  	// Computed during livenessprologue using only the content of
    92  	// individual blocks:
    93  	//
    94  	//	uevar: upward exposed variables (used before set in block)
    95  	//	varkill: killed variables (set in block)
    96  	//	avarinit: addrtaken variables set or used (proof of initialization)
    97  	uevar    bvec
    98  	varkill  bvec
    99  	avarinit bvec
   100  
   101  	// Computed during livenesssolve using control flow information:
   102  	//
   103  	//	livein: variables live at block entry
   104  	//	liveout: variables live at block exit
   105  	//	avarinitany: addrtaken variables possibly initialized at block exit
   106  	//		(initialized in block or at exit from any predecessor block)
   107  	//	avarinitall: addrtaken variables certainly initialized at block exit
   108  	//		(initialized in block or at exit from all predecessor blocks)
   109  	livein      bvec
   110  	liveout     bvec
   111  	avarinitany bvec
   112  	avarinitall bvec
   113  }
   114  
   115  // A collection of global state used by liveness analysis.
   116  type Liveness struct {
   117  	fn         *Node
   118  	f          *ssa.Func
   119  	vars       []*Node
   120  	idx        map[*Node]int32
   121  	stkptrsize int64
   122  
   123  	be []BlockEffects
   124  
   125  	// stackMapIndex maps from safe points (i.e., CALLs) to their
   126  	// index within the stack maps.
   127  	stackMapIndex map[*ssa.Value]int
   128  
   129  	// An array with a bit vector for each safe point tracking live variables.
   130  	livevars []bvec
   131  
   132  	cache progeffectscache
   133  }
   134  
   135  type progeffectscache struct {
   136  	textavarinit []int32
   137  	retuevar     []int32
   138  	tailuevar    []int32
   139  	initialized  bool
   140  }
   141  
   142  // livenessShouldTrack reports whether the liveness analysis
   143  // should track the variable n.
   144  // We don't care about variables that have no pointers,
   145  // nor do we care about non-local variables,
   146  // nor do we care about empty structs (handled by the pointer check),
   147  // nor do we care about the fake PAUTOHEAP variables.
   148  func livenessShouldTrack(n *Node) bool {
   149  	return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type)
   150  }
   151  
   152  // getvariables returns the list of on-stack variables that we need to track
   153  // and a map for looking up indices by *Node.
   154  func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
   155  	var vars []*Node
   156  	for _, n := range fn.Func.Dcl {
   157  		if livenessShouldTrack(n) {
   158  			vars = append(vars, n)
   159  		}
   160  	}
   161  	idx := make(map[*Node]int32, len(vars))
   162  	for i, n := range vars {
   163  		idx[n] = int32(i)
   164  	}
   165  	return vars, idx
   166  }
   167  
   168  func (lv *Liveness) initcache() {
   169  	if lv.cache.initialized {
   170  		Fatalf("liveness cache initialized twice")
   171  		return
   172  	}
   173  	lv.cache.initialized = true
   174  
   175  	for i, node := range lv.vars {
   176  		switch node.Class() {
   177  		case PPARAM:
   178  			// A tail call (ssa.BlockRetJmp) brings the stack
   179  			// pointer back up (if it ever went down) and then
   180  			// jumps to a new function entirely. That form of exit
   181  			// must read all the parameters for correctness, and
   182  			// similarly it must not read the out arguments - they
   183  			// won't be set until the new function runs.
   184  
   185  			lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
   186  
   187  			if node.Addrtaken() {
   188  				lv.cache.textavarinit = append(lv.cache.textavarinit, int32(i))
   189  			}
   190  
   191  		case PPARAMOUT:
   192  			// If the result had its address taken, it is being tracked
   193  			// by the avarinit code, which does not use uevar.
   194  			// If we added it to uevar too, we'd not see any kill
   195  			// and decide that the variable was live on entry, which it is not.
   196  			// So only use uevar in the non-addrtaken case.
   197  			// retuevar applies only to non-tail-call returns
   198  			// (ssa.BlockRet); tail calls use tailuevar instead.
   199  			if !node.Addrtaken() {
   200  				lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
   201  			}
   202  		}
   203  	}
   204  }
   205  
   206  // A liveEffect is a set of flags that describe an instruction's
   207  // liveness effects on a variable.
   208  //
   209  // The possible flags are:
   210  //	uevar - used by the instruction
   211  //	varkill - killed by the instruction
   212  //		for variables without address taken, means variable was set
   213  //		for variables with address taken, means variable was marked dead
   214  //	avarinit - initialized or referred to by the instruction,
   215  //		only for variables with address taken but not escaping to heap
   216  //
   217  // The avarinit output serves as a signal that the data has been
   218  // initialized, because any use of a variable must come after its
   219  // initialization.
   220  type liveEffect int
   221  
   222  const (
   223  	uevar liveEffect = 1 << iota
   224  	varkill
   225  	avarinit
   226  )
   227  
   228  // valueEffects returns the index of a variable in lv.vars and the
   229  // liveness effects v has on that variable.
   230  // If v does not affect any tracked variables, it returns -1, 0.
   231  func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
   232  	n, e := affectedNode(v)
   233  	if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
   234  		return -1, 0
   235  	}
   236  
   237  	// AllocFrame has dropped unused variables from
   238  	// lv.fn.Func.Dcl, but they might still be referenced by
   239  	// OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
   240  	// variable" ICEs (issue 19632).
   241  	switch v.Op {
   242  	case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
   243  		if !n.Name.Used() {
   244  			return -1, 0
   245  		}
   246  	}
   247  
   248  	var effect liveEffect
   249  	if n.Addrtaken() {
   250  		if v.Op != ssa.OpVarKill {
   251  			effect |= avarinit
   252  		}
   253  		if v.Op == ssa.OpVarDef || v.Op == ssa.OpVarKill {
   254  			effect |= varkill
   255  		}
   256  	} else {
   257  		// Read is a read, obviously.
   258  		// Addr by itself is also implicitly a read.
   259  		//
   260  		// Addr|Write means that the address is being taken
   261  		// but only so that the instruction can write to the value.
   262  		// It is not a read.
   263  
   264  		if e&ssa.SymRead != 0 || e&(ssa.SymAddr|ssa.SymWrite) == ssa.SymAddr {
   265  			effect |= uevar
   266  		}
   267  		if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
   268  			effect |= varkill
   269  		}
   270  	}
   271  
   272  	if effect == 0 {
   273  		return -1, 0
   274  	}
   275  
   276  	if pos, ok := lv.idx[n]; ok {
   277  		return pos, effect
   278  	}
   279  	return -1, 0
   280  }
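
        // As a worked illustration of the rules above (a sketch; the exact ops
        // depend on the SSA lowering), for a tracked, non-addrtaken slice s:
        //
        //	load from s   (SymRead)            -> uevar
        //	store to s    (SymWrite, fat type) -> no varkill without a VarDef
        //	VarDef s      (OpVarDef)           -> varkill
        //
        // and for an addrtaken variable a:
        //
        //	any op except VarKill -> avarinit
        //	VarDef a              -> avarinit|varkill
        //	VarKill a             -> varkill only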
   281  
   282  // affectedNode returns the *Node affected by v
   283  func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
   284  	// Special cases.
   285  	switch v.Op {
   286  	case ssa.OpLoadReg:
   287  		n, _ := AutoVar(v.Args[0])
   288  		return n, ssa.SymRead
   289  	case ssa.OpStoreReg:
   290  		n, _ := AutoVar(v)
   291  		return n, ssa.SymWrite
   292  
   293  	case ssa.OpVarLive:
   294  		switch a := v.Aux.(type) {
   295  		case *ssa.ArgSymbol:
   296  			return a.Node.(*Node), ssa.SymRead
   297  		case *ssa.AutoSymbol:
   298  			return a.Node.(*Node), ssa.SymRead
   299  		default:
   300  			Fatalf("unknown VarLive aux type: %s", v.LongString())
   301  		}
   302  	case ssa.OpVarDef, ssa.OpVarKill:
   303  		return v.Aux.(*Node), ssa.SymWrite
   304  	case ssa.OpKeepAlive:
   305  		n, _ := AutoVar(v.Args[0])
   306  		return n, ssa.SymRead
   307  	}
   308  
   309  	e := v.Op.SymEffect()
   310  	if e == 0 {
   311  		return nil, 0
   312  	}
   313  
   314  	var n *Node
   315  	switch a := v.Aux.(type) {
   316  	case nil, *ssa.ExternSymbol:
   317  		// ok, but no node
   318  	case *ssa.ArgSymbol:
   319  		n = a.Node.(*Node)
   320  	case *ssa.AutoSymbol:
   321  		n = a.Node.(*Node)
   322  	default:
   323  		Fatalf("weird aux: %s", v.LongString())
   324  	}
   325  
   326  	return n, e
   327  }
   328  
   329  // Constructs a new liveness structure used to hold the global state of the
   330  // liveness computation. The f argument is the function's SSA form, and the
   331  // vars argument is the list of variables to track, as built by getvariables.
   332  func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
   333  	lv := &Liveness{
   334  		fn:         fn,
   335  		f:          f,
   336  		vars:       vars,
   337  		idx:        idx,
   338  		stkptrsize: stkptrsize,
   339  		be:         make([]BlockEffects, f.NumBlocks()),
   340  	}
   341  
   342  	nblocks := int32(len(f.Blocks))
   343  	nvars := int32(len(vars))
   344  	bulk := bvbulkalloc(nvars, nblocks*7)
   345  	for _, b := range f.Blocks {
   346  		be := lv.blockEffects(b)
   347  
   348  		be.uevar = bulk.next()
   349  		be.varkill = bulk.next()
   350  		be.livein = bulk.next()
   351  		be.liveout = bulk.next()
   352  		be.avarinit = bulk.next()
   353  		be.avarinitany = bulk.next()
   354  		be.avarinitall = bulk.next()
   355  	}
   356  	return lv
   357  }
   358  
   359  func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
   360  	return &lv.be[b.ID]
   361  }
   362  
   363  // NOTE: The bitmap for a specific type t should be cached in t after the first run
   364  // and then simply copied into bv at the correct offset on future calls with
   365  // the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1
   366  // accounts for 40% of the 6g execution time.
   367  func onebitwalktype1(t *types.Type, xoffset *int64, bv bvec) {
   368  	if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
   369  		Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
   370  	}
   371  
   372  	switch t.Etype {
   373  	case TINT8,
   374  		TUINT8,
   375  		TINT16,
   376  		TUINT16,
   377  		TINT32,
   378  		TUINT32,
   379  		TINT64,
   380  		TUINT64,
   381  		TINT,
   382  		TUINT,
   383  		TUINTPTR,
   384  		TBOOL,
   385  		TFLOAT32,
   386  		TFLOAT64,
   387  		TCOMPLEX64,
   388  		TCOMPLEX128:
   389  		*xoffset += t.Width
   390  
   391  	case TPTR32,
   392  		TPTR64,
   393  		TUNSAFEPTR,
   394  		TFUNC,
   395  		TCHAN,
   396  		TMAP:
   397  		if *xoffset&int64(Widthptr-1) != 0 {
   398  			Fatalf("onebitwalktype1: invalid alignment, %v", t)
   399  		}
   400  		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer
   401  		*xoffset += t.Width
   402  
   403  	case TSTRING:
   404  		// struct { byte *str; intgo len; }
   405  		if *xoffset&int64(Widthptr-1) != 0 {
   406  			Fatalf("onebitwalktype1: invalid alignment, %v", t)
   407  		}
   408  		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot
   409  		*xoffset += t.Width
   410  
   411  	case TINTER:
   412  		// struct { Itab *tab;	void *data; }
   413  		// or, when isnilinter(t)==true:
   414  		// struct { Type *type; void *data; }
   415  		if *xoffset&int64(Widthptr-1) != 0 {
   416  			Fatalf("onebitwalktype1: invalid alignment, %v", t)
   417  		}
   418  		bv.Set(int32(*xoffset / int64(Widthptr)))   // pointer in first slot
   419  		bv.Set(int32(*xoffset/int64(Widthptr) + 1)) // pointer in second slot
   420  		*xoffset += t.Width
   421  
   422  	case TSLICE:
   423  		// struct { byte *array; uintgo len; uintgo cap; }
   424  		if *xoffset&int64(Widthptr-1) != 0 {
   425  			Fatalf("onebitwalktype1: invalid TSLICE alignment, %v", t)
   426  		}
   427  		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot (BitsPointer)
   428  		*xoffset += t.Width
   429  
   430  	case TARRAY:
   431  		for i := int64(0); i < t.NumElem(); i++ {
   432  			onebitwalktype1(t.Elem(), xoffset, bv)
   433  		}
   434  
   435  	case TSTRUCT:
   436  		var o int64
   437  		for _, t1 := range t.Fields().Slice() {
   438  			fieldoffset := t1.Offset
   439  			*xoffset += fieldoffset - o
   440  			onebitwalktype1(t1.Type, xoffset, bv)
   441  			o = fieldoffset + t1.Type.Width
   442  		}
   443  
   444  		*xoffset += t.Width - o
   445  
   446  	default:
   447  		Fatalf("onebitwalktype1: unexpected type, %v", t)
   448  	}
   449  }
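
        // Example (illustrative only): on a 64-bit system, for a local of type
        //
        //	struct { p *int; n int; s string }
        //
        // the walk sets bit 0 (p) and bit 2 (s's data pointer) and leaves
        // bit 1 (n) and bit 3 (s's length) clear, since only words 0 and 2
        // can hold pointers.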
   450  
   451  // Returns the number of words of local variables.
   452  func localswords(lv *Liveness) int32 {
   453  	return int32(lv.stkptrsize / int64(Widthptr))
   454  }
   455  
   456  // Returns the number of words of in and out arguments.
   457  func argswords(lv *Liveness) int32 {
   458  	return int32(lv.fn.Type.ArgWidth() / int64(Widthptr))
   459  }
   460  
   461  // Generates live pointer value maps for arguments and local variables. The
   462  // receiver and the input arguments are always assumed live. The vars
   463  // argument is a slice of *Nodes.
   464  func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, locals bvec) {
   465  	var xoffset int64
   466  
   467  	for i := int32(0); ; i++ {
   468  		i = liveout.Next(i)
   469  		if i < 0 {
   470  			break
   471  		}
   472  		node := vars[i]
   473  		switch node.Class() {
   474  		case PAUTO:
   475  			xoffset = node.Xoffset + lv.stkptrsize
   476  			onebitwalktype1(node.Type, &xoffset, locals)
   477  
   478  		case PPARAM, PPARAMOUT:
   479  			xoffset = node.Xoffset
   480  			onebitwalktype1(node.Type, &xoffset, args)
   481  		}
   482  	}
   483  }
   484  
   485  // Returns true for instructions that are safe points and therefore must be
   486  // annotated with liveness information.
   487  func issafepoint(v *ssa.Value) bool {
   488  	return v.Op.IsCall()
   489  }
   490  
   491  // Initializes the sets for solving the live variables. Visits all the
   492  // instructions in each basic block to summarize the information at each
   493  // basic block.
   494  func livenessprologue(lv *Liveness) {
   495  	lv.initcache()
   496  
   497  	for _, b := range lv.f.Blocks {
   498  		be := lv.blockEffects(b)
   499  
   500  		// Walk the block instructions backward and update the block
   501  		// effects with each value's effects.
   502  		for j := len(b.Values) - 1; j >= 0; j-- {
   503  			pos, e := lv.valueEffects(b.Values[j])
   504  			if e&varkill != 0 {
   505  				be.varkill.Set(pos)
   506  				be.uevar.Unset(pos)
   507  			}
   508  			if e&uevar != 0 {
   509  				be.uevar.Set(pos)
   510  			}
   511  		}
   512  
   513  		// Walk the block instructions forward to update avarinit bits.
   514  		// avarinit describes the effect at the end of the block, not the beginning.
   515  		for j := 0; j < len(b.Values); j++ {
   516  			pos, e := lv.valueEffects(b.Values[j])
   517  			if e&varkill != 0 {
   518  				be.avarinit.Unset(pos)
   519  			}
   520  			if e&avarinit != 0 {
   521  				be.avarinit.Set(pos)
   522  			}
   523  		}
   524  	}
   525  }
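
        // For example (a statement-level sketch, single block, variables x, y):
        //
        //	x = 1
        //	y = x
        //
        // Walking backward, y = x first adds x to uevar and y to varkill;
        // then x = 1 adds x to varkill and removes it from uevar. The block
        // as a whole therefore kills both x and y and exposes nothing upward.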
   526  
   527  // Solve the liveness dataflow equations.
   528  func livenesssolve(lv *Liveness) {
   529  	// These temporary bitvectors exist to avoid successive allocations and
   530  	// frees within the loop.
   531  	newlivein := bvalloc(int32(len(lv.vars)))
   532  	newliveout := bvalloc(int32(len(lv.vars)))
   533  	any := bvalloc(int32(len(lv.vars)))
   534  	all := bvalloc(int32(len(lv.vars)))
   535  
   536  	// Push avarinitall, avarinitany forward.
   537  	// avarinitall says the addressed var is initialized along all paths reaching the block exit.
   538  	// avarinitany says the addressed var is initialized along some path reaching the block exit.
   539  	for _, b := range lv.f.Blocks {
   540  		be := lv.blockEffects(b)
   541  		if b == lv.f.Entry {
   542  			be.avarinitall.Copy(be.avarinit)
   543  		} else {
   544  			be.avarinitall.Clear()
   545  			be.avarinitall.Not()
   546  		}
   547  		be.avarinitany.Copy(be.avarinit)
   548  	}
   549  
   550  	// Walk blocks in the general direction of propagation (RPO
   551  	// for avarinit{any,all}, and PO for live{in,out}). This
   552  	// improves convergence.
   553  	po := lv.f.Postorder()
   554  
   555  	for change := true; change; {
   556  		change = false
   557  		for i := len(po) - 1; i >= 0; i-- {
   558  			b := po[i]
   559  			be := lv.blockEffects(b)
   560  			lv.avarinitanyall(b, any, all)
   561  
   562  			any.AndNot(any, be.varkill)
   563  			all.AndNot(all, be.varkill)
   564  			any.Or(any, be.avarinit)
   565  			all.Or(all, be.avarinit)
   566  			if !any.Eq(be.avarinitany) {
   567  				change = true
   568  				be.avarinitany.Copy(any)
   569  			}
   570  
   571  			if !all.Eq(be.avarinitall) {
   572  				change = true
   573  				be.avarinitall.Copy(all)
   574  			}
   575  		}
   576  	}
   577  
   578  	// Iterate through the blocks in reverse round-robin fashion. A work
   579  	// queue might be slightly faster. As is, the number of iterations is
   580  	// so low that it hardly seems to be worth the complexity.
   581  
   582  	for change := true; change; {
   583  		change = false
   584  		for _, b := range po {
   585  			be := lv.blockEffects(b)
   586  
   587  			newliveout.Clear()
   588  			switch b.Kind {
   589  			case ssa.BlockRet:
   590  				for _, pos := range lv.cache.retuevar {
   591  					newliveout.Set(pos)
   592  				}
   593  			case ssa.BlockRetJmp:
   594  				for _, pos := range lv.cache.tailuevar {
   595  					newliveout.Set(pos)
   596  				}
   597  			case ssa.BlockExit:
   598  				// nothing to do
   599  			default:
   600  				// A variable is live on output from this block
   601  				// if it is live on input to some successor.
   602  				//
   603  				// out[b] = \bigcup_{s \in succ[b]} in[s]
   604  				newliveout.Copy(lv.blockEffects(b.Succs[0].Block()).livein)
   605  				for _, succ := range b.Succs[1:] {
   606  					newliveout.Or(newliveout, lv.blockEffects(succ.Block()).livein)
   607  				}
   608  			}
   609  
   610  			if !be.liveout.Eq(newliveout) {
   611  				change = true
   612  				be.liveout.Copy(newliveout)
   613  			}
   614  
   615  			// A variable is live on input to this block
   616  			// if it is live on output from this block and
   617  			// not set by the code in this block.
   618  			//
   619  			// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
   620  			newlivein.AndNot(be.liveout, be.varkill)
   621  			be.livein.Or(newlivein, be.uevar)
   622  		}
   623  	}
   624  }
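
        // A tiny worked example of the backward dataflow (hypothetical, one
        // variable x): if block b1 reads x and then branches to b2, which
        // sets x before any read and has x dead at exit, then
        //
        //	in[b2]  = uevar[b2] \cup (out[b2] \setminus varkill[b2]) = {}
        //	out[b1] = in[b2] = {}
        //	in[b1]  = uevar[b1] \cup (out[b1] \setminus varkill[b1]) = {x}
        //
        // so x is live into b1 but not across the edge from b1 to b2.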
   625  
   626  // Visits all instructions in each basic block and computes a bit vector of live
   627  // variables at each safe point location.
   628  func livenessepilogue(lv *Liveness) {
   629  	nvars := int32(len(lv.vars))
   630  	liveout := bvalloc(nvars)
   631  	any := bvalloc(nvars)
   632  	all := bvalloc(nvars)
   633  	livedefer := bvalloc(nvars) // always-live variables
   634  
   635  	// If there is a defer (that could recover), then all output
   636  	// parameters are live all the time.  In addition, any locals
   637  	// that are pointers to heap-allocated output parameters are
   638  	// also always live (post-deferreturn code needs these
   639  	// pointers to copy values back to the stack).
   640  	// TODO: if the output parameter is heap-allocated, then we
   641  	// don't need to keep the stack copy live?
   642  	if lv.fn.Func.HasDefer() {
   643  		for i, n := range lv.vars {
   644  			if n.Class() == PPARAMOUT {
   645  				if n.IsOutputParamHeapAddr() {
   646  					// Just to be paranoid.  Heap addresses are PAUTOs.
   647  					Fatalf("variable %v both output param and heap output param", n)
   648  				}
   649  				if n.Name.Param.Heapaddr != nil {
   650  					// If this variable moved to the heap, then
   651  					// its stack copy is not live.
   652  					continue
   653  				}
   654  				// Note: zeroing is handled by zeroResults in walk.go.
   655  				livedefer.Set(int32(i))
   656  			}
   657  			if n.IsOutputParamHeapAddr() {
   658  				n.Name.SetNeedzero(true)
   659  				livedefer.Set(int32(i))
   660  			}
   661  		}
   662  	}
   663  
   664  	{
   665  		// Reserve an entry for function entry.
   666  		live := bvalloc(nvars)
   667  		for _, pos := range lv.cache.textavarinit {
   668  			live.Set(pos)
   669  		}
   670  		lv.livevars = append(lv.livevars, live)
   671  	}
   672  
   673  	for _, b := range lv.f.Blocks {
   674  		be := lv.blockEffects(b)
   675  
   676  		// Compute avarinitany and avarinitall for entry to block.
   677  		// This duplicates information known during livenesssolve
   678  		// but avoids storing two more vectors for each block.
   679  		lv.avarinitanyall(b, any, all)
   680  
   681  		// Walk forward through the basic block instructions and
   682  		// allocate liveness maps for those instructions that need them.
   683  		// Seed the maps with information about the addrtaken variables.
   684  		for _, v := range b.Values {
   685  			pos, e := lv.valueEffects(v)
   686  			if e&varkill != 0 {
   687  				any.Unset(pos)
   688  				all.Unset(pos)
   689  			}
   690  			if e&avarinit != 0 {
   691  				any.Set(pos)
   692  				all.Set(pos)
   693  			}
   694  
   695  			if !issafepoint(v) {
   696  				continue
   697  			}
   698  
   699  			// Annotate ambiguously live variables so that they can
   700  			// be zeroed at function entry and at VARKILL points.
   701  			// liveout is dead here and used as a temporary.
   702  			liveout.AndNot(any, all)
   703  			if !liveout.IsEmpty() {
   704  				for pos := int32(0); pos < liveout.n; pos++ {
   705  					if !liveout.Get(pos) {
   706  						continue
   707  					}
   708  					all.Set(pos) // silence future warnings in this block
   709  					n := lv.vars[pos]
   710  					if !n.Name.Needzero() {
   711  						n.Name.SetNeedzero(true)
   712  						if debuglive >= 1 {
   713  							Warnl(v.Pos, "%v: %L is ambiguously live", lv.fn.Func.Nname, n)
   714  						}
   715  					}
   716  				}
   717  			}
   718  
   719  			// Live stuff first.
   720  			live := bvalloc(nvars)
   721  			live.Copy(any)
   722  			lv.livevars = append(lv.livevars, live)
   723  		}
   724  
   725  		be.lastbitmapindex = len(lv.livevars) - 1
   726  	}
   727  
   728  	for _, b := range lv.f.Blocks {
   729  		be := lv.blockEffects(b)
   730  
   731  		// walk backward, emit pcdata and populate the maps
   732  		index := int32(be.lastbitmapindex)
   733  		if index < 0 {
   734  			// every function has at least the entry bitmap, so
   735  			// index should never go below zero here.
   736  			Fatalf("livenessepilogue")
   737  		}
   738  
   739  		liveout.Copy(be.liveout)
   740  		for i := len(b.Values) - 1; i >= 0; i-- {
   741  			v := b.Values[i]
   742  
   743  			if issafepoint(v) {
   744  				// Found an interesting instruction, record the
   745  				// corresponding liveness information.
   746  
   747  				live := lv.livevars[index]
   748  				live.Or(live, liveout)
   749  				live.Or(live, livedefer) // only for non-entry safe points
   750  				index--
   751  			}
   752  
   753  			// Update liveness information.
   754  			pos, e := lv.valueEffects(v)
   755  			if e&varkill != 0 {
   756  				liveout.Unset(pos)
   757  			}
   758  			if e&uevar != 0 {
   759  				liveout.Set(pos)
   760  			}
   761  		}
   762  
   763  		if b == lv.f.Entry {
   764  			if index != 0 {
   765  				Fatalf("bad index for entry point: %v", index)
   766  			}
   767  
   768  			// Record live variables.
   769  			live := lv.livevars[index]
   770  			live.Or(live, liveout)
   771  		}
   772  	}
   773  
   774  	// Useful sanity check: on entry to the function,
   775  	// the only things that can possibly be live are the
   776  	// input parameters.
   777  	for j, n := range lv.vars {
   778  		if n.Class() != PPARAM && lv.livevars[0].Get(int32(j)) {
   779  			Fatalf("internal error: %v %L recorded as live on entry", lv.fn.Func.Nname, n)
   780  		}
   781  	}
   782  }
   783  
   784  func (lv *Liveness) clobber() {
   785  	// The clobberdead experiment inserts code to clobber all the dead variables (locals and args)
   786  	// before and after every safepoint. This experiment is useful for debugging the generation
   787  	// of live pointer bitmaps.
   788  	if objabi.Clobberdead_enabled == 0 {
   789  		return
   790  	}
   791  	var varSize int64
   792  	for _, n := range lv.vars {
   793  		varSize += n.Type.Size()
   794  	}
   795  	if len(lv.livevars) > 1000 || varSize > 10000 {
   796  		// Be careful to avoid doing too much work.
   797  		// Bail if >1000 safepoints or >10000 bytes of variables.
   798  		// Otherwise, giant functions make this experiment generate too much code.
   799  		return
   800  	}
   801  	if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" {
   802  		// Clobber only functions where the hash of the function name matches a pattern.
   803  		// Useful for binary searching for a miscompiled function.
   804  		hstr := ""
   805  		for _, b := range sha1.Sum([]byte(lv.fn.funcname())) {
   806  			hstr += fmt.Sprintf("%08b", b)
   807  		}
   808  		if !strings.HasSuffix(hstr, h) {
   809  			return
   810  		}
   811  		fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.fn.funcname())
   812  	}
   813  	if lv.f.Name == "forkAndExecInChild" {
   814  		// forkAndExecInChild calls vfork (on linux/amd64, anyway).
   815  		// The code we add here clobbers parts of the stack in the child.
   816  		// When the parent resumes, it is using the same stack frame. But the
   817  		// child has clobbered stack variables that the parent needs. Boom!
   818  		// In particular, the sys argument gets clobbered.
   819  		// Note to self: GOCLOBBERDEADHASH=011100101110
   820  		return
   821  	}
   822  
   823  	var oldSched []*ssa.Value
   824  	for _, b := range lv.f.Blocks {
   825  		// Copy block's values to a temporary.
   826  		oldSched = append(oldSched[:0], b.Values...)
   827  		b.Values = b.Values[:0]
   828  
   829  		// Clobber all dead variables at entry.
   830  		if b == lv.f.Entry {
   831  			for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
   832  				// Skip argless ops. We need to skip at least
   833  				// the lowered ClosurePtr op, because it
   834  				// really wants to be first. This will also
   835  				// skip ops like InitMem and SP, which are ok.
   836  				b.Values = append(b.Values, oldSched[0])
   837  				oldSched = oldSched[1:]
   838  			}
   839  			clobber(lv, b, lv.livevars[0])
   840  		}
   841  
   842  		// Copy values into schedule, adding clobbering around safepoints.
   843  		for _, v := range oldSched {
   844  			if !issafepoint(v) {
   845  				b.Values = append(b.Values, v)
   846  				continue
   847  			}
   848  			before := true
   849  			if v.Op.IsCall() && v.Aux != nil && v.Aux.(*obj.LSym) == typedmemmove {
   850  				// Can't put clobber code before the call to typedmemmove.
   851  				// The variable to-be-copied is marked as dead
   852  				// at the callsite. That is ok, though, as typedmemmove
   853  				// is marked as nosplit, and the first thing it does
   854  				// is to call memmove (also nosplit), after which
   855  				// the source value is dead.
   856  				// See issue 16026.
   857  				before = false
   858  			}
   859  			if before {
   860  				clobber(lv, b, lv.livevars[lv.stackMapIndex[v]])
   861  			}
   862  			b.Values = append(b.Values, v)
   863  			clobber(lv, b, lv.livevars[lv.stackMapIndex[v]])
   864  		}
   865  	}
   866  }
   867  
   868  // clobber generates code to clobber all dead variables (those not marked in live).
   869  // Clobbering instructions are added to the end of b.Values.
   870  func clobber(lv *Liveness, b *ssa.Block, live bvec) {
   871  	for i, n := range lv.vars {
   872  		if !live.Get(int32(i)) {
   873  			clobberVar(b, n)
   874  		}
   875  	}
   876  }
   877  
   878  // clobberVar generates code to trash the pointers in v.
   879  // Clobbering instructions are added to the end of b.Values.
   880  func clobberVar(b *ssa.Block, v *Node) {
   881  	clobberWalk(b, v, 0, v.Type)
   882  }
   883  
   884  // b = block to which we append instructions
   885  // v = variable
   886  // offset = offset of (sub-portion of) variable to clobber (in bytes)
   887  // t = type of sub-portion of v.
   888  func clobberWalk(b *ssa.Block, v *Node, offset int64, t *types.Type) {
   889  	if !types.Haspointers(t) {
   890  		return
   891  	}
   892  	switch t.Etype {
   893  	case TPTR32,
   894  		TPTR64,
   895  		TUNSAFEPTR,
   896  		TFUNC,
   897  		TCHAN,
   898  		TMAP:
   899  		clobberPtr(b, v, offset)
   900  
   901  	case TSTRING:
   902  		// struct { byte *str; int len; }
   903  		clobberPtr(b, v, offset)
   904  
   905  	case TINTER:
   906  		// struct { Itab *tab; void *data; }
   907  		// or, when isnilinter(t)==true:
   908  		// struct { Type *type; void *data; }
   909  		clobberPtr(b, v, offset)
   910  		clobberPtr(b, v, offset+int64(Widthptr))
   911  
   912  	case TSLICE:
   913  		// struct { byte *array; int len; int cap; }
   914  		clobberPtr(b, v, offset)
   915  
   916  	case TARRAY:
   917  		for i := int64(0); i < t.NumElem(); i++ {
   918  			clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
   919  		}
   920  
   921  	case TSTRUCT:
   922  		for _, t1 := range t.Fields().Slice() {
   923  			clobberWalk(b, v, offset+t1.Offset, t1.Type)
   924  		}
   925  
   926  	default:
   927  		Fatalf("clobberWalk: unexpected type, %v", t)
   928  	}
   929  }
   930  
   931  // clobberPtr generates a clobber of the pointer at offset offset in v.
   932  // The clobber instruction is added at the end of b.
   933  func clobberPtr(b *ssa.Block, v *Node, offset int64) {
   934  	var aux interface{}
   935  	if v.Class() == PAUTO {
   936  		aux = &ssa.AutoSymbol{Node: v}
   937  	} else {
   938  		aux = &ssa.ArgSymbol{Node: v}
   939  	}
   940  	b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, aux)
   941  }
   942  
   943  func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) {
   944  	if len(b.Preds) == 0 {
   945  		any.Clear()
   946  		all.Clear()
   947  		for _, pos := range lv.cache.textavarinit {
   948  			any.Set(pos)
   949  			all.Set(pos)
   950  		}
   951  		return
   952  	}
   953  
   954  	be := lv.blockEffects(b.Preds[0].Block())
   955  	any.Copy(be.avarinitany)
   956  	all.Copy(be.avarinitall)
   957  
   958  	for _, pred := range b.Preds[1:] {
   959  		be := lv.blockEffects(pred.Block())
   960  		any.Or(any, be.avarinitany)
   961  		all.And(all, be.avarinitall)
   962  	}
   963  }
   964  
   965  // FNV-1 hash function constants.
   966  const (
   967  	H0 = 2166136261
   968  	Hp = 16777619
   969  )
   970  
   971  func hashbitmap(h uint32, bv bvec) uint32 {
   972  	n := int((bv.n + 31) / 32)
   973  	for i := 0; i < n; i++ {
   974  		w := bv.b[i]
   975  		h = (h * Hp) ^ (w & 0xff)
   976  		h = (h * Hp) ^ ((w >> 8) & 0xff)
   977  		h = (h * Hp) ^ ((w >> 16) & 0xff)
   978  		h = (h * Hp) ^ ((w >> 24) & 0xff)
   979  	}
   980  
   981  	return h
   982  }
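
        // For reference, hashbitmap is a word-unrolled form of the classic
        // byte-at-a-time FNV-1 loop (a sketch, not used directly here):
        //
        //	func fnv1(h uint32, data []byte) uint32 {
        //		for _, b := range data {
        //			h = h*Hp ^ uint32(b)
        //		}
        //		return h
        //	}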
   983  
   984  // Compact liveness information by coalescing identical per-call-site bitmaps.
   985  // The merging only happens for a single function, not across the entire binary.
   986  //
   987  // There are actually two lists of bitmaps, one list for the local variables and one
   988  // list for the function arguments. Both lists are indexed by the same PCDATA
   989  // index, so the corresponding pairs must be considered together when
   990  // merging duplicates. The argument bitmaps change much less often during
   991  // function execution than the local variable bitmaps, so it is possible that
   992  // we could introduce a separate PCDATA index for arguments vs locals and
   993  // then compact the set of argument bitmaps separately from the set of
   994  // local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
   995  // is actually a net loss: we save about 50k of argument bitmaps but the new
   996  // PCDATA tables cost about 100k. So for now we keep using a single index for
   997  // both bitmap lists.
   998  func livenesscompact(lv *Liveness) {
   999  	// Linear probing hash table of bitmaps seen so far.
  1000  	// The hash table has 4n entries to keep the linear
  1001  	// scan short. An entry of -1 indicates an empty slot.
  1002  	n := len(lv.livevars)
  1003  
  1004  	tablesize := 4 * n
  1005  	table := make([]int, tablesize)
  1006  	for i := range table {
  1007  		table[i] = -1
  1008  	}
  1009  
  1010  	// remap[i] = the new index of the old bit vector #i.
  1011  	remap := make([]int, n)
  1012  	for i := range remap {
  1013  		remap[i] = -1
  1014  	}
  1015  	uniq := 0 // unique tables found so far
  1016  
  1017  	// Consider bit vectors in turn.
  1018  	// If new, assign next number using uniq,
  1019  	// record in remap, record in lv.livevars
  1020  	// under the new index, and add entry to hash table.
  1021  	// If already seen, record earlier index in remap.
  1022  Outer:
  1023  	for i, live := range lv.livevars {
  1024  		h := hashbitmap(H0, live) % uint32(tablesize)
  1025  
  1026  		for {
  1027  			j := table[h]
  1028  			if j < 0 {
  1029  				break
  1030  			}
  1031  			jlive := lv.livevars[j]
  1032  			if live.Eq(jlive) {
  1033  				remap[i] = j
  1034  				continue Outer
  1035  			}
  1036  
  1037  			h++
  1038  			if h == uint32(tablesize) {
  1039  				h = 0
  1040  			}
  1041  		}
  1042  
  1043  		table[h] = uniq
  1044  		remap[i] = uniq
  1045  		lv.livevars[uniq] = live
  1046  		uniq++
  1047  	}
  1048  
  1049  	// We've already reordered lv.livevars[0:uniq]. Clear the
  1050  	// pointers later in the array so they can be GC'd.
  1051  	tail := lv.livevars[uniq:]
  1052  	for i := range tail { // memclr loop pattern
  1053  		tail[i] = bvec{}
  1054  	}
  1055  	lv.livevars = lv.livevars[:uniq]
  1056  
  1057  	// Rewrite PCDATA instructions to use new numbering.
  1058  	lv.showlive(nil, lv.livevars[0])
  1059  	pos := 1
  1060  	lv.stackMapIndex = make(map[*ssa.Value]int)
  1061  	for _, b := range lv.f.Blocks {
  1062  		for _, v := range b.Values {
  1063  			if issafepoint(v) {
  1064  				lv.showlive(v, lv.livevars[remap[pos]])
  1065  				lv.stackMapIndex[v] = int(remap[pos])
  1066  				pos++
  1067  			}
  1068  		}
  1069  	}
  1070  }
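
        // For intuition (hypothetical numbers): if the per-safe-point bitmaps
        // are B0, B1, B2, B3 with B2 equal to B0, then after compaction
        //
        //	lv.livevars = [B0, B1, B3]
        //	remap       = [0, 1, 0, 2]
        //
        // and every safe point that produced B2 now shares stack map index 0.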
  1071  
  1072  func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
  1073  	if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
  1074  		return
  1075  	}
  1076  	if live.IsEmpty() {
  1077  		return
  1078  	}
  1079  
  1080  	pos := lv.fn.Func.Nname.Pos
  1081  	if v != nil {
  1082  		pos = v.Pos
  1083  	}
  1084  
  1085  	s := "live at "
  1086  	if v == nil {
  1087  		s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
  1088  	} else if sym, ok := v.Aux.(*obj.LSym); ok {
  1089  		fn := sym.Name
  1090  		if pos := strings.Index(fn, "."); pos >= 0 {
  1091  			fn = fn[pos+1:]
  1092  		}
  1093  		s += fmt.Sprintf("call to %s:", fn)
  1094  	} else {
  1095  		s += "indirect call:"
  1096  	}
  1097  
  1098  	for j, n := range lv.vars {
  1099  		if live.Get(int32(j)) {
  1100  			s += fmt.Sprintf(" %v", n)
  1101  		}
  1102  	}
  1103  
  1104  	Warnl(pos, s)
  1105  }
  1106  
  1107  func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
  1108  	started := false
  1109  	for i, n := range lv.vars {
  1110  		if !live.Get(int32(i)) {
  1111  			continue
  1112  		}
  1113  		if !started {
  1114  			if !printed {
  1115  				fmt.Printf("\t")
  1116  			} else {
  1117  				fmt.Printf(" ")
  1118  			}
  1119  			started = true
  1120  			printed = true
  1121  			fmt.Printf("%s=", name)
  1122  		} else {
  1123  			fmt.Printf(",")
  1124  		}
  1125  
  1126  		fmt.Printf("%s", n.Sym.Name)
  1127  	}
  1128  	return printed
  1129  }
  1130  
  1131  // printeffect is like printbvec, but for a single variable.
  1132  func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
  1133  	if !x {
  1134  		return printed
  1135  	}
  1136  	if !printed {
  1137  		fmt.Printf("\t")
  1138  	} else {
  1139  		fmt.Printf(" ")
  1140  	}
  1141  	fmt.Printf("%s=%s", name, lv.vars[pos].Sym.Name)
  1142  	return true
  1143  }
  1144  
  1145  // Prints the computed liveness information and inputs, for debugging.
  1146  // This format synthesizes the information used during the multiple passes
  1147  // into a single presentation.
  1148  func livenessprintdebug(lv *Liveness) {
  1149  	fmt.Printf("liveness: %s\n", lv.fn.funcname())
  1150  
  1151  	pcdata := 0
  1152  	for i, b := range lv.f.Blocks {
  1153  		if i > 0 {
  1154  			fmt.Printf("\n")
  1155  		}
  1156  
  1157  		// bb#0 pred=1,2 succ=3,4
  1158  		fmt.Printf("bb#%d pred=", b.ID)
  1159  		for j, pred := range b.Preds {
  1160  			if j > 0 {
  1161  				fmt.Printf(",")
  1162  			}
  1163  			fmt.Printf("%d", pred.Block().ID)
  1164  		}
  1165  		fmt.Printf(" succ=")
  1166  		for j, succ := range b.Succs {
  1167  			if j > 0 {
  1168  				fmt.Printf(",")
  1169  			}
  1170  			fmt.Printf("%d", succ.Block().ID)
  1171  		}
  1172  		fmt.Printf("\n")
  1173  
  1174  		be := lv.blockEffects(b)
  1175  
  1176  		// initial settings
  1177  		printed := false
  1178  		printed = lv.printbvec(printed, "uevar", be.uevar)
  1179  		printed = lv.printbvec(printed, "livein", be.livein)
  1180  		if printed {
  1181  			fmt.Printf("\n")
  1182  		}
  1183  
  1184  		// program listing, with individual effects listed
  1185  
  1186  		if b == lv.f.Entry {
  1187  			live := lv.livevars[pcdata]
  1188  			fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
  1189  			fmt.Printf("\tlive=")
  1190  			printed = false
  1191  			for j, n := range lv.vars {
  1192  				if !live.Get(int32(j)) {
  1193  					continue
  1194  				}
  1195  				if printed {
  1196  					fmt.Printf(",")
  1197  				}
  1198  				fmt.Printf("%v", n)
  1199  				printed = true
  1200  			}
  1201  			fmt.Printf("\n")
  1202  		}
  1203  
  1204  		for _, v := range b.Values {
  1205  			fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
  1206  
  1207  			if pos, ok := lv.stackMapIndex[v]; ok {
  1208  				pcdata = pos
  1209  			}
  1210  
  1211  			pos, effect := lv.valueEffects(v)
  1212  			printed = false
  1213  			printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
  1214  			printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
  1215  			printed = lv.printeffect(printed, "avarinit", pos, effect&avarinit != 0)
  1216  			if printed {
  1217  				fmt.Printf("\n")
  1218  			}
  1219  
  1220  			if !issafepoint(v) {
  1221  				continue
  1222  			}
  1223  
  1224  			live := lv.livevars[pcdata]
  1225  			fmt.Printf("\tlive=")
  1226  			printed = false
  1227  			for j, n := range lv.vars {
  1228  				if !live.Get(int32(j)) {
  1229  					continue
  1230  				}
  1231  				if printed {
  1232  					fmt.Printf(",")
  1233  				}
  1234  				fmt.Printf("%v", n)
  1235  				printed = true
  1236  			}
  1237  			fmt.Printf("\n")
  1238  		}
  1239  
  1240  		// bb bitsets
  1241  		fmt.Printf("end\n")
  1242  		printed = false
  1243  		printed = lv.printbvec(printed, "varkill", be.varkill)
  1244  		printed = lv.printbvec(printed, "liveout", be.liveout)
  1245  		printed = lv.printbvec(printed, "avarinit", be.avarinit)
  1246  		printed = lv.printbvec(printed, "avarinitany", be.avarinitany)
  1247  		printed = lv.printbvec(printed, "avarinitall", be.avarinitall)
  1248  		if printed {
  1249  			fmt.Printf("\n")
  1250  		}
  1251  	}
  1252  
  1253  	fmt.Printf("\n")
  1254  }
  1255  
  1256  // Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
  1257  // first word dumped is the total number of bitmaps. The second word is the
  1258  // number of bits in each bitmap. All bitmaps are assumed to be of equal length. The
  1259  // remaining bytes are the raw bitmaps.
  1260  func livenessemit(lv *Liveness, argssym, livesym *obj.LSym) {
  1261  	args := bvalloc(argswords(lv))
  1262  	aoff := duint32(argssym, 0, uint32(len(lv.livevars))) // number of bitmaps
  1263  	aoff = duint32(argssym, aoff, uint32(args.n))         // number of bits in each bitmap
  1264  
  1265  	locals := bvalloc(localswords(lv))
  1266  	loff := duint32(livesym, 0, uint32(len(lv.livevars))) // number of bitmaps
  1267  	loff = duint32(livesym, loff, uint32(locals.n))       // number of bits in each bitmap
  1268  
  1269  	for _, live := range lv.livevars {
  1270  		args.Clear()
  1271  		locals.Clear()
  1272  
  1273  		onebitlivepointermap(lv, live, lv.vars, args, locals)
  1274  
  1275  		aoff = dbvec(argssym, aoff, args)
  1276  		loff = dbvec(livesym, loff, locals)
  1277  	}
  1278  
  1279  	// Give these LSyms content-addressable names,
  1280  	// so that they can be de-duplicated.
  1281  	// This provides significant binary size savings.
  1282  	// It is safe to rename these LSyms because
  1283  	// they are tracked separately from ctxt.hash.
  1284  	argssym.Name = fmt.Sprintf("gclocals·%x", md5.Sum(argssym.P))
  1285  	livesym.Name = fmt.Sprintf("gclocals·%x", md5.Sum(livesym.P))
  1286  }
  1287  
  1288  // Entry point for liveness analysis. Solves for the liveness of
  1289  // pointer variables in the function and emits a runtime data
  1290  // structure read by the garbage collector.
  1291  // Returns a map from GC safe points to their corresponding stack map index.
  1292  func liveness(e *ssafn, f *ssa.Func) map[*ssa.Value]int {
  1293  	// Construct the global liveness state.
  1294  	vars, idx := getvariables(e.curfn)
  1295  	lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
  1296  
  1297  	// Run the dataflow framework.
  1298  	livenessprologue(lv)
  1299  	livenesssolve(lv)
  1300  	livenessepilogue(lv)
  1301  	livenesscompact(lv)
  1302  	lv.clobber()
  1303  	if debuglive >= 2 {
  1304  		livenessprintdebug(lv)
  1305  	}
  1306  
  1307  	// Emit the live pointer map data structures
  1308  	if ls := e.curfn.Func.lsym; ls != nil {
  1309  		livenessemit(lv, &ls.Func.GCArgs, &ls.Func.GCLocals)
  1310  	}
  1311  	return lv.stackMapIndex
  1312  }