github.com/gagliardetto/golang-go@v0.0.0-20201020153340-53909ea70814/cmd/compile/internal/gc/pgen.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
      8  	"fmt"
      9  	"github.com/gagliardetto/golang-go/cmd/compile/internal/ssa"
     10  	"github.com/gagliardetto/golang-go/cmd/compile/internal/types"
     11  	"github.com/gagliardetto/golang-go/cmd/internal/dwarf"
     12  	"github.com/gagliardetto/golang-go/cmd/internal/obj"
     13  	"github.com/gagliardetto/golang-go/cmd/internal/objabi"
     14  	"github.com/gagliardetto/golang-go/cmd/internal/src"
     15  	"github.com/gagliardetto/golang-go/cmd/internal/sys"
    16  	"github.com/gagliardetto/golang-go/not-internal/race"
    17  	"math/rand"
    18  	"sort"
    19  	"sync"
    20  	"time"
    21  )
    22  
    23  // "Portable" code generation.
    24  
    25  var (
    26  	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
    27  	compilequeue    []*Node // functions waiting to be compiled
    28  )
    29  
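         // emitptrargsmap emits the <fn>.args_stackmap symbol: pointer bitmaps
         // covering fn's arguments and, if present, its results. funccompile
         // calls it only for functions without a Go body (e.g. assembly
         // functions), so the garbage collector can still scan their argument
         // area.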
    30  func emitptrargsmap(fn *Node) {
    31  	if fn.funcname() == "_" {
    32  		return
    33  	}
    34  	sym := lookup(fmt.Sprintf("%s.args_stackmap", fn.funcname()))
    35  	lsym := sym.Linksym()
    36  
    37  	nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
    38  	bv := bvalloc(int32(nptr) * 2)
    39  	nbitmap := 1
    40  	if fn.Type.NumResults() > 0 {
    41  		nbitmap = 2
    42  	}
    43  	off := duint32(lsym, 0, uint32(nbitmap))
    44  	off = duint32(lsym, off, uint32(bv.n))
    45  
    46  	if fn.IsMethod() {
    47  		onebitwalktype1(fn.Type.Recvs(), 0, bv)
    48  	}
    49  	if fn.Type.NumParams() > 0 {
    50  		onebitwalktype1(fn.Type.Params(), 0, bv)
    51  	}
    52  	off = dbvec(lsym, off, bv)
    53  
    54  	if fn.Type.NumResults() > 0 {
    55  		onebitwalktype1(fn.Type.Results(), 0, bv)
    56  		off = dbvec(lsym, off, bv)
    57  	}
    58  
    59  	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
    60  }
    61  
    62  // cmpstackvarlt reports whether the stack variable a sorts before b.
    63  //
     64  // Sort the list of stack variables: autos after anything else;
     65  // within autos, unused after used; within used, things with
     66  // pointers first, zeroed things first, and then decreasing size.
    67  // Because autos are laid out in decreasing addresses
    68  // on the stack, pointers first, zeroed things first and decreasing size
    69  // really means, in memory, things with pointers needing zeroing at
    70  // the top of the stack and increasing in size.
    71  // Non-autos sort on offset.
    72  func cmpstackvarlt(a, b *Node) bool {
    73  	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
    74  		return b.Class() == PAUTO
    75  	}
    76  
    77  	if a.Class() != PAUTO {
    78  		return a.Xoffset < b.Xoffset
    79  	}
    80  
    81  	if a.Name.Used() != b.Name.Used() {
    82  		return a.Name.Used()
    83  	}
    84  
    85  	ap := types.Haspointers(a.Type)
    86  	bp := types.Haspointers(b.Type)
    87  	if ap != bp {
    88  		return ap
    89  	}
    90  
    91  	ap = a.Name.Needzero()
    92  	bp = b.Name.Needzero()
    93  	if ap != bp {
    94  		return ap
    95  	}
    96  
    97  	if a.Type.Width != b.Type.Width {
    98  		return a.Type.Width > b.Type.Width
    99  	}
   100  
   101  	return a.Sym.Name < b.Sym.Name
   102  }
   103  
    104  // byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
   105  type byStackVar []*Node
   106  
   107  func (s byStackVar) Len() int           { return len(s) }
   108  func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
   109  func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
   110  
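         // AllocFrame lays out the stack frame for f: it marks which autos are
         // actually used, sorts fn.Dcl with cmpstackvarlt (truncating the
         // unused autos from it), assigns a negative Xoffset to each used
         // auto, and records the auto area size in s.stksize and its
         // pointer-containing prefix in s.stkptrsize.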
   111  func (s *ssafn) AllocFrame(f *ssa.Func) {
   112  	s.stksize = 0
   113  	s.stkptrsize = 0
   114  	fn := s.curfn.Func
   115  
   116  	// Mark the PAUTO's unused.
   117  	for _, ln := range fn.Dcl {
   118  		if ln.Class() == PAUTO {
   119  			ln.Name.SetUsed(false)
   120  		}
   121  	}
   122  
   123  	for _, l := range f.RegAlloc {
   124  		if ls, ok := l.(ssa.LocalSlot); ok {
   125  			ls.N.(*Node).Name.SetUsed(true)
   126  		}
   127  	}
   128  
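         	// Some SSA ops need a scratch memory word (v.Op.UsesScratch); if the
         	// target requires a dedicated slot for it (NeedsFpScratch), note that
         	// here so a temp can be reserved below.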
   129  	scratchUsed := false
   130  	for _, b := range f.Blocks {
   131  		for _, v := range b.Values {
   132  			if n, ok := v.Aux.(*Node); ok {
   133  				switch n.Class() {
   134  				case PPARAM, PPARAMOUT:
   135  					// Don't modify nodfp; it is a global.
   136  					if n != nodfp {
   137  						n.Name.SetUsed(true)
   138  					}
   139  				case PAUTO:
   140  					n.Name.SetUsed(true)
   141  				}
   142  			}
   143  			if !scratchUsed {
   144  				scratchUsed = v.Op.UsesScratch()
   145  			}
   146  
   147  		}
   148  	}
   149  
   150  	if f.Config.NeedsFpScratch && scratchUsed {
   151  		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
   152  	}
   153  
   154  	sort.Sort(byStackVar(fn.Dcl))
   155  
   156  	// Reassign stack offsets of the locals that are used.
   157  	lastHasPtr := false
   158  	for i, n := range fn.Dcl {
   159  		if n.Op != ONAME || n.Class() != PAUTO {
   160  			continue
   161  		}
   162  		if !n.Name.Used() {
   163  			fn.Dcl = fn.Dcl[:i]
   164  			break
   165  		}
   166  
   167  		dowidth(n.Type)
   168  		w := n.Type.Width
   169  		if w >= thearch.MAXWIDTH || w < 0 {
   170  			Fatalf("bad width")
   171  		}
   172  		if w == 0 && lastHasPtr {
   173  			// Pad between a pointer-containing object and a zero-sized object.
   174  			// This prevents a pointer to the zero-sized object from being interpreted
   175  			// as a pointer to the pointer-containing object (and causing it
   176  			// to be scanned when it shouldn't be). See issue 24993.
   177  			w = 1
   178  		}
   179  		s.stksize += w
   180  		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
   181  		if types.Haspointers(n.Type) {
   182  			s.stkptrsize = s.stksize
   183  			lastHasPtr = true
   184  		} else {
   185  			lastHasPtr = false
   186  		}
   187  		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
   188  			s.stksize = Rnd(s.stksize, int64(Widthptr))
   189  		}
   190  		n.Xoffset = -s.stksize
   191  	}
   192  
   193  	s.stksize = Rnd(s.stksize, int64(Widthreg))
   194  	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
   195  }
   196  
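         // funccompile prepares fn for code generation. Functions without a
         // body only get their argument stack map emitted (emitptrargsmap);
         // everything else is handed to compile with Curfn set.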
   197  func funccompile(fn *Node) {
   198  	if Curfn != nil {
   199  		Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
   200  	}
   201  
   202  	if fn.Type == nil {
   203  		if nerrors == 0 {
   204  			Fatalf("funccompile missing type")
   205  		}
   206  		return
   207  	}
   208  
   209  	// assign parameter offsets
   210  	dowidth(fn.Type)
   211  
   212  	if fn.Nbody.Len() == 0 {
   213  		// Initialize ABI wrappers if necessary.
   214  		fn.Func.initLSym(false)
   215  		emitptrargsmap(fn)
   216  		return
   217  	}
   218  
   219  	dclcontext = PAUTO
   220  	Curfn = fn
   221  
   222  	compile(fn)
   223  
   224  	Curfn = nil
   225  	dclcontext = PEXTERN
   226  }
   227  
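         // compile runs the remaining front-end passes on fn (order, walk,
         // instrumentation), sets up its LSym and stack-object type symbols,
         // and then either compiles it immediately or appends it to
         // compilequeue for the concurrent backend (see compilenow and
         // compileFunctions).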
   228  func compile(fn *Node) {
   229  	saveerrors()
   230  
   231  	order(fn)
   232  	if nerrors != 0 {
   233  		return
   234  	}
   235  
   236  	walk(fn)
   237  	if nerrors != 0 {
   238  		return
   239  	}
   240  	if instrumenting {
   241  		instrument(fn)
   242  	}
   243  
   244  	// From this point, there should be no uses of Curfn. Enforce that.
   245  	Curfn = nil
   246  
   247  	if fn.funcname() == "_" {
   248  		// We don't need to generate code for this function, just report errors in its body.
   249  		// At this point we've generated any errors needed.
   250  		// (Beyond here we generate only non-spec errors, like "stack frame too large".)
   251  		// See issue 29870.
   252  		return
   253  	}
   254  
   255  	// Set up the function's LSym early to avoid data races with the assemblers.
   256  	fn.Func.initLSym(true)
   257  
   258  	// Make sure type syms are declared for all types that might
   259  	// be types of stack objects. We need to do this here
   260  	// because symbols must be allocated before the parallel
   261  	// phase of the compiler.
   262  	for _, n := range fn.Func.Dcl {
   263  		switch n.Class() {
   264  		case PPARAM, PPARAMOUT, PAUTO:
   265  			if livenessShouldTrack(n) && n.Name.Addrtaken() {
   266  				dtypesym(n.Type)
   267  				// Also make sure we allocate a linker symbol
   268  				// for the stack object data, for the same reason.
   269  				if fn.Func.lsym.Func.StackObjects == nil {
   270  					fn.Func.lsym.Func.StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
   271  				}
   272  			}
   273  		}
   274  	}
   275  
   276  	if compilenow() {
   277  		compileSSA(fn, 0)
   278  	} else {
   279  		compilequeue = append(compilequeue, fn)
   280  	}
   281  }
   282  
   283  // compilenow reports whether to compile immediately.
   284  // If functions are not compiled immediately,
   285  // they are enqueued in compilequeue,
   286  // which is drained by compileFunctions.
   287  func compilenow() bool {
   288  	return nBackendWorkers == 1 && Debug_compilelater == 0
   289  }
   290  
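         // maxStackSize is the largest stack frame (locals, arguments, or total
         // frame) the compiler accepts; larger frames are recorded in
         // largeStackFrames and reported later as "stack frame too large"
         // errors.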
   291  const maxStackSize = 1 << 30
   292  
   293  // compileSSA builds an SSA backend function,
   294  // uses it to generate a plist,
   295  // and flushes that plist to machine code.
   296  // worker indicates which of the backend workers is doing the processing.
   297  func compileSSA(fn *Node, worker int) {
   298  	f := buildssa(fn, worker)
   299  	// Note: check arg size to fix issue 25507.
   300  	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
   301  		largeStackFramesMu.Lock()
   302  		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
   303  		largeStackFramesMu.Unlock()
   304  		return
   305  	}
   306  	pp := newProgs(fn, worker)
   307  	defer pp.Free()
   308  	genssa(f, pp)
   309  	// Check frame size again.
   310  	// The check above included only the space needed for local variables.
   311  	// After genssa, the space needed includes local variables and the callee arg region.
   312  	// We must do this check prior to calling pp.Flush.
   313  	// If there are any oversized stack frames,
   314  	// the assembler may emit inscrutable complaints about invalid instructions.
   315  	if pp.Text.To.Offset >= maxStackSize {
   316  		largeStackFramesMu.Lock()
   317  		locals := f.Frontend().(*ssafn).stksize
   318  		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
   319  		largeStackFramesMu.Unlock()
   320  		return
   321  	}
   322  
   323  	pp.Flush() // assemble, fill in boilerplate, etc.
   324  	// fieldtrack must be called after pp.Flush. See issue 20014.
   325  	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
   326  }
   327  
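         // In race-detector builds, seed the PRNG used by compileFunctions to
         // randomize the compilation order.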
   328  func init() {
   329  	if race.Enabled {
   330  		rand.Seed(time.Now().UnixNano())
   331  	}
   332  }
   333  
   334  // compileFunctions compiles all functions in compilequeue.
   335  // It fans out nBackendWorkers to do the work
   336  // and waits for them to complete.
   337  func compileFunctions() {
   338  	if len(compilequeue) != 0 {
   339  		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
   340  		if race.Enabled {
   341  			// Randomize compilation order to try to shake out races.
   342  			tmp := make([]*Node, len(compilequeue))
   343  			perm := rand.Perm(len(compilequeue))
   344  			for i, v := range perm {
   345  				tmp[v] = compilequeue[i]
   346  			}
   347  			copy(compilequeue, tmp)
   348  		} else {
   349  			// Compile the longest functions first,
   350  			// since they're most likely to be the slowest.
   351  			// This helps avoid stragglers.
   352  			sort.Slice(compilequeue, func(i, j int) bool {
   353  				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
   354  			})
   355  		}
   356  		var wg sync.WaitGroup
   357  		Ctxt.InParallel = true
   358  		c := make(chan *Node, nBackendWorkers)
   359  		for i := 0; i < nBackendWorkers; i++ {
   360  			wg.Add(1)
   361  			go func(worker int) {
   362  				for fn := range c {
   363  					compileSSA(fn, worker)
   364  				}
   365  				wg.Done()
   366  			}(i)
   367  		}
   368  		for _, fn := range compilequeue {
   369  			c <- fn
   370  		}
   371  		close(c)
   372  		compilequeue = nil
   373  		wg.Wait()
   374  		Ctxt.InParallel = false
   375  		sizeCalculationDisabled = false
   376  	}
   377  }
   378  
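         // debuginfo assembles the DWARF scope tree and inline-call records for
         // the function curfn (an *Node) whose symbol is fnsym: it gathers the
         // declarations to describe, builds dwarf.Vars for them via
         // createDwarfVars, and maps each variable to its lexical scope.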
   379  func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
   380  	fn := curfn.(*Node)
   381  	if fn.Func.Nname != nil {
   382  		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
   383  			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
   384  		}
   385  	}
   386  
   387  	var apdecls []*Node
   388  	// Populate decls for fn.
   389  	for _, n := range fn.Func.Dcl {
   390  		if n.Op != ONAME { // might be OTYPE or OLITERAL
   391  			continue
   392  		}
   393  		switch n.Class() {
   394  		case PAUTO:
   395  			if !n.Name.Used() {
   396  				// Text == nil -> generating abstract function
   397  				if fnsym.Func.Text != nil {
   398  					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
   399  				}
   400  				continue
   401  			}
   402  		case PPARAM, PPARAMOUT:
   403  		default:
   404  			continue
   405  		}
   406  		apdecls = append(apdecls, n)
   407  		fnsym.Func.RecordAutoType(ngotype(n).Linksym())
   408  	}
   409  
   410  	decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
   411  
    412  	// For each type referenced by the function's auto vars, attach a
    413  	// dummy relocation to the function symbol to ensure that the type
    414  	// is included in DWARF processing during linking.
   415  	typesyms := []*obj.LSym{}
    416  	for t := range fnsym.Func.Autot {
   417  		typesyms = append(typesyms, t)
   418  	}
   419  	sort.Sort(obj.BySymName(typesyms))
   420  	for _, sym := range typesyms {
   421  		r := obj.Addrel(infosym)
   422  		r.Sym = sym
   423  		r.Type = objabi.R_USETYPE
   424  	}
   425  	fnsym.Func.Autot = nil
   426  
   427  	var varScopes []ScopeID
   428  	for _, decl := range decls {
   429  		pos := decl.Pos
   430  		if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
   431  			// It's not clear which position is correct for captured variables here:
   432  			// * decl.Pos is the wrong position for captured variables, in the inner
   433  			//   function, but it is the right position in the outer function.
   434  			// * decl.Name.Defn is nil for captured variables that were arguments
   435  			//   on the outer function, however the decl.Pos for those seems to be
   436  			//   correct.
   437  			// * decl.Name.Defn is the "wrong" thing for variables declared in the
   438  			//   header of a type switch, it's their position in the header, rather
   439  			//   than the position of the case statement. In principle this is the
   440  			//   right thing, but here we prefer the latter because it makes each
   441  			//   instance of the header variable local to the lexical block of its
   442  			//   case statement.
   443  			// This code is probably wrong for type switch variables that are also
   444  			// captured.
   445  			pos = decl.Name.Defn.Pos
   446  		}
   447  		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
   448  	}
   449  
   450  	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
   451  	var inlcalls dwarf.InlCalls
   452  	if genDwarfInline > 0 {
   453  		inlcalls = assembleInlines(fnsym, dwarfVars)
   454  	}
   455  	return scopes, inlcalls
   456  }
   457  
    458  // createSimpleVars creates a DWARF entry for every variable declared in the
    459  // function, claiming that each is permanently on the stack.
   460  func createSimpleVars(apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
   461  	var vars []*dwarf.Var
   462  	var decls []*Node
   463  	selected := make(map[*Node]bool)
   464  	for _, n := range apDecls {
   465  		if n.IsAutoTmp() {
   466  			continue
   467  		}
   468  
   469  		decls = append(decls, n)
   470  		vars = append(vars, createSimpleVar(n))
   471  		selected[n] = true
   472  	}
   473  	return decls, vars, selected
   474  }
   475  
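         // createSimpleVar builds the dwarf.Var for a single declaration,
         // deriving its abbrev and frame offset from the node's class.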
   476  func createSimpleVar(n *Node) *dwarf.Var {
   477  	var abbrev int
   478  	offs := n.Xoffset
   479  
   480  	switch n.Class() {
   481  	case PAUTO:
   482  		abbrev = dwarf.DW_ABRV_AUTO
   483  		if Ctxt.FixedFrameSize() == 0 {
   484  			offs -= int64(Widthptr)
   485  		}
   486  		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" {
    487  			// A word of space is reserved for FP on ARM64 even if the frame pointer is disabled
   488  			offs -= int64(Widthptr)
   489  		}
   490  
   491  	case PPARAM, PPARAMOUT:
   492  		abbrev = dwarf.DW_ABRV_PARAM
   493  		offs += Ctxt.FixedFrameSize()
   494  	default:
   495  		Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
   496  	}
   497  
   498  	typename := dwarf.InfoPrefix + typesymname(n.Type)
   499  	inlIndex := 0
   500  	if genDwarfInline > 1 {
   501  		if n.Name.InlFormal() || n.Name.InlLocal() {
   502  			inlIndex = posInlIndex(n.Pos) + 1
   503  			if n.Name.InlFormal() {
   504  				abbrev = dwarf.DW_ABRV_PARAM
   505  			}
   506  		}
   507  	}
   508  	declpos := Ctxt.InnermostPos(n.Pos)
   509  	return &dwarf.Var{
   510  		Name:          n.Sym.Name,
   511  		IsReturnValue: n.Class() == PPARAMOUT,
   512  		IsInlFormal:   n.Name.InlFormal(),
   513  		Abbrev:        abbrev,
   514  		StackOffset:   int32(offs),
   515  		Type:          Ctxt.Lookup(typename),
   516  		DeclFile:      declpos.RelFilename(),
   517  		DeclLine:      declpos.RelLine(),
   518  		DeclCol:       declpos.Col(),
   519  		InlIndex:      int32(inlIndex),
   520  		ChildIndex:    -1,
   521  	}
   522  }
   523  
   524  // createComplexVars creates recomposed DWARF vars with location lists,
   525  // suitable for describing optimized code.
   526  func createComplexVars(fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
   527  	debugInfo := fn.DebugInfo
   528  
   529  	// Produce a DWARF variable entry for each user variable.
   530  	var decls []*Node
   531  	var vars []*dwarf.Var
   532  	ssaVars := make(map[*Node]bool)
   533  
   534  	for varID, dvar := range debugInfo.Vars {
   535  		n := dvar.(*Node)
   536  		ssaVars[n] = true
   537  		for _, slot := range debugInfo.VarSlots[varID] {
   538  			ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
   539  		}
   540  
   541  		if dvar := createComplexVar(fn, ssa.VarID(varID)); dvar != nil {
   542  			decls = append(decls, n)
   543  			vars = append(vars, dvar)
   544  		}
   545  	}
   546  
   547  	return decls, vars, ssaVars
   548  }
   549  
    550  // createDwarfVars processes fn, returning a list of DWARF variables and the
   551  // Nodes they represent.
   552  func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
   553  	// Collect a raw list of DWARF vars.
   554  	var vars []*dwarf.Var
   555  	var decls []*Node
   556  	var selected map[*Node]bool
   557  	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
   558  		decls, vars, selected = createComplexVars(fn)
   559  	} else {
   560  		decls, vars, selected = createSimpleVars(apDecls)
   561  	}
   562  
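         	// For inlined functions, describe the declarations of the
         	// pre-inlining version of the function rather than only what
         	// survived optimization.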
   563  	dcl := apDecls
   564  	if fnsym.WasInlined() {
   565  		dcl = preInliningDcls(fnsym)
   566  	}
   567  
   568  	// If optimization is enabled, the list above will typically be
   569  	// missing some of the original pre-optimization variables in the
   570  	// function (they may have been promoted to registers, folded into
   571  	// constants, dead-coded away, etc).  Input arguments not eligible
   572  	// for SSA optimization are also missing.  Here we add back in entries
   573  	// for selected missing vars. Note that the recipe below creates a
   574  	// conservative location. The idea here is that we want to
   575  	// communicate to the user that "yes, there is a variable named X
   576  	// in this function, but no, I don't have enough information to
   577  	// reliably report its contents."
   578  	// For non-SSA-able arguments, however, the correct information
   579  	// is known -- they have a single home on the stack.
   580  	for _, n := range dcl {
   581  		if _, found := selected[n]; found {
   582  			continue
   583  		}
   584  		c := n.Sym.Name[0]
   585  		if c == '.' || n.Type.IsUntyped() {
   586  			continue
   587  		}
   588  		if n.Class() == PPARAM && !canSSAType(n.Type) {
   589  			// SSA-able args get location lists, and may move in and
   590  			// out of registers, so those are handled elsewhere.
   591  			// Autos and named output params seem to get handled
   592  			// with VARDEF, which creates location lists.
   593  			// Args not of SSA-able type are treated here; they
   594  			// are homed on the stack in a single place for the
   595  			// entire call.
   596  			vars = append(vars, createSimpleVar(n))
   597  			decls = append(decls, n)
   598  			continue
   599  		}
   600  		typename := dwarf.InfoPrefix + typesymname(n.Type)
   601  		decls = append(decls, n)
   602  		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
   603  		isReturnValue := (n.Class() == PPARAMOUT)
   604  		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
   605  			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
   606  		} else if n.Class() == PAUTOHEAP {
    607  			// If the decl in question has been promoted to the heap, do a
    608  			// bit of extra work to recover its original class (auto or
    609  			// param); see issue 30908. This ensures that we get the proper
    610  			// signature in the abstract function DIE, but leaves a
    611  			// misleading location for the param (we want pointer-to-heap
    612  			// and not stack).
   613  			// TODO(thanm): generate a better location expression
   614  			stackcopy := n.Name.Param.Stackcopy
   615  			if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
   616  				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
   617  				isReturnValue = (stackcopy.Class() == PPARAMOUT)
   618  			}
   619  		}
   620  		inlIndex := 0
   621  		if genDwarfInline > 1 {
   622  			if n.Name.InlFormal() || n.Name.InlLocal() {
   623  				inlIndex = posInlIndex(n.Pos) + 1
   624  				if n.Name.InlFormal() {
   625  					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
   626  				}
   627  			}
   628  		}
   629  		declpos := Ctxt.InnermostPos(n.Pos)
   630  		vars = append(vars, &dwarf.Var{
   631  			Name:          n.Sym.Name,
   632  			IsReturnValue: isReturnValue,
   633  			Abbrev:        abbrev,
   634  			StackOffset:   int32(n.Xoffset),
   635  			Type:          Ctxt.Lookup(typename),
   636  			DeclFile:      declpos.RelFilename(),
   637  			DeclLine:      declpos.RelLine(),
   638  			DeclCol:       declpos.Col(),
   639  			InlIndex:      int32(inlIndex),
   640  			ChildIndex:    -1,
   641  		})
    642  		// Record the Go type to ensure that it gets emitted by the linker.
   643  		fnsym.Func.RecordAutoType(ngotype(n).Linksym())
   644  	}
   645  
   646  	return decls, vars
   647  }
   648  
    649  // preInliningDcls takes a function that was inlined at some point
    650  // during the compilation and returns the list of nodes corresponding
    651  // to the autos/locals in that function prior to inlining. If the
    652  // function is not local to the package being compiled, the names of
    653  // its variables may have been "versioned" to avoid conflicts with
    654  // local vars; that versioning is stripped before names are examined.
   655  func preInliningDcls(fnsym *obj.LSym) []*Node {
   656  	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
   657  	var rdcl []*Node
   658  	for _, n := range fn.Func.Inl.Dcl {
   659  		c := n.Sym.Name[0]
   660  		// Avoid reporting "_" parameters, since if there are more than
   661  		// one, it can result in a collision later on, as in #23179.
   662  		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
   663  			continue
   664  		}
   665  		rdcl = append(rdcl, n)
   666  	}
   667  	return rdcl
   668  }
   669  
   670  // stackOffset returns the stack location of a LocalSlot relative to the
   671  // stack pointer, suitable for use in a DWARF location entry. This has nothing
   672  // to do with its offset in the user variable.
   673  func stackOffset(slot ssa.LocalSlot) int32 {
   674  	n := slot.N.(*Node)
   675  	var base int64
   676  	switch n.Class() {
   677  	case PAUTO:
   678  		if Ctxt.FixedFrameSize() == 0 {
   679  			base -= int64(Widthptr)
   680  		}
   681  		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" {
    682  			// A word of space is reserved for FP on ARM64 even if the frame pointer is disabled
   683  			base -= int64(Widthptr)
   684  		}
   685  	case PPARAM, PPARAMOUT:
   686  		base += Ctxt.FixedFrameSize()
   687  	}
   688  	return int32(base + n.Xoffset + slot.Off)
   689  }
   690  
   691  // createComplexVar builds a single DWARF variable entry and location list.
   692  func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
   693  	debug := fn.DebugInfo
   694  	n := debug.Vars[varID].(*Node)
   695  
   696  	var abbrev int
   697  	switch n.Class() {
   698  	case PAUTO:
   699  		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
   700  	case PPARAM, PPARAMOUT:
   701  		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
   702  	default:
   703  		return nil
   704  	}
   705  
   706  	gotype := ngotype(n).Linksym()
   707  	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
   708  	inlIndex := 0
   709  	if genDwarfInline > 1 {
   710  		if n.Name.InlFormal() || n.Name.InlLocal() {
   711  			inlIndex = posInlIndex(n.Pos) + 1
   712  			if n.Name.InlFormal() {
   713  				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
   714  			}
   715  		}
   716  	}
   717  	declpos := Ctxt.InnermostPos(n.Pos)
   718  	dvar := &dwarf.Var{
   719  		Name:          n.Sym.Name,
   720  		IsReturnValue: n.Class() == PPARAMOUT,
   721  		IsInlFormal:   n.Name.InlFormal(),
   722  		Abbrev:        abbrev,
   723  		Type:          Ctxt.Lookup(typename),
   724  		// The stack offset is used as a sorting key, so for decomposed
   725  		// variables just give it the first one. It's not used otherwise.
   726  		// This won't work well if the first slot hasn't been assigned a stack
   727  		// location, but it's not obvious how to do better.
   728  		StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
   729  		DeclFile:    declpos.RelFilename(),
   730  		DeclLine:    declpos.RelLine(),
   731  		DeclCol:     declpos.Col(),
   732  		InlIndex:    int32(inlIndex),
   733  		ChildIndex:  -1,
   734  	}
   735  	list := debug.LocationLists[varID]
   736  	if len(list) != 0 {
   737  		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
   738  			debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
   739  		}
   740  	}
   741  	return dvar
   742  }
   743  
   744  // fieldtrack adds R_USEFIELD relocations to fnsym to record any
   745  // struct fields that it used.
   746  func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
   747  	if fnsym == nil {
   748  		return
   749  	}
   750  	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
   751  		return
   752  	}
   753  
   754  	trackSyms := make([]*types.Sym, 0, len(tracked))
   755  	for sym := range tracked {
   756  		trackSyms = append(trackSyms, sym)
   757  	}
   758  	sort.Sort(symByName(trackSyms))
   759  	for _, sym := range trackSyms {
   760  		r := obj.Addrel(fnsym)
   761  		r.Sym = sym.Linksym()
   762  		r.Type = objabi.R_USEFIELD
   763  	}
   764  }
   765  
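         // symByName implements sort.Interface for []*types.Sym, ordering
         // symbols by name.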
   766  type symByName []*types.Sym
   767  
   768  func (a symByName) Len() int           { return len(a) }
   769  func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
   770  func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }