github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/ssagen/pgen.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssagen

import (
	"github.com/bir3/gocompiler/src/internal/buildcfg"
	"sort"
	"sync"

	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/objw"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ssa"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
	"github.com/bir3/gocompiler/src/cmd/internal/obj"
	"github.com/bir3/gocompiler/src/cmd/internal/objabi"
	"github.com/bir3/gocompiler/src/cmd/internal/src"
)

// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables: autos after anything else;
// within autos, unused after used; within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *ir.Name) bool {
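	// Autos sort after everything else, so a sorts first exactly when
	// b is the one that needs frame space.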
	if needAlloc(a) != needAlloc(b) {
		return needAlloc(b)
	}

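	// Neither needs frame space: both live in the caller's frame and
	// sort by their fixed offsets.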
	if !needAlloc(a) {
		return a.FrameOffset() < b.FrameOffset()
	}

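	// Both are autos: used variables sort before unused ones, so the
	// unused tail can be trimmed off after sorting.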
	if a.Used() != b.Used() {
		return a.Used()
	}

	ap := a.Type().HasPointers()
	bp := b.Type().HasPointers()
	if ap != bp {
		return ap
	}

	ap = a.Needzero()
	bp = b.Needzero()
	if ap != bp {
		return ap
	}

	if a.Type().Size() != b.Type().Size() {
		return a.Type().Size() > b.Type().Size()
	}

	return a.Sym().Name < b.Sym().Name
}

// byStackVar implements sort.Interface for []*ir.Name using cmpstackvarlt.
type byStackVar []*ir.Name

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// needAlloc reports whether n is within the current frame, for which we need to
// allocate space. In particular, it excludes arguments and results, which are in
// the caller's frame.
func needAlloc(n *ir.Name) bool {
	if n.Op() != ir.ONAME {
		base.FatalfAt(n.Pos(), "%v has unexpected Op %v", n, n.Op())
	}

	switch n.Class {
	case ir.PAUTO:
		return true
	case ir.PPARAM:
		return false
	case ir.PPARAMOUT:
		return n.IsOutputParamInRegisters()

	default:
		base.FatalfAt(n.Pos(), "%v has unexpected Class %v", n, n.Class)
		return false
	}
}

func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	s.stkalign = int64(types.RegSize)
	fn := s.curfn

	// Mark the PAUTOs unused.
	for _, ln := range fn.Dcl {
		if needAlloc(ln) {
			ln.SetUsed(false)
		}
	}

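	// Mark a variable used when the register allocator placed a value
	// in one of its stack slots.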
	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.SetUsed(true)
		}
	}

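	// Mark variables referenced directly by instructions as used.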
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if n, ok := v.Aux.(*ir.Name); ok {
				switch n.Class {
				case ir.PPARAMOUT:
					if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef {
						// ignore VarDef, look for "real" uses.
						// TODO: maybe do this for PAUTO as well?
						continue
					}
					fallthrough
				case ir.PPARAM, ir.PAUTO:
					n.SetUsed(true)
				}
			}
		}
	}

	// Use sort.Stable instead of sort.Sort so stack layout (and thus
	// compiler output) is less sensitive to frontend changes that
	// introduce or remove unused variables.
	sort.Stable(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	lastHasPtr := false
	for i, n := range fn.Dcl {
		if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) {
			// i.e., stack assign if AUTO, or if PARAMOUT in registers (which has no predefined spill locations)
			continue
		}
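		// fn.Dcl is now sorted with used autos first; the tail from the
		// first unused name onward needs no frame space and is recorded
		// in OptDcl for debug information.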
		if !n.Used() {
			fn.DebugInfo.(*ssa.FuncDebug).OptDcl = fn.Dcl[i:]
			fn.Dcl = fn.Dcl[:i]
			break
		}

		types.CalcSize(n.Type())
		w := n.Type().Size()
		if w >= types.MaxWidth || w < 0 {
			base.Fatalf("bad width")
		}
		if w == 0 && lastHasPtr {
			// Pad between a pointer-containing object and a zero-sized object.
			// This prevents a pointer to the zero-sized object from being interpreted
			// as a pointer to the pointer-containing object (and causing it
			// to be scanned when it shouldn't be). See issue 24993.
			w = 1
		}
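		// The frame grows downward: bump the running size by this
		// variable's width, then align it for the variable's type.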
		s.stksize += w
		s.stksize = types.RoundUp(s.stksize, n.Type().Alignment())
		if n.Type().Alignment() > int64(types.RegSize) {
			s.stkalign = n.Type().Alignment()
		}
		if n.Type().HasPointers() {
			s.stkptrsize = s.stksize
			lastHasPtr = true
		} else {
			lastHasPtr = false
		}
		n.SetFrameOffset(-s.stksize)
	}

	s.stksize = types.RoundUp(s.stksize, s.stkalign)
	s.stkptrsize = types.RoundUp(s.stkptrsize, s.stkalign)
}

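// maxStackSize (1 GB) bounds a function's frame. Larger frames are
// recorded in largeStackFrames and reported as errors by CheckLargeStacks.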
const maxStackSize = 1 << 30

// Compile builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func Compile(fn *ir.Func, worker int) {
	f := buildssa(fn, worker)
	// Note: check arg size to fix issue 25507.
	if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()})
		largeStackFramesMu.Unlock()
		return
	}
	pp := objw.NewProgs(fn, worker)
	defer pp.Free()
	genssa(f, pp)
	// Check frame size again.
	// The check above included only the space needed for local variables.
	// After genssa, the space needed includes local variables and the callee arg region.
	// We must do this check prior to calling pp.Flush.
	// If there are any oversized stack frames,
	// the assembler may emit inscrutable complaints about invalid instructions.
	if pp.Text.To.Offset >= maxStackSize {
		largeStackFramesMu.Lock()
		locals := f.Frontend().(*ssafn).stksize
		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
		largeStackFramesMu.Unlock()
		return
	}

	pp.Flush() // assemble, fill in boilerplate, etc.
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}

// StackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func StackOffset(slot ssa.LocalSlot) int32 {
	n := slot.N
	var off int64
	switch n.Class {
	case ir.PPARAM, ir.PPARAMOUT:
		if !n.IsOutputParamInRegisters() {
			off = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize
			break
		}
		fallthrough // PPARAMOUT in registers allocates like an AUTO
	case ir.PAUTO:
		off = n.FrameOffset()
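		// Frame offsets of autos are negative. Account for the
		// return-address word on architectures with no fixed frame area,
		// and for the saved frame pointer when frame pointers are enabled.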
		if base.Ctxt.Arch.FixedFrameSize == 0 {
			off -= int64(types.PtrSize)
		}
		if buildcfg.FramePointerEnabled {
			off -= int64(types.PtrSize)
		}
	}
	return int32(off + slot.Off)
}

// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) {
	if fnsym == nil {
		return
	}
	if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 {
		return
	}

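	// Sort the tracked symbols by name so the relocations are emitted
	// in a deterministic order.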
	trackSyms := make([]*obj.LSym, 0, len(tracked))
	for sym := range tracked {
		trackSyms = append(trackSyms, sym)
	}
	sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name })
	for _, sym := range trackSyms {
		r := obj.Addrel(fnsym)
		r.Sym = sym
		r.Type = objabi.R_USEFIELD
	}
}

// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
	locals int64
	args   int64
	callee int64
	pos    src.XPos
}

var (
	largeStackFramesMu sync.Mutex // protects largeStackFrames
	largeStackFrames   []largeStack
)

func CheckLargeStacks() {
	// Check whether any of the functions we have compiled have gigantic stack frames.
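	// Sort by source position so the errors are reported in a stable order.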
	sort.Slice(largeStackFrames, func(i, j int) bool {
		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
	})
	for _, large := range largeStackFrames {
		if large.callee != 0 {
			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
		} else {
			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
		}
	}
}