github.com/slayercat/go@v0.0.0-20170428012452-c51559813f61/src/cmd/compile/internal/gc/pgen.go (about)

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
     8  	"cmd/compile/internal/ssa"
     9  	"cmd/compile/internal/types"
    10  	"cmd/internal/dwarf"
    11  	"cmd/internal/obj"
    12  	"cmd/internal/objabi"
    13  	"cmd/internal/src"
    14  	"cmd/internal/sys"
    15  	"fmt"
    16  	"math/rand"
    17  	"sort"
    18  	"sync"
    19  )
    20  
    21  // "Portable" code generation.
    22  
    23  var (
    24  	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
    25  	compilequeue    []*Node // functions waiting to be compiled
    26  )
    27  
// emitptrargsmap emits the pointer liveness bitmap for the arguments
// (and, if any, the results) of Curfn as a RODATA symbol named
// "<fn>.args_stackmap". It is called for functions with no Go body
// (see compile), whose argument maps cannot be derived from SSA.
func emitptrargsmap() {
	// "_" functions are discarded; no map is needed.
	if Curfn.funcname() == "_" {
		return
	}
	sym := lookup(fmt.Sprintf("%s.args_stackmap", Curfn.funcname()))
	lsym := sym.Linksym()

	// One bit per pointer-sized word of the argument frame.
	// NOTE(review): bv is sized nptr*2 — presumably headroom shared
	// with the results map below; confirm against bvalloc users.
	nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	// One bitmap for the arguments, plus a second one when there
	// are results.
	nbitmap := 1
	if Curfn.Type.Results().NumFields() > 0 {
		nbitmap = 2
	}
	// Symbol layout: bitmap count, bits-per-bitmap, then the bitmaps.
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))
	var xoffset int64
	if Curfn.IsMethod() {
		// The receiver contributes bits at the start of the frame.
		xoffset = 0
		onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv)
	}

	if Curfn.Type.Params().NumFields() > 0 {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
	}

	off = dbvec(lsym, off, bv)
	if Curfn.Type.Results().NumFields() > 0 {
		// Results get their own bitmap, reusing bv (bits for the
		// results are set on top of the argument bits already there).
		xoffset = 0
		onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
    63  
    64  // cmpstackvarlt reports whether the stack variable a sorts before b.
    65  //
    66  // Sort the list of stack variables. Autos after anything else,
    67  // within autos, unused after used, within used, things with
    68  // pointers first, zeroed things first, and then decreasing size.
    69  // Because autos are laid out in decreasing addresses
    70  // on the stack, pointers first, zeroed things first and decreasing size
    71  // really means, in memory, things with pointers needing zeroing at
    72  // the top of the stack and increasing in size.
    73  // Non-autos sort on offset.
    74  func cmpstackvarlt(a, b *Node) bool {
    75  	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
    76  		return b.Class() == PAUTO
    77  	}
    78  
    79  	if a.Class() != PAUTO {
    80  		return a.Xoffset < b.Xoffset
    81  	}
    82  
    83  	if a.Name.Used() != b.Name.Used() {
    84  		return a.Name.Used()
    85  	}
    86  
    87  	ap := types.Haspointers(a.Type)
    88  	bp := types.Haspointers(b.Type)
    89  	if ap != bp {
    90  		return ap
    91  	}
    92  
    93  	ap = a.Name.Needzero()
    94  	bp = b.Name.Needzero()
    95  	if ap != bp {
    96  		return ap
    97  	}
    98  
    99  	if a.Type.Width != b.Type.Width {
   100  		return a.Type.Width > b.Type.Width
   101  	}
   102  
   103  	return a.Sym.Name < b.Sym.Name
   104  }
   105  
   106  // byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
   107  type byStackVar []*Node
   108  
   109  func (s byStackVar) Len() int           { return len(s) }
   110  func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
   111  func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
   112  
// AllocFrame assigns stack offsets to the local variables of f's
// function and computes the frame size (s.stksize) and the size of
// its pointer-containing prefix (s.stkptrsize).
func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTO's unused.
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	// Any local slot assigned by the register allocator is live.
	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	// Mark nodes referenced from value Aux fields as used, and note
	// whether any op needs the scratch slot.
	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			switch a := v.Aux.(type) {
			case *ssa.ArgSymbol:
				n := a.Node.(*Node)
				// Don't modify nodfp; it is a global.
				if n != nodfp {
					n.Name.SetUsed(true)
				}
			case *ssa.AutoSymbol:
				a.Node.(*Node).Name.SetUsed(true)
			}

			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}
		}
	}

	// Allocate a scratch word for FP<->int moves on architectures
	// that need one, but only when some op actually uses it.
	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	// After this sort, unused autos are at the tail of fn.Dcl
	// (cmpstackvarlt orders used before unused), which the
	// truncation below relies on.
	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			// First unused auto: everything from here on is unused;
			// drop it from the declaration list.
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		// Offsets grow downward: accumulate size, align, and record
		// the (negative) offset from the frame top.
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		// Because pointer-bearing autos sort first, stkptrsize ends
		// up covering exactly the pointer-containing prefix.
		if types.Haspointers(n.Type) {
			s.stkptrsize = s.stksize
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		n.Xoffset = -s.stksize
	}

	// Round final sizes up to the register size.
	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
   186  
// compile runs the front-end passes (order, walk, instrumentation)
// on fn and then either compiles it to machine code immediately or
// enqueues it on compilequeue for the backend workers.
func compile(fn *Node) {
	Curfn = fn
	dowidth(fn.Type)

	// Bodyless functions (assembly/external) only need an
	// argument pointer map for the runtime.
	if fn.Nbody.Len() == 0 {
		emitptrargsmap()
		return
	}

	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	// Set up the function's LSym early to avoid data races with the assemblers.
	fn.Func.initLSym()

	if compilenow() {
		compileSSA(fn, 0)
	} else {
		// Deferred: compileFunctions will drain the queue concurrently.
		compilequeue = append(compilequeue, fn)
	}
}
   223  
// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow() bool {
	// With a single backend worker there is nothing to gain from
	// queueing, so compile synchronously.
	return nBackendWorkers == 1
}
   231  
// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	ssafn := buildssa(fn, worker)
	pp := newProgs(fn, worker)
	genssa(ssafn, pp)
	// Frames whose size doesn't fit in an int32 can't be assembled;
	// record their positions for diagnosis instead of flushing.
	if pp.Text.To.Offset < 1<<31 {
		pp.Flush()
	} else {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, fn.Pos)
		largeStackFramesMu.Unlock()
	}
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
	pp.Free()
}
   251  
   252  // compileFunctions compiles all functions in compilequeue.
   253  // It fans out nBackendWorkers to do the work
   254  // and waits for them to complete.
   255  func compileFunctions() {
   256  	if len(compilequeue) != 0 {
   257  		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
   258  		if raceEnabled {
   259  			// Randomize compilation order to try to shake out races.
   260  			tmp := make([]*Node, len(compilequeue))
   261  			perm := rand.Perm(len(compilequeue))
   262  			for i, v := range perm {
   263  				tmp[v] = compilequeue[i]
   264  			}
   265  			copy(compilequeue, tmp)
   266  		} else {
   267  			// Compile the longest functions first,
   268  			// since they're most likely to be the slowest.
   269  			// This helps avoid stragglers.
   270  			obj.SortSlice(compilequeue, func(i, j int) bool {
   271  				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
   272  			})
   273  		}
   274  		var wg sync.WaitGroup
   275  		c := make(chan *Node)
   276  		for i := 0; i < nBackendWorkers; i++ {
   277  			wg.Add(1)
   278  			go func(worker int) {
   279  				for fn := range c {
   280  					compileSSA(fn, worker)
   281  				}
   282  				wg.Done()
   283  			}(i)
   284  		}
   285  		for _, fn := range compilequeue {
   286  			c <- fn
   287  		}
   288  		close(c)
   289  		compilequeue = nil
   290  		wg.Wait()
   291  		sizeCalculationDisabled = false
   292  	}
   293  }
   294  
// debuginfo builds the DWARF variable records for the function curfn
// (a *Node), appending obj.Auto entries to fnsym as a side effect,
// and returns the dwarf.Vars sorted by offset.
func debuginfo(fnsym *obj.LSym, curfn interface{}) []*dwarf.Var {
	fn := curfn.(*Node)
	// Sanity check: the provided LSym must belong to this function.
	if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
		Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
	}

	var vars []*dwarf.Var
	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}

		var name obj.AddrName
		var abbrev int
		offs := n.Xoffset

		switch n.Class() {
		case PAUTO:
			if !n.Name.Used() {
				Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
			}
			name = obj.NAME_AUTO

			abbrev = dwarf.DW_ABRV_AUTO
			// NOTE(review): these adjustments convert the frame-top
			// relative Xoffset into the DWARF frame base; the extra
			// Widthptr terms account for the saved return address and
			// (when enabled) the frame pointer — confirm per-arch.
			if Ctxt.FixedFrameSize() == 0 {
				offs -= int64(Widthptr)
			}
			if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
				offs -= int64(Widthptr)
			}

		case PPARAM, PPARAMOUT:
			name = obj.NAME_PARAM

			abbrev = dwarf.DW_ABRV_PARAM
			offs += Ctxt.FixedFrameSize()

		default:
			// Other classes (e.g. globals) carry no per-function
			// debug info.
			continue
		}

		gotype := ngotype(n).Linksym()
		// Record the variable for the assembler/linker regardless of
		// whether it gets a DWARF entry below.
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(n.Xoffset),
			Name:    name,
			Gotype:  gotype,
		})

		// Compiler-generated temporaries get no DWARF variable.
		if n.IsAutoTmp() {
			continue
		}

		// Derive the DWARF type symbol name from the Go type symbol
		// ("type.T" -> dwarf.InfoPrefix + "T").
		typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
		vars = append(vars, &dwarf.Var{
			Name:   n.Sym.Name,
			Abbrev: abbrev,
			Offset: int32(offs),
			Type:   Ctxt.Lookup(typename),
		})
	}

	// Stable sort so that ties are broken with declaration order.
	sort.Stable(dwarf.VarsByOffset(vars))

	return vars
}
   362  
   363  // fieldtrack adds R_USEFIELD relocations to fnsym to record any
   364  // struct fields that it used.
   365  func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
   366  	if fnsym == nil {
   367  		return
   368  	}
   369  	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
   370  		return
   371  	}
   372  
   373  	trackSyms := make([]*types.Sym, 0, len(tracked))
   374  	for sym := range tracked {
   375  		trackSyms = append(trackSyms, sym)
   376  	}
   377  	sort.Sort(symByName(trackSyms))
   378  	for _, sym := range trackSyms {
   379  		r := obj.Addrel(fnsym)
   380  		r.Sym = sym.Linksym()
   381  		r.Type = objabi.R_USEFIELD
   382  	}
   383  }
   384  
   385  type symByName []*types.Sym
   386  
   387  func (a symByName) Len() int           { return len(a) }
   388  func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
   389  func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }