github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/cmd/compile/internal/gc/pgen.go (about)

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
     8  	"cmd/compile/internal/ssa"
     9  	"cmd/compile/internal/types"
    10  	"cmd/internal/dwarf"
    11  	"cmd/internal/obj"
    12  	"cmd/internal/objabi"
    13  	"cmd/internal/src"
    14  	"cmd/internal/sys"
    15  	"fmt"
    16  	"math/rand"
    17  	"sort"
    18  	"sync"
    19  	"time"
    20  )
    21  
    22  // "Portable" code generation.
    23  
var (
	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
	compilequeue    []*Node // functions waiting to be compiled; filled by compile, drained by compileFunctions
)
    28  
// emitptrargsmap emits a "<name>.args_stackmap" RODATA symbol for
// Curfn containing pointer bitmaps for its receiver/parameters and,
// when present, its results. compile calls this only for bodyless
// functions; presumably the runtime uses it to scan the argument
// area of assembly implementations — TODO(review): confirm.
func emitptrargsmap() {
	// Blank functions get no map.
	if Curfn.funcname() == "_" {
		return
	}
	sym := lookup(fmt.Sprintf("%s.args_stackmap", Curfn.funcname()))
	lsym := sym.Linksym()

	// Bit vector sized for the whole argument area; a second bitmap
	// is emitted only when the function has results.
	nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	nbitmap := 1
	if Curfn.Type.Results().NumFields() > 0 {
		nbitmap = 2
	}
	// Header: number of bitmaps, then bits per bitmap.
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))
	var xoffset int64
	// Receiver (for methods) and parameters share the first bitmap.
	if Curfn.IsMethod() {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv)
	}

	if Curfn.Type.Params().NumFields() > 0 {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
	}

	off = dbvec(lsym, off, bv)
	// Second bitmap covers the results, when there are any.
	if Curfn.Type.Results().NumFields() > 0 {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
    64  
    65  // cmpstackvarlt reports whether the stack variable a sorts before b.
    66  //
    67  // Sort the list of stack variables. Autos after anything else,
    68  // within autos, unused after used, within used, things with
    69  // pointers first, zeroed things first, and then decreasing size.
    70  // Because autos are laid out in decreasing addresses
    71  // on the stack, pointers first, zeroed things first and decreasing size
    72  // really means, in memory, things with pointers needing zeroing at
    73  // the top of the stack and increasing in size.
    74  // Non-autos sort on offset.
    75  func cmpstackvarlt(a, b *Node) bool {
    76  	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
    77  		return b.Class() == PAUTO
    78  	}
    79  
    80  	if a.Class() != PAUTO {
    81  		return a.Xoffset < b.Xoffset
    82  	}
    83  
    84  	if a.Name.Used() != b.Name.Used() {
    85  		return a.Name.Used()
    86  	}
    87  
    88  	ap := types.Haspointers(a.Type)
    89  	bp := types.Haspointers(b.Type)
    90  	if ap != bp {
    91  		return ap
    92  	}
    93  
    94  	ap = a.Name.Needzero()
    95  	bp = b.Name.Needzero()
    96  	if ap != bp {
    97  		return ap
    98  	}
    99  
   100  	if a.Type.Width != b.Type.Width {
   101  		return a.Type.Width > b.Type.Width
   102  	}
   103  
   104  	return a.Sym.Name < b.Sym.Name
   105  }
   106  
// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
   113  
// AllocFrame assigns stack offsets to the used PAUTO variables of f's
// function, truncates fn.Dcl after the last used auto, and records
// the resulting frame size (stksize) and the size of its
// pointer-containing prefix (stkptrsize).
func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTO's unused.
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	// Re-mark any auto that received a register-allocator local slot.
	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	// Mark nodes referenced from SSA values as used, and note whether
	// any value needs the FP scratch slot.
	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			switch a := v.Aux.(type) {
			case *ssa.ArgSymbol:
				n := a.Node.(*Node)
				// Don't modify nodfp; it is a global.
				if n != nodfp {
					n.Name.SetUsed(true)
				}
			case *ssa.AutoSymbol:
				a.Node.(*Node).Name.SetUsed(true)
			}

			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}
		}
	}

	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	// Sort per cmpstackvarlt: used autos first, pointer-holding and
	// needs-zero autos before others, larger before smaller.
	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			// Unused autos sorted to the end; drop them from Dcl.
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		// Pointer-holding autos sort first, so stkptrsize tracks the
		// prefix of the frame that can contain pointers.
		if types.Haspointers(n.Type) {
			s.stkptrsize = s.stksize
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		// Autos live below the frame pointer, hence negative offsets.
		n.Xoffset = -s.stksize
	}

	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
   187  
// compile prepares fn for the backend: bodyless functions only get an
// args stackmap; otherwise fn is ordered, walked, optionally
// instrumented, and then either compiled immediately (compilenow) or
// appended to compilequeue for compileFunctions.
func compile(fn *Node) {
	Curfn = fn
	dowidth(fn.Type)

	// Bodyless function: just emit the pointer map for its arguments.
	if fn.Nbody.Len() == 0 {
		emitptrargsmap()
		return
	}

	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	// Set up the function's LSym early to avoid data races with the assemblers.
	fn.Func.initLSym()

	if compilenow() {
		compileSSA(fn, 0)
	} else {
		compilequeue = append(compilequeue, fn)
	}
}
   224  
   225  // compilenow reports whether to compile immediately.
   226  // If functions are not compiled immediately,
   227  // they are enqueued in compilequeue,
   228  // which is drained by compileFunctions.
   229  func compilenow() bool {
   230  	return nBackendWorkers == 1 && Debug_compilelater == 0
   231  }
   232  
   233  const maxStackSize = 1 << 31
   234  
   235  // compileSSA builds an SSA backend function,
   236  // uses it to generate a plist,
   237  // and flushes that plist to machine code.
   238  // worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	ssafn := buildssa(fn, worker)
	pp := newProgs(fn, worker)
	genssa(ssafn, pp)
	// Oversized frames are not flushed; record their positions so an
	// error can be reported later (largeStackFrames is shared across
	// workers, hence the mutex).
	if pp.Text.To.Offset < maxStackSize {
		pp.Flush()
	} else {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, fn.Pos)
		largeStackFramesMu.Unlock()
	}
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
	pp.Free()
}
   254  
// init seeds the PRNG when the compiler is built with the race
// detector, so that compileFunctions can randomize the compilation
// order across runs.
func init() {
	if raceEnabled {
		rand.Seed(time.Now().UnixNano())
	}
}
   260  
   261  // compileFunctions compiles all functions in compilequeue.
   262  // It fans out nBackendWorkers to do the work
   263  // and waits for them to complete.
func compileFunctions() {
	if len(compilequeue) != 0 {
		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
		if raceEnabled {
			// Randomize compilation order to try to shake out races.
			tmp := make([]*Node, len(compilequeue))
			perm := rand.Perm(len(compilequeue))
			for i, v := range perm {
				tmp[v] = compilequeue[i]
			}
			copy(compilequeue, tmp)
		} else {
			// Compile the longest functions first,
			// since they're most likely to be the slowest.
			// This helps avoid stragglers.
			obj.SortSlice(compilequeue, func(i, j int) bool {
				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
			})
		}
		// Fan out nBackendWorkers goroutines, all fed from channel c.
		var wg sync.WaitGroup
		c := make(chan *Node, nBackendWorkers)
		for i := 0; i < nBackendWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				for fn := range c {
					compileSSA(fn, worker)
				}
				wg.Done()
			}(i)
		}
		for _, fn := range compilequeue {
			c <- fn
		}
		// Closing c ends the workers' range loops once the queue drains.
		close(c)
		compilequeue = nil
		wg.Wait()
		sizeCalculationDisabled = false
	}
}
   303  
// debuginfo builds DWARF variable records and scope assignments for
// curfn's declarations, appends an obj.Auto entry to fnsym for each
// ONAME auto/param, and returns the assembled DWARF scopes.
func debuginfo(fnsym *obj.LSym, curfn interface{}) []dwarf.Scope {
	fn := curfn.(*Node)
	if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
		Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
	}

	var dwarfVars []*dwarf.Var
	var varScopes []ScopeID

	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}

		var name obj.AddrName
		var abbrev int
		offs := n.Xoffset

		switch n.Class() {
		case PAUTO:
			if !n.Name.Used() {
				Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
			}
			name = obj.NAME_AUTO

			abbrev = dwarf.DW_ABRV_AUTO
			// Adjust the offset by one pointer word per condition —
			// presumably for the saved return address and saved frame
			// pointer; TODO(review): confirm against the ABI layout.
			if Ctxt.FixedFrameSize() == 0 {
				offs -= int64(Widthptr)
			}
			if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
				offs -= int64(Widthptr)
			}

		case PPARAM, PPARAMOUT:
			name = obj.NAME_PARAM

			abbrev = dwarf.DW_ABRV_PARAM
			offs += Ctxt.FixedFrameSize()

		default:
			// Other classes get no debug entry here.
			continue
		}

		gotype := ngotype(n).Linksym()
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(n.Xoffset),
			Name:    name,
			Gotype:  gotype,
		})

		// Compiler temporaries get an Auto entry above but no
		// user-visible DWARF variable.
		if n.IsAutoTmp() {
			continue
		}

		// Replace the "type." prefix with the DWARF info prefix to
		// form the type symbol name.
		typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
		dwarfVars = append(dwarfVars, &dwarf.Var{
			Name:   n.Sym.Name,
			Abbrev: abbrev,
			Offset: int32(offs),
			Type:   Ctxt.Lookup(typename),
		})

		var scope ScopeID
		if !n.Name.Captured() && !n.Name.Byval() {
			// n.Pos of captured variables is their first
			// use in the closure but they should always
			// be assigned to scope 0 instead.
			// TODO(mdempsky): Verify this.
			scope = findScope(fn.Func.Marks, n.Pos)
		}

		varScopes = append(varScopes, scope)
	}

	return assembleScopes(fnsym, fn, dwarfVars, varScopes)
}
   381  
   382  // fieldtrack adds R_USEFIELD relocations to fnsym to record any
   383  // struct fields that it used.
   384  func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
   385  	if fnsym == nil {
   386  		return
   387  	}
   388  	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
   389  		return
   390  	}
   391  
   392  	trackSyms := make([]*types.Sym, 0, len(tracked))
   393  	for sym := range tracked {
   394  		trackSyms = append(trackSyms, sym)
   395  	}
   396  	sort.Sort(symByName(trackSyms))
   397  	for _, sym := range trackSyms {
   398  		r := obj.Addrel(fnsym)
   399  		r.Sym = sym.Linksym()
   400  		r.Type = objabi.R_USEFIELD
   401  	}
   402  }
   403  
// symByName implements sort.Interface for []*types.Sym, ordering
// symbols alphabetically by name.
type symByName []*types.Sym

func (a symByName) Len() int           { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }