github.com/euank/go@v0.0.0-20160829210321-495514729181/src/cmd/compile/internal/mips64/ggen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mips64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/mips"
	"fmt"
)

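// defframe fills in the argument and frame sizes on the function's TEXT
// instruction and inserts code to zero ambiguously live stack variables.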
func defframe(ptxt *obj.Prog) {
	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for _, n := range gc.Curfn.Func.Dcl {
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, gc.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi)
}

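// zerorange emits instructions after p to zero the stack region [lo, hi)
// of the current frame and returns the last instruction generated.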
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+frame+lo+i)
		}
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
	} else {
		//	ADDV	$(8+frame+lo-8), SP, r1
		//	ADDV	$cnt, r1, r2
		// loop:
		//	MOVV	R0, (Widthptr)r1
		//	ADDV	$Widthptr, r1
		//	BNE	r1, r2, loop
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
		p.Reg = mips.REGRT1
		p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
		p = appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
		p.Reg = mips.REGRT2
		gc.Patch(p, p1)
	}

	return p
}

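// appendpp appends a new instruction with the given opcode and operands
// after p and returns the new instruction.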
func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = as
	q.Lineno = p.Lineno
	q.From.Type = ftype
	q.From.Reg = int16(freg)
	q.From.Offset = foffset
	q.To.Type = ttype
	q.To.Reg = int16(treg)
	q.To.Offset = toffset
	q.Link = p.Link
	p.Link = q
	return q
}

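// ginsnop emits a no-op instruction (NOR R0, R0).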
func ginsnop() {
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], mips.REG_R0)
	gins(mips.ANOR, &reg, &reg)
}

var panicdiv *gc.Node

/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	t := nl.Type

	t0 := t

	if t.Width < 8 {
		if t.IsSigned() {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl

		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	gins3(a, &tr, &tl, nil)
	gc.Regfree(&tr)
	if op == gc.ODIV {
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &tl)
	} else { // remainder in REG_HI
		var hi gc.Node
		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
		gins(mips.AMOVV, &hi, &tl)
	}
	gmove(&tl, res)
	gc.Regfree(&tl)
}

/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := t.Width * 8
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16,
		gc.TINT32:
		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &n1)
		p := gins(mips.ASRAV, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TUINT8,
		gc.TUINT16,
		gc.TUINT32:
		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &n1)
		p := gins(mips.ASRLV, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TINT64,
		gc.TUINT64:
		if t.IsSigned() {
			gins3(mips.AMULV, &n2, &n1, nil)
		} else {
			gins3(mips.AMULVU, &n2, &n1, nil)
		}
		var hi gc.Node
		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
		gins(mips.AMOVV, &hi, &n1)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int64())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of the shift count

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		var rtmp gc.Node
		gc.Nodreg(&rtmp, tcount, mips.REGTMP)
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins3(mips.ASGTU, &n3, &n1, &rtmp)
		p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0)
		if op == gc.ORSH && nl.Type.IsSigned() {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // dwords

	if gc.Reginuse(mips.REGRT1) {
		gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(mips.ASUBV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(mips.AMOVV, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(mips.AMOVV, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := p

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R1 on the last zeroed dword
		boff = 8
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && q >= 4 {
		p := gins(mips.ASUBV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
		p.To.Offset = int64(8 * (128 - q))

		// duffzero leaves R1 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(mips.AMOVV, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(mips.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno==1 in generated wrappers
			gc.Warnl(p.Lineno, "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		// check is
		//	BNE arg, 2(PC)
		//	MOVV R0, 0(R0)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999

		p.As = mips.ABNE
		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = mips.AMOVV
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = mips.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = mips.REGZERO
		p1.To.Offset = 0
	}
}

// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, mips.REGG)
	gmove(&n1, res)
}