github.com/euank/go@v0.0.0-20160829210321-495514729181/src/cmd/compile/internal/ppc64/ggen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ppc64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/ppc64"
	"fmt"
)

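// defframe fills in the TEXT pseudo-instruction's argument and frame sizes
// and appends code at function entry to zero any ambiguously live stack
// variables, so the garbage collector never scans uninitialized memory.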
func defframe(ptxt *obj.Prog) {
	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for _, n := range gc.Curfn.Func.Dcl {
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, gc.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

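		// Ranges separated by a gap of at most two register-sized words
		// are merged; the gap is simply zeroed along with them.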
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi)
}

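// zerorange emits instructions after p to zero the stack range [lo, hi)
// within the frame. Small ranges get unrolled stores of REGZERO, ranges of
// up to 128 pointer words branch into duffzero, and anything larger uses an
// explicit MOVDU/CMP/BNE store loop.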
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
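		// Each duffzero step on ppc64 is a single 4-byte MOVDU that zeroes
		// one doubleword; the offset selects the entry point so that
		// exactly cnt/Widthptr words are cleared.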
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}

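// appendpp allocates a new Prog, fills in its opcode and operands, and links
// it into the instruction list immediately after p, returning the new Prog.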
func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = as
	q.Lineno = p.Lineno
	q.From.Type = ftype
	q.From.Reg = int16(freg)
	q.From.Offset = foffset
	q.To.Type = ttype
	q.To.Reg = int16(treg)
	q.To.Offset = toffset
	q.Link = p.Link
	p.Link = q
	return q
}

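// ginsnop emits OR R0, R0 as a no-op: R0 is the always-zero register, so the
// instruction has no architectural effect.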
func ginsnop() {
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
	gins(ppc64.AOR, &reg, &reg)
}

var panicdiv *gc.Node

/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful to handle
	// the most negative int divided by -1 correctly;
	// the hardware generates an undefined result.
	// Division by zero also needs an explicit trap,
	// since the hardware silently produces an undefined result.
	// DIVW leaves an unpredictable result in the high 32 bits,
	// so always use DIVD/DIVDU.
	t := nl.Type

	t0 := t
	check := false
	if t.IsSigned() {
		check = true
		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
			check = false
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
			check = false
		}
	}

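	// Operands narrower than 64 bits are widened, so the overflow case
	// above cannot arise and the -1 fix-up can be skipped.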
	if t.Width < 8 {
		if t.IsSigned() {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = false
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl

		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)

	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = ppc64.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
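	// When check is set, the operands may be the most negative int and -1;
	// that pair is handled explicitly, since the hardware result is
	// undefined: a / -1 becomes -a, and a % -1 becomes 0.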
	if check {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)

			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)

			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
		gmove(&tl, res)
	} else {
		// A%B = A-(A/B*B)
		var tm gc.Node
		gc.Regalloc(&tm, t, nil)

		// patch div to use the 3 register form
		// TODO(minux): add gins3?
		p1.Reg = p1.To.Reg

		p1.To.Reg = tm.Reg
		gins(optoas(gc.OMUL, t), &tr, &tm)
		gc.Regfree(&tr)
		gins(optoas(gc.OSUB, t), &tm, &tl)
		gc.Regfree(&tm)
		gmove(&tl, res)
	}

	gc.Regfree(&tl)
	if check {
		gc.Patch(p2, gc.Pc)
	}
}

/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := t.Width * 8
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
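	// For operands narrower than 64 bits, a full-width multiply followed
	// by a right shift (arithmetic for signed, logical for unsigned) by
	// the operand width extracts the high half; full 64-bit operands use
	// MULHD/MULHDU directly.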
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16,
		gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRAD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TUINT8,
		gc.TUINT16,
		gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TINT64,
		gc.TUINT64:
		if t.IsSigned() {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
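// bounded indicates that the shift count is already known to be less than
// the operand width, so the fix-up for oversized counts can be omitted.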
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int64())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear the high bits of the count

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && nl.Type.IsSigned() {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

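// clearfat zeroes a fat (multi-word) object. The size is split into q
// doublewords and c trailing bytes: more than 128 doublewords get an
// explicit MOVDU store loop, at least 4 go through duffzero, and smaller
// counts are zeroed with unrolled stores.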
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := p

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
	} else if q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

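	// Zero any trailing bytes one at a time, starting boff bytes past dst.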
	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(p.Lineno, "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		/*
			// check is
			//	TD $4, R0, arg (R0 is always zero)
			// eqv. to:
			// 	tdeq r0, arg
			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
			reg = p->from.reg;
			p->as = ATD;
			p->from = p->to = p->from3 = zprog.from;
			p->from.type = TYPE_CONST;
			p->from.offset = 4;
			p->from.reg = 0;
			p->reg = REGZERO;
			p->to.type = TYPE_REG;
			p->to.reg = reg;
		*/
		// check is
		//	CMP arg, R0
		//	BNE 2(PC) [likely]
		//	MOVD R0, 0(R0)
		p1 = gc.Ctxt.NewProg()

		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = ppc64.ACMP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGZERO
		p1.As = ppc64.ABNE

		//p1->from.type = TYPE_CONST;
		//p1->from.offset = 1; // likely
		p1.To.Type = obj.TYPE_BRANCH

		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		p2.As = ppc64.AMOVD

		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = ppc64.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = ppc64.REGZERO
		p2.To.Offset = 0
	}
}

// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, ppc64.REGG)
	gmove(&n1, res)
}