github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/mips64/ggen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ppc64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/ppc64"
	"fmt"
)

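// defframe fills in the argument and frame sizes on the function's
// TEXT instruction and emits code to zero any ambiguously live
// stack slots, so the garbage collector only sees initialized values.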
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

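		// Illustrative example (offsets assumed, not from real code):
		// with an existing range lo=56, hi=72 and a new variable at
		// offsets 32..48, 48 >= 56-16 holds, so both are zeroed as
		// one range 32..72 rather than as two separate ranges.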
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi)
}

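// zerorange emits code to zero cnt = hi-lo bytes of the stack frame,
// starting at offset lo (addressed as 8+frame+lo from REGSP). Small
// ranges are unrolled into MOVD stores of REGZERO; large ranges use a
// MOVDU store loop, roughly (a sketch, with the loop label written
// out for readability):
//
//	MOVD	$(8+frame+lo-8), REGTMP
//	ADD	REGTMP, REGSP, REGRT1	// start pointer, one word early
//	MOVD	$cnt, REGTMP
//	ADD	REGTMP, REGRT1, REGRT2	// end pointer
//	loop:
//	MOVDU	R0, 8(REGRT1)		// zero and pre-increment
//	CMP	REGRT1, REGRT2
//	BNE	loop
//
// The DUFFZERO path is disabled pending golang.org/issue/12108.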
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
		}
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}

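// appendpp allocates a new Prog with the given opcode and operands,
// links it into the instruction list immediately after p, and
// returns it.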
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno
	q.From.Type = int16(ftype)
	q.From.Reg = int16(freg)
	q.From.Offset = foffset
	q.To.Type = int16(ttype)
	q.To.Reg = int16(treg)
	q.To.Offset = toffset
	q.Link = p.Link
	p.Link = q
	return q
}

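// ginsnop emits a hardware no-op: OR R0, R0.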
func ginsnop() {
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
	gins(ppc64.AOR, &reg, &reg)
}

var panicdiv *gc.Node

/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will generate an undefined result.
	// Also need to explicitly trap on division by zero;
	// the hardware will silently generate an undefined result.
	// DIVW leaves an unpredictable result in the upper 32 bits,
	// so always use DIVD/DIVDU.
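	// For example, int64(-1<<63) / -1 overflows: the Go spec defines
	// the result to wrap back to -1<<63, so the code below detects a
	// possible minimum-value dividend with a -1 divisor (check) and
	// computes the result itself rather than trusting the hardware.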
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = 0
		}
	}

	if t.Width < 8 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = 0
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl

		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
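	// Emitted sequence (sketch):
	//	CMP	tr, R0
	//	BNE	ok
	//	CALL	runtime.panicdivide
	// ok: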
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)

	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = ppc64.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
	if check != 0 {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)

			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)

			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
		gmove(&tl, res)
	} else {
		// A%B = A-(A/B*B)
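		// e.g. 7%3 = 7-(7/3)*3 = 7-2*3 = 1; the identity also holds
		// for Go's truncated division with negative operands:
		// -7%3 = -7-(-2)*3 = -1.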
		var tm gc.Node
		gc.Regalloc(&tm, t, nil)

		// patch div to use the 3 register form
		// TODO(minux): add gins3?
		p1.Reg = p1.To.Reg

		p1.To.Reg = tm.Reg
		gins(optoas(gc.OMUL, t), &tr, &tm)
		gc.Regfree(&tr)
		gins(optoas(gc.OSUB, t), &tm, &tl)
		gc.Regfree(&tm)
		gmove(&tl, res)
	}

	gc.Regfree(&tl)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
}

/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
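// For operand widths below 64 bits, a full-width multiply followed by
// a right shift by the operand width extracts the high half; for
// 64-bit operands, MULHD/MULHDU produce the high half directly.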
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16,
		gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRAD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TUINT8,
		gc.TUINT16,
		gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TINT64,
		gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := int(optoas(op, nl.Type))

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
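			// e.g. a 64-bit shift by 100 becomes two shifts by 63,
			// which leaves 0 (or -1 for an arithmetic right shift of
			// a negative value), matching Go's rules for shift
			// counts of at least the operand width.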
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear the high bits of the shift count

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
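		// Emitted loop (sketch; REGRT1 is R3):
		//	SUB	$8, R3		// back up one dword
		//	MOVD	$(q*8)(R3), Rend
		// loop:
		//	MOVDU	R0, 8(R3)	// zero and pre-increment
		//	CMP	R3, Rend
		//	BNE	loop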
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := p

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno == 1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		/*
			// check is
			//	TD $4, R0, arg (R0 is always zero)
			// eqv. to:
			// 	tdeq r0, arg
			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
			reg = p->from.reg;
			p->as = ATD;
			p->from = p->to = p->from3 = zprog.from;
			p->from.type = TYPE_CONST;
			p->from.offset = 4;
			p->from.reg = 0;
			p->reg = REGZERO;
			p->to.type = TYPE_REG;
			p->to.reg = reg;
		*/
		// check is
		//	CMP arg, R0
		//	BNE 2(PC) [likely]
		//	MOVD R0, 0(R0)
		p1 = gc.Ctxt.NewProg()

		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = ppc64.ACMP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGZERO
		p1.As = ppc64.ABNE

		//p1->from.type = TYPE_CONST;
		//p1->from.offset = 1; // likely
		p1.To.Type = obj.TYPE_BRANCH

		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		p2.As = ppc64.AMOVD

		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = ppc64.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = ppc64.REGZERO
		p2.To.Offset = 0
	}
}

// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, ppc64.REGG)
	gmove(&n1, res)
}