github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/cmd/compile/internal/mips64/ggen.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package mips64
     6  
     7  import (
     8  	"cmd/compile/internal/gc"
     9  	"cmd/internal/obj"
    10  	"cmd/internal/obj/mips"
    11  	"fmt"
    12  )
    13  
    14  func defframe(ptxt *obj.Prog) {
    15  	var n *gc.Node
    16  
    17  	// fill in argument size, stack size
    18  	ptxt.To.Type = obj.TYPE_TEXTSIZE
    19  
    20  	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
    21  	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
    22  	ptxt.To.Offset = int64(frame)
    23  
    24  	// insert code to zero ambiguously live variables
    25  	// so that the garbage collector only sees initialized values
    26  	// when it looks for pointers.
    27  	p := ptxt
    28  
    29  	hi := int64(0)
    30  	lo := hi
    31  
    32  	// iterate through declarations - they are sorted in decreasing xoffset order.
    33  	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
    34  		n = l.N
    35  		if !n.Name.Needzero {
    36  			continue
    37  		}
    38  		if n.Class != gc.PAUTO {
    39  			gc.Fatalf("needzero class %d", n.Class)
    40  		}
    41  		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
    42  			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
    43  		}
    44  
    45  		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
    46  			// merge with range we already have
    47  			lo = n.Xoffset
    48  
    49  			continue
    50  		}
    51  
    52  		// zero old range
    53  		p = zerorange(p, int64(frame), lo, hi)
    54  
    55  		// set new range
    56  		hi = n.Xoffset + n.Type.Width
    57  
    58  		lo = n.Xoffset
    59  	}
    60  
    61  	// zero final range
    62  	zerorange(p, int64(frame), lo, hi)
    63  }
    64  
// zerorange emits code to zero the stack bytes [lo, hi) relative to the
// local frame, appending instructions after p and returning the last
// instruction emitted. frame is the total frame size; stack offsets are
// biased by 8 to skip the saved link-register slot at 0(SP).
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		// Small range: one MOVV store of R0 per pointer-sized word.
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+frame+lo+i)
		}
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && cnt <= int64(128*gc.Widthptr) {
		// Medium range: jump into duffzero at the right offset.
		// Disabled (see TODO above) until issue 12108 is resolved.
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		// Each duffzero step zeroes 8 bytes; skip the steps not needed.
		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
	} else {
		// Large range: explicit store loop.
		//	ADDV	$(8+frame+lo-8), SP, r1
		//	ADDV	$cnt, r1, r2
		// loop:
		//	MOVV	R0, (Widthptr)r1
		//	ADDV	$Widthptr, r1
		//	BNE		r1, r2, loop
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
		p.Reg = mips.REGRT1
		p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
		p1 := p // loop head, target of the backward branch
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
		p = appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
		p.Reg = mips.REGRT2
		gc.Patch(p, p1)
	}

	return p
}
   106  
   107  func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
   108  	q := gc.Ctxt.NewProg()
   109  	gc.Clearp(q)
   110  	q.As = int16(as)
   111  	q.Lineno = p.Lineno
   112  	q.From.Type = int16(ftype)
   113  	q.From.Reg = int16(freg)
   114  	q.From.Offset = foffset
   115  	q.To.Type = int16(ttype)
   116  	q.To.Reg = int16(treg)
   117  	q.To.Offset = toffset
   118  	q.Link = p.Link
   119  	p.Link = q
   120  	return q
   121  }
   122  
   123  func ginsnop() {
   124  	var reg gc.Node
   125  	gc.Nodreg(&reg, gc.Types[gc.TINT], mips.REG_R0)
   126  	gins(mips.ANOR, &reg, &reg)
   127  }
   128  
// panicdiv caches the Node for runtime.panicdivide,
// lazily initialized on first use in dodiv.
var panicdiv *gc.Node
   130  
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	t := nl.Type

	t0 := t

	// Division is always done in 64 bits; narrower operands are
	// widened to the matching signedness first.
	if t.Width < 8 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
	}

	a := optoas(gc.ODIV, t)

	// Evaluate both operands into registers, the more complex side
	// (larger Ullman number) first to reduce register pressure.
	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert the narrow values to the 64-bit division type
		// in place (widen each register into itself).
		tl2 := tl

		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic: branch over the panic call
	// when the divisor is nonzero.
	p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	// The divide instruction leaves the quotient in LO and the
	// remainder in HI; copy out the one op asks for.
	gins3(a, &tr, &tl, nil)
	gc.Regfree(&tr)
	if op == gc.ODIV {
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &tl)
	} else { // remainder in REG_HI
		var hi gc.Node
		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
		gins(mips.AMOVV, &hi, &tl)
	}
	gmove(&tl, res)
	gc.Regfree(&tl)
}
   198  
   199  /*
   200   * generate high multiply:
   201   *   res = (nl*nr) >> width
   202   */
   203  func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
   204  	// largest ullman on left.
   205  	if nl.Ullman < nr.Ullman {
   206  		nl, nr = nr, nl
   207  	}
   208  
   209  	t := (*gc.Type)(nl.Type)
   210  	w := int(int(t.Width * 8))
   211  	var n1 gc.Node
   212  	gc.Cgenr(nl, &n1, res)
   213  	var n2 gc.Node
   214  	gc.Cgenr(nr, &n2, nil)
   215  	switch gc.Simtype[t.Etype] {
   216  	case gc.TINT8,
   217  		gc.TINT16,
   218  		gc.TINT32:
   219  		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
   220  		var lo gc.Node
   221  		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
   222  		gins(mips.AMOVV, &lo, &n1)
   223  		p := (*obj.Prog)(gins(mips.ASRAV, nil, &n1))
   224  		p.From.Type = obj.TYPE_CONST
   225  		p.From.Offset = int64(w)
   226  
   227  	case gc.TUINT8,
   228  		gc.TUINT16,
   229  		gc.TUINT32:
   230  		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
   231  		var lo gc.Node
   232  		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
   233  		gins(mips.AMOVV, &lo, &n1)
   234  		p := (*obj.Prog)(gins(mips.ASRLV, nil, &n1))
   235  		p.From.Type = obj.TYPE_CONST
   236  		p.From.Offset = int64(w)
   237  
   238  	case gc.TINT64,
   239  		gc.TUINT64:
   240  		if gc.Issigned[t.Etype] {
   241  			gins3(mips.AMULV, &n2, &n1, nil)
   242  		} else {
   243  			gins3(mips.AMULVU, &n2, &n1, nil)
   244  		}
   245  		var hi gc.Node
   246  		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
   247  		gins(mips.AMOVV, &hi, &n1)
   248  
   249  	default:
   250  		gc.Fatalf("cgen_hmul %v", t)
   251  	}
   252  
   253  	gc.Cgen(&n1, res)
   254  	gc.Regfree(&n1)
   255  	gc.Regfree(&n2)
   256  }
   257  
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := int(optoas(op, nl.Type))

	// Constant shift count: no runtime bounds check needed.
	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	// Spill very complex operands (function calls etc.) to stack
	// temporaries before allocating registers for the shift.
	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	// Evaluate operands, the more complex side first.
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		var rtmp gc.Node
		gc.Nodreg(&rtmp, tcount, mips.REGTMP)
		// rtmp = (width > count), unsigned; branch past the fixup
		// when the count is in range.
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins3(mips.ASGTU, &n3, &n1, &rtmp)
		p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			// Signed right shift by >= width: shift by width-1
			// so the sign bit fills the result.
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			// Unsigned or left shift by >= width yields zero.
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
   354  
   355  func clearfat(nl *gc.Node) {
   356  	/* clear a fat object */
   357  	if gc.Debug['g'] != 0 {
   358  		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
   359  	}
   360  
   361  	w := uint64(uint64(nl.Type.Width))
   362  
   363  	// Avoid taking the address for simple enough types.
   364  	if gc.Componentgen(nil, nl) {
   365  		return
   366  	}
   367  
   368  	c := uint64(w % 8) // bytes
   369  	q := uint64(w / 8) // dwords
   370  
   371  	if gc.Reginuse(mips.REGRT1) {
   372  		gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
   373  	}
   374  
   375  	var r0 gc.Node
   376  	gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
   377  	var dst gc.Node
   378  	gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
   379  	gc.Regrealloc(&dst)
   380  	gc.Agen(nl, &dst)
   381  
   382  	var boff uint64
   383  	if q > 128 {
   384  		p := gins(mips.ASUBV, nil, &dst)
   385  		p.From.Type = obj.TYPE_CONST
   386  		p.From.Offset = 8
   387  
   388  		var end gc.Node
   389  		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
   390  		p = gins(mips.AMOVV, &dst, &end)
   391  		p.From.Type = obj.TYPE_ADDR
   392  		p.From.Offset = int64(q * 8)
   393  
   394  		p = gins(mips.AMOVV, &r0, &dst)
   395  		p.To.Type = obj.TYPE_MEM
   396  		p.To.Offset = 8
   397  		pl := (*obj.Prog)(p)
   398  
   399  		p = gins(mips.AADDV, nil, &dst)
   400  		p.From.Type = obj.TYPE_CONST
   401  		p.From.Offset = 8
   402  
   403  		gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)
   404  
   405  		gc.Regfree(&end)
   406  
   407  		// The loop leaves R1 on the last zeroed dword
   408  		boff = 8
   409  		// TODO(dfc): https://golang.org/issue/12108
   410  		// If DUFFZERO is used inside a tail call (see genwrapper) it will
   411  		// overwrite the link register.
   412  	} else if false && q >= 4 {
   413  		p := gins(mips.ASUBV, nil, &dst)
   414  		p.From.Type = obj.TYPE_CONST
   415  		p.From.Offset = 8
   416  		f := (*gc.Node)(gc.Sysfunc("duffzero"))
   417  		p = gins(obj.ADUFFZERO, nil, f)
   418  		gc.Afunclit(&p.To, f)
   419  
   420  		// 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
   421  		p.To.Offset = int64(8 * (128 - q))
   422  
   423  		// duffzero leaves R1 on the last zeroed dword
   424  		boff = 8
   425  	} else {
   426  		var p *obj.Prog
   427  		for t := uint64(0); t < q; t++ {
   428  			p = gins(mips.AMOVV, &r0, &dst)
   429  			p.To.Type = obj.TYPE_MEM
   430  			p.To.Offset = int64(8 * t)
   431  		}
   432  
   433  		boff = 8 * q
   434  	}
   435  
   436  	var p *obj.Prog
   437  	for t := uint64(0); t < c; t++ {
   438  		p = gins(mips.AMOVB, &r0, &dst)
   439  		p.To.Type = obj.TYPE_MEM
   440  		p.To.Offset = int64(t + boff)
   441  	}
   442  
   443  	gc.Regfree(&dst)
   444  }
   445  
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check:
//	BNE	arg, 2(PC)
//	MOVV	R0, 0(R0)
// i.e. when the checked register is nil, fall through into a store to
// address 0, which faults.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog

	for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		// check is
		//	BNE arg, 2(PC)
		//	MOVV R0, 0(R0)
		// Splice a new instruction in right after the check.
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999

		// Rewrite the CHECKNIL itself into the conditional branch
		// that skips over the faulting store.
		p.As = mips.ABNE
		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = mips.AMOVV
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = mips.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = mips.REGZERO
		p1.To.Offset = 0
	}
}
   488  
   489  // res = runtime.getg()
   490  func getg(res *gc.Node) {
   491  	var n1 gc.Node
   492  	gc.Nodreg(&n1, res.Type, mips.REGG)
   493  	gmove(&n1, res)
   494  }