github.com/euank/go@v0.0.0-20160829210321-495514729181/src/cmd/compile/internal/arm/ggen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

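// defframe records the argument and frame sizes on the function's
// TEXT instruction and then zeroes the stack ranges of ambiguously
// live variables, so the garbage collector sees only initialized
// words. Nearby ranges (within 2*Widthptr of each other) are merged
// and zeroed in a single zerorange call.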
func defframe(ptxt *obj.Prog) {
	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to contain ambiguously live variables
	// so that garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi
	r0 := uint32(0)
	for _, n := range gc.Curfn.Func.Dcl {
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, gc.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &r0)

		// set new range
		hi = n.Xoffset + n.Type.Width
		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &r0)
}

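// zerorange zeroes stack words in [lo, hi) within the given frame,
// appending the necessary instructions after p and returning the last
// one. R0 is loaded with zero once per frame (*r0 tracks this). Small
// ranges use individual MOVW stores, medium ranges call into duffzero
// at a computed offset, and large ranges run an explicit store loop
// with R1 as the running pointer and R2 as the end address.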
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
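		// Each duffzero step zeroes one word (4 bytes) and there are
		// 128 steps in all; enter far enough in to zero exactly cnt bytes.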
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}

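// appendpp allocates a new instruction with the given opcode and
// from/to operands, links it in immediately after p, and returns it.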
func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = as
	q.Lineno = p.Lineno
	q.From.Type = ftype
	q.From.Reg = int16(freg)
	q.From.Offset = int64(foffset)
	q.To.Type = ttype
	q.To.Reg = int16(treg)
	q.To.Offset = int64(toffset)
	q.Link = p.Link
	p.Link = q
	return q
}

/*
 * generate high multiply
 *  res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := t.Width * 8
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
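	// For 8- and 16-bit operands the 32-bit product fits in a single
	// register: multiply, then shift the high half into place. Full
	// 32-bit operands need a long multiply yielding a register pair.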
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

		// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if t.IsSigned() {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatalf("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int64()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int64())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			if op == gc.ORSH && nl.Type.IsSigned() {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && nl.Type.IsSigned() {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
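	// For a 64-bit shift count, only the low word is used directly:
	// if the high word is nonzero the count is at least 2^32, so it
	// is replaced by w and handled by the large-shift fixup below.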
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if nl.Type.IsSigned() {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

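// clearfat zeroes the fat (multiword) object nl. Simple types are
// handled by gc.Componentgen without taking an address; otherwise the
// address goes to R1 and a zero to R0, whole words are cleared by an
// explicit store loop (large), duffzero (medium), or unrolled stores
// (small), and trailing bytes are cleared individually.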
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	if nl.Type.Align < 4 {
		q = 0
		c = w
	}

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

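	// More than 128 words (512 bytes): emit an explicit store loop,
	// comparing the post-incremented pointer against a computed end.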
	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %v\n", p);
			q--
		}
	}

	if c > 4 {
		// Loop to zero unaligned memory.
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(c)

		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
		c = 0
	}
	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %v\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno == 1 in generated wrappers
			gc.Warnl(p.Lineno, "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}

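// ginsnop emits a no-op: a conditional AND of R0 with itself, which
// leaves both the register and the condition flags unchanged.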
func ginsnop() {
	var r gc.Node
	gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
	p := gins(arm.AAND, &r, &r)
	p.Scond = arm.C_SCOND_EQ
}

/*
 * generate
 *	as $c, n
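 *	(the constant is first loaded into a register, since ARM
 *	immediate operands cover only a limited range)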
 */
func ginscon(as obj.As, c int64, n *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	gmove(&n1, &n2)
	gins(as, &n2, n)
	gc.Regfree(&n2)
}

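// ginscmp compares n1 and n2 for type t and returns the branch
// instruction selected by op and likely. A literal zero on the left
// is swapped to the right (reversing op) so it can be emitted as an
// immediate CMP operand.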
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if t.IsInteger() && n1.Op == gc.OLITERAL && n1.Int64() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if t.IsInteger() && n2.Op == gc.OLITERAL && n2.Int64() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// addr += index*width if possible.
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
	switch width {
	case 2:
		gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
		return true
	case 4:
		gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
		return true
	case 8:
		gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
		return true
	}
	return false
}

// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, arm.REGG)
	gmove(&n1, res)
}