github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/amd64/ggen.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package amd64
     6  
     7  import (
     8  	"cmd/compile/internal/gc"
     9  	"cmd/internal/obj"
    10  	"cmd/internal/obj/x86"
    11  )
    12  
// defframe fills in the TEXT instruction's argument and frame sizes
// for the current function and emits code to zero all ambiguously
// live stack slots, so the garbage collector never scans
// uninitialized pointer words.
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	// [lo, hi) is the current candidate range of frame offsets to zero.
	hi := int64(0)
	lo := hi
	// ax/x0 record whether AX resp. X0 already hold zero, so zerorange
	// emits the zeroing instruction at most once per function.
	ax := uint32(0)
	x0 := uint32(0)

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

		// Nearby ranges (within two register widths) are merged so one
		// zerorange call covers them.
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &ax, &x0)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &ax, &x0)
}
    65  
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
// so a jump at a block boundary skips whole 64-byte units.
// See runtime/mkduff.go.
const (
	dzBlocks    = 16 // number of MOV/ADD blocks
	dzBlockLen  = 4  // number of clears per block
	dzBlockSize = 19 // size of instructions in a single block
	dzMovSize   = 4  // size of single MOV instruction w/ offset
	dzAddSize   = 4  // size of single ADD instruction
	dzClearStep = 16 // number of bytes cleared by each MOV instruction

	dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
	dzSize     = dzBlocks * dzBlockSize   // total instruction bytes in DUFFZERO
)
    79  
    80  // dzOff returns the offset for a jump into DUFFZERO.
    81  // b is the number of bytes to zero.
    82  func dzOff(b int64) int64 {
    83  	off := int64(dzSize)
    84  	off -= b / dzClearLen * dzBlockSize
    85  	tailLen := b % dzClearLen
    86  	if tailLen >= dzClearStep {
    87  		off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
    88  	}
    89  	return off
    90  }
    91  
    92  // duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
    93  // b is the number of bytes to zero.
    94  func dzDI(b int64) int64 {
    95  	tailLen := b % dzClearLen
    96  	if tailLen < dzClearStep {
    97  		return 0
    98  	}
    99  	tailSteps := tailLen / dzClearStep
   100  	return -dzClearStep * (dzBlockLen - tailSteps)
   101  }
   102  
// zerorange emits instructions after p to zero the frame bytes in
// [lo, hi), picking a strategy by size: a lone MOVQ, unrolled MOVUPS
// stores from X0, a jump into runtime.duffzero, or REP STOSQ for
// large ranges. *ax and *x0 record whether AX resp. X0 already hold
// zero so the zeroing setup is emitted at most once per function.
// It returns the last instruction emitted.
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}

	if cnt%int64(gc.Widthreg) != 0 {
		// should only happen with nacl
		if cnt%int64(gc.Widthptr) != 0 {
			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
		}
		// Zero the odd leading pointer-sized word with a MOVL, then
		// fall through to the register-width strategies below.
		if *ax == 0 {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
		lo += int64(gc.Widthptr)
		cnt -= int64(gc.Widthptr)
	}

	if cnt == 8 {
		// Single register-sized store from AX.
		if *ax == 0 {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
	} else if cnt <= int64(8*gc.Widthreg) {
		// Small range: unrolled 16-byte MOVUPS stores from zeroed X0.
		if *x0 == 0 {
			p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
			*x0 = 1
		}

		for i := int64(0); i < cnt/16; i++ {
			p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
		}

		// Any remainder is covered by one overlapping 16-byte store
		// ending exactly at frame+lo+cnt.
		if cnt%16 != 0 {
			p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
		// Medium range: jump into the middle of runtime.duffzero.
		if *x0 == 0 {
			p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
			*x0 = 1
		}

		// DI is pre-adjusted by dzDI(cnt) to match the entry offset
		// dzOff(cnt) chosen within DUFFZERO.
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))

		// cnt is a multiple of Widthreg here, so cnt%16 != 0 means an
		// 8-byte tail; the overlapping store at -8(DI) covers it
		// (DUFFZERO advances DI as it runs).
		if cnt%16 != 0 {
			p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
		}
	} else {
		// Large range: REP STOSQ with AX=0 and CX = word count.
		if *ax == 0 {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}

		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
		p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
	}

	return p
}
   169  
   170  func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
   171  	q := gc.Ctxt.NewProg()
   172  	gc.Clearp(q)
   173  	q.As = int16(as)
   174  	q.Lineno = p.Lineno
   175  	q.From.Type = int16(ftype)
   176  	q.From.Reg = int16(freg)
   177  	q.From.Offset = foffset
   178  	q.To.Type = int16(ttype)
   179  	q.To.Reg = int16(treg)
   180  	q.To.Offset = toffset
   181  	q.Link = p.Link
   182  	p.Link = q
   183  	return q
   184  }
   185  
   186  var panicdiv *gc.Node
   187  
   188  /*
   189   * generate division.
   190   * generates one of:
   191   *	res = nl / nr
   192   *	res = nl % nr
   193   * according to op.
   194   */
   195  func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
   196  	// Have to be careful about handling
   197  	// most negative int divided by -1 correctly.
   198  	// The hardware will trap.
   199  	// Also the byte divide instruction needs AH,
   200  	// which we otherwise don't have to deal with.
   201  	// Easiest way to avoid for int8, int16: use int32.
   202  	// For int32 and int64, use explicit test.
   203  	// Could use int64 hw for int32.
   204  	t := nl.Type
   205  
   206  	t0 := t
   207  	check := false
   208  	if gc.Issigned[t.Etype] {
   209  		check = true
   210  		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
   211  			check = false
   212  		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
   213  			check = false
   214  		}
   215  	}
   216  
   217  	if t.Width < 4 {
   218  		if gc.Issigned[t.Etype] {
   219  			t = gc.Types[gc.TINT32]
   220  		} else {
   221  			t = gc.Types[gc.TUINT32]
   222  		}
   223  		check = false
   224  	}
   225  
   226  	a := optoas(op, t)
   227  
   228  	var n3 gc.Node
   229  	gc.Regalloc(&n3, t0, nil)
   230  	var ax gc.Node
   231  	var oldax gc.Node
   232  	if nl.Ullman >= nr.Ullman {
   233  		savex(x86.REG_AX, &ax, &oldax, res, t0)
   234  		gc.Cgen(nl, &ax)
   235  		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
   236  		gc.Cgen(nr, &n3)
   237  		gc.Regfree(&ax)
   238  	} else {
   239  		gc.Cgen(nr, &n3)
   240  		savex(x86.REG_AX, &ax, &oldax, res, t0)
   241  		gc.Cgen(nl, &ax)
   242  	}
   243  
   244  	if t != t0 {
   245  		// Convert
   246  		ax1 := ax
   247  
   248  		n31 := n3
   249  		ax.Type = t
   250  		n3.Type = t
   251  		gmove(&ax1, &ax)
   252  		gmove(&n31, &n3)
   253  	}
   254  
   255  	var n4 gc.Node
   256  	if gc.Nacl {
   257  		// Native Client does not relay the divide-by-zero trap
   258  		// to the executing program, so we must insert a check
   259  		// for ourselves.
   260  		gc.Nodconst(&n4, t, 0)
   261  
   262  		gins(optoas(gc.OCMP, t), &n3, &n4)
   263  		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
   264  		if panicdiv == nil {
   265  			panicdiv = gc.Sysfunc("panicdivide")
   266  		}
   267  		gc.Ginscall(panicdiv, -1)
   268  		gc.Patch(p1, gc.Pc)
   269  	}
   270  
   271  	var p2 *obj.Prog
   272  	if check {
   273  		gc.Nodconst(&n4, t, -1)
   274  		gins(optoas(gc.OCMP, t), &n3, &n4)
   275  		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
   276  		if op == gc.ODIV {
   277  			// a / (-1) is -a.
   278  			gins(optoas(gc.OMINUS, t), nil, &ax)
   279  
   280  			gmove(&ax, res)
   281  		} else {
   282  			// a % (-1) is 0.
   283  			gc.Nodconst(&n4, t, 0)
   284  
   285  			gmove(&n4, res)
   286  		}
   287  
   288  		p2 = gc.Gbranch(obj.AJMP, nil, 0)
   289  		gc.Patch(p1, gc.Pc)
   290  	}
   291  
   292  	var olddx gc.Node
   293  	var dx gc.Node
   294  	savex(x86.REG_DX, &dx, &olddx, res, t)
   295  	if !gc.Issigned[t.Etype] {
   296  		gc.Nodconst(&n4, t, 0)
   297  		gmove(&n4, &dx)
   298  	} else {
   299  		gins(optoas(gc.OEXTEND, t), nil, nil)
   300  	}
   301  	gins(a, &n3, nil)
   302  	gc.Regfree(&n3)
   303  	if op == gc.ODIV {
   304  		gmove(&ax, res)
   305  	} else {
   306  		gmove(&dx, res)
   307  	}
   308  	restx(&dx, &olddx)
   309  	if check {
   310  		gc.Patch(p2, gc.Pc)
   311  	}
   312  	restx(&ax, &oldax)
   313  }
   314  
   315  /*
   316   * register dr is one of the special ones (AX, CX, DI, SI, etc.).
   317   * we need to use it.  if it is already allocated as a temporary
   318   * (r > 1; can only happen if a routine like sgen passed a
   319   * special as cgen's res and then cgen used regalloc to reuse
   320   * it as its own temporary), then move it for now to another
   321   * register.  caller must call restx to move it back.
   322   * the move is not necessary if dr == res, because res is
   323   * known to be dead.
   324   */
   325  func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
   326  	r := uint8(gc.GetReg(dr))
   327  
   328  	// save current ax and dx if they are live
   329  	// and not the destination
   330  	*oldx = gc.Node{}
   331  
   332  	gc.Nodreg(x, t, dr)
   333  	if r > 1 && !gc.Samereg(x, res) {
   334  		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
   335  		x.Type = gc.Types[gc.TINT64]
   336  		gmove(x, oldx)
   337  		x.Type = t
   338  		// TODO(marvin): Fix Node.EType type union.
   339  		oldx.Etype = gc.EType(r) // squirrel away old r value
   340  		gc.SetReg(dr, 1)
   341  	}
   342  }
   343  
   344  func restx(x *gc.Node, oldx *gc.Node) {
   345  	if oldx.Op != 0 {
   346  		x.Type = gc.Types[gc.TINT64]
   347  		gc.SetReg(int(x.Reg), int(oldx.Etype))
   348  		gmove(oldx, x)
   349  		gc.Regfree(oldx)
   350  	}
   351  }
   352  
   353  /*
   354   * generate high multiply:
   355   *   res = (nl*nr) >> width
   356   */
   357  func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
   358  	t := nl.Type
   359  	a := optoas(gc.OHMUL, t)
   360  	if nl.Ullman < nr.Ullman {
   361  		nl, nr = nr, nl
   362  	}
   363  
   364  	var n1 gc.Node
   365  	gc.Cgenr(nl, &n1, res)
   366  	var n2 gc.Node
   367  	gc.Cgenr(nr, &n2, nil)
   368  	var ax gc.Node
   369  	gc.Nodreg(&ax, t, x86.REG_AX)
   370  	gmove(&n1, &ax)
   371  	gins(a, &n2, nil)
   372  	gc.Regfree(&n2)
   373  	gc.Regfree(&n1)
   374  
   375  	var dx gc.Node
   376  	if t.Width == 1 {
   377  		// byte multiply behaves differently.
   378  		gc.Nodreg(&ax, t, x86.REG_AH)
   379  
   380  		gc.Nodreg(&dx, t, x86.REG_DX)
   381  		gmove(&ax, &dx)
   382  	}
   383  
   384  	gc.Nodreg(&dx, t, x86.REG_DX)
   385  	gmove(&dx, res)
   386  }
   387  
   388  /*
   389   * generate shift according to op, one of:
   390   *	res = nl << nr
   391   *	res = nl >> nr
   392   */
   393  func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
   394  	a := optoas(op, nl.Type)
   395  
   396  	if nr.Op == gc.OLITERAL {
   397  		var n1 gc.Node
   398  		gc.Regalloc(&n1, nl.Type, res)
   399  		gc.Cgen(nl, &n1)
   400  		sc := uint64(nr.Int())
   401  		if sc >= uint64(nl.Type.Width*8) {
   402  			// large shift gets 2 shifts by width-1
   403  			var n3 gc.Node
   404  			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
   405  
   406  			gins(a, &n3, &n1)
   407  			gins(a, &n3, &n1)
   408  		} else {
   409  			gins(a, nr, &n1)
   410  		}
   411  		gmove(&n1, res)
   412  		gc.Regfree(&n1)
   413  		return
   414  	}
   415  
   416  	if nl.Ullman >= gc.UINF {
   417  		var n4 gc.Node
   418  		gc.Tempname(&n4, nl.Type)
   419  		gc.Cgen(nl, &n4)
   420  		nl = &n4
   421  	}
   422  
   423  	if nr.Ullman >= gc.UINF {
   424  		var n5 gc.Node
   425  		gc.Tempname(&n5, nr.Type)
   426  		gc.Cgen(nr, &n5)
   427  		nr = &n5
   428  	}
   429  
   430  	rcx := gc.GetReg(x86.REG_CX)
   431  	var n1 gc.Node
   432  	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
   433  
   434  	// Allow either uint32 or uint64 as shift type,
   435  	// to avoid unnecessary conversion from uint32 to uint64
   436  	// just to do the comparison.
   437  	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
   438  
   439  	if tcount.Etype < gc.TUINT32 {
   440  		tcount = gc.Types[gc.TUINT32]
   441  	}
   442  
   443  	gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
   444  	var n3 gc.Node
   445  	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
   446  
   447  	var cx gc.Node
   448  	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
   449  
   450  	var oldcx gc.Node
   451  	if rcx > 0 && !gc.Samereg(&cx, res) {
   452  		gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
   453  		gmove(&cx, &oldcx)
   454  	}
   455  
   456  	cx.Type = tcount
   457  
   458  	var n2 gc.Node
   459  	if gc.Samereg(&cx, res) {
   460  		gc.Regalloc(&n2, nl.Type, nil)
   461  	} else {
   462  		gc.Regalloc(&n2, nl.Type, res)
   463  	}
   464  	if nl.Ullman >= nr.Ullman {
   465  		gc.Cgen(nl, &n2)
   466  		gc.Cgen(nr, &n1)
   467  		gmove(&n1, &n3)
   468  	} else {
   469  		gc.Cgen(nr, &n1)
   470  		gmove(&n1, &n3)
   471  		gc.Cgen(nl, &n2)
   472  	}
   473  
   474  	gc.Regfree(&n3)
   475  
   476  	// test and fix up large shifts
   477  	if !bounded {
   478  		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
   479  		gins(optoas(gc.OCMP, tcount), &n1, &n3)
   480  		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
   481  		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
   482  			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
   483  			gins(a, &n3, &n2)
   484  		} else {
   485  			gc.Nodconst(&n3, nl.Type, 0)
   486  			gmove(&n3, &n2)
   487  		}
   488  
   489  		gc.Patch(p1, gc.Pc)
   490  	}
   491  
   492  	gins(a, &n1, &n2)
   493  
   494  	if oldcx.Op != 0 {
   495  		cx.Type = gc.Types[gc.TUINT64]
   496  		gmove(&oldcx, &cx)
   497  		gc.Regfree(&oldcx)
   498  	}
   499  
   500  	gmove(&n2, res)
   501  
   502  	gc.Regfree(&n1)
   503  	gc.Regfree(&n2)
   504  }
   505  
   506  /*
   507   * generate byte multiply:
   508   *	res = nl * nr
   509   * there is no 2-operand byte multiply instruction so
   510   * we do a full-width multiplication and truncate afterwards.
   511   */
   512  func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
   513  	if optoas(op, nl.Type) != x86.AIMULB {
   514  		return false
   515  	}
   516  
   517  	// largest ullman on left.
   518  	if nl.Ullman < nr.Ullman {
   519  		nl, nr = nr, nl
   520  	}
   521  
   522  	// generate operands in "8-bit" registers.
   523  	var n1b gc.Node
   524  	gc.Regalloc(&n1b, nl.Type, res)
   525  
   526  	gc.Cgen(nl, &n1b)
   527  	var n2b gc.Node
   528  	gc.Regalloc(&n2b, nr.Type, nil)
   529  	gc.Cgen(nr, &n2b)
   530  
   531  	// perform full-width multiplication.
   532  	t := gc.Types[gc.TUINT64]
   533  
   534  	if gc.Issigned[nl.Type.Etype] {
   535  		t = gc.Types[gc.TINT64]
   536  	}
   537  	var n1 gc.Node
   538  	gc.Nodreg(&n1, t, int(n1b.Reg))
   539  	var n2 gc.Node
   540  	gc.Nodreg(&n2, t, int(n2b.Reg))
   541  	a := optoas(op, t)
   542  	gins(a, &n2, &n1)
   543  
   544  	// truncate.
   545  	gmove(&n1, res)
   546  
   547  	gc.Regfree(&n1b)
   548  	gc.Regfree(&n2b)
   549  	return true
   550  }
   551  
// clearfat zeroes the multiword value nl in place, choosing among
// REP STOSQ, a jump into runtime.duffzero, or unrolled stores
// depending on the value's width.
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	w := nl.Type.Width

	// Very large (or large-under-NaCl) objects: REP STOSQ.
	if w > 1024 || (gc.Nacl && w >= 64) {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var ax gc.Node
		var oldax gc.Node
		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
		gconreg(x86.AMOVL, 0, x86.REG_AX)
		gconreg(movptr, w/8, x86.REG_CX)

		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+

		// Clean up any sub-8-byte tail with explicit stores through DI.
		if w%8 != 0 {
			n1.Op = gc.OINDREG
			clearfat_tail(&n1, w%8)
		}

		restx(&n1, &oldn1)
		restx(&ax, &oldax)
		return
	}

	// Medium objects: jump into the middle of runtime.duffzero.
	if w >= 64 {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var vec_zero gc.Node
		var old_x0 gc.Node
		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
		gins(x86.AXORPS, &vec_zero, &vec_zero)

		// Pre-adjust DI to match the chosen DUFFZERO entry point.
		if di := dzDI(w); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(w)

		// Overlapping 16-byte store covers any sub-16-byte remainder.
		if w%16 != 0 {
			n1.Op = gc.OINDREG
			n1.Xoffset -= 16 - w%16
			gins(x86.AMOVUPS, &vec_zero, &n1)
		}

		restx(&vec_zero, &old_x0)
		restx(&n1, &oldn1)
		return
	}

	// NOTE: Must use agen, not igen, so that optimizer sees address
	// being taken. We are not writing on field boundaries.
	var n1 gc.Node
	gc.Agenr(nl, &n1, nil)
	n1.Op = gc.OINDREG

	clearfat_tail(&n1, w)

	gc.Regfree(&n1)
}
   630  
// clearfat_tail zeroes b bytes at the address held in n1 (an OINDREG
// node) using unrolled stores, descending through 16-, 8-, 4-, 2-,
// and 1-byte widths. It advances n1.Xoffset as it goes and uses
// overlapping stores for odd remainders at the 16/8/4-byte levels.
func clearfat_tail(n1 *gc.Node, b int64) {
	if b >= 16 {
		var vec_zero gc.Node
		gc.Regalloc(&vec_zero, gc.Types[gc.TFLOAT64], nil)
		gins(x86.AXORPS, &vec_zero, &vec_zero)

		for b >= 16 {
			gins(x86.AMOVUPS, &vec_zero, n1)
			n1.Xoffset += 16
			b -= 16
		}

		// MOVUPS X0, off(base) is a few bytes shorter than MOV 0, off(base)
		if b != 0 {
			// Overlapping store ending exactly at the last byte.
			n1.Xoffset -= 16 - b
			gins(x86.AMOVUPS, &vec_zero, n1)
		}

		gc.Regfree(&vec_zero)
		return
	}

	// Write sequence of MOV 0, off(base) instead of using STOSQ.
	// The hope is that although the code will be slightly longer,
	// the MOVs will have no dependencies and pipeline better
	// than the unrolled STOSQ loop.
	var z gc.Node
	gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
	if b >= 8 {
		n1.Type = z.Type
		gins(x86.AMOVQ, &z, n1)
		n1.Xoffset += 8
		b -= 8

		if b != 0 {
			// Overlapping 8-byte store covers the remainder.
			n1.Xoffset -= 8 - b
			gins(x86.AMOVQ, &z, n1)
		}
		return
	}

	if b >= 4 {
		gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
		n1.Type = z.Type
		gins(x86.AMOVL, &z, n1)
		n1.Xoffset += 4
		b -= 4

		if b != 0 {
			// Overlapping 4-byte store covers the remainder.
			n1.Xoffset -= 4 - b
			gins(x86.AMOVL, &z, n1)
		}
		return
	}

	if b >= 2 {
		gc.Nodconst(&z, gc.Types[gc.TUINT16], 0)
		n1.Type = z.Type
		gins(x86.AMOVW, &z, n1)
		n1.Xoffset += 2
		b -= 2
	}

	// Finish off any last byte(s) one at a time.
	gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
	for b > 0 {
		n1.Type = z.Type
		gins(x86.AMOVB, &z, n1)
		n1.Xoffset++
		b--
	}

}
   703  
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}

		// check is
		//	CMP arg, $0
		//	JNE 2(PC) (likely)
		//	MOV AX, 0
		p1 = gc.Ctxt.NewProg()

		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		// Splice p1 and p2 in after p; p itself is rewritten into the CMP.
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999 // placeholder PCs; real values assigned later
		p2.Pc = 9999
		p.As = int16(cmpptr)
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p1.As = x86.AJNE
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = 1 // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link // branch past the faulting store when non-nil

		// crash by write to memory address 0.
		// if possible, since we know arg is 0, use 0(arg),
		// which will be shorter to encode than plain 0.
		p2.As = x86.AMOVL

		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = x86.REG_AX
		if regtyp(&p.From) {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = p.From.Reg
		} else {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = x86.REG_NONE
		}

		p2.To.Offset = 0
	}
}
   761  
   762  // addr += index*width if possible.
   763  func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
   764  	switch width {
   765  	case 1, 2, 4, 8:
   766  		p1 := gins(x86.ALEAQ, index, addr)
   767  		p1.From.Type = obj.TYPE_MEM
   768  		p1.From.Scale = int16(width)
   769  		p1.From.Index = p1.From.Reg
   770  		p1.From.Reg = p1.To.Reg
   771  		return true
   772  	}
   773  	return false
   774  }
   775  
// res = runtime.getg()
// Loads the current g pointer via the TLS pseudo-register:
// first the TLS base, then the slot it indexes.
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Regalloc(&n1, res.Type, res)
	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
	// MOV TLS, n1
	p := gins(mov, nil, &n1)
	p.From.Type = obj.TYPE_REG
	p.From.Reg = x86.REG_TLS
	// MOV (n1)(TLS*1), n1
	p = gins(mov, nil, &n1)
	p.From = p.To
	p.From.Type = obj.TYPE_MEM
	p.From.Index = x86.REG_TLS
	p.From.Scale = 1
	gmove(&n1, res)
	gc.Regfree(&n1)
}