github.com/peggyl/go@v0.0.0-20151008231540-ae315999c2d5/src/cmd/compile/internal/amd64/ggen.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package amd64
     6  
     7  import (
     8  	"cmd/compile/internal/gc"
     9  	"cmd/internal/obj"
    10  	"cmd/internal/obj/x86"
    11  )
    12  
// defframe finalizes the function's TEXT instruction with the argument
// and frame sizes, then emits code that zeroes all ambiguously live
// stack slots so the garbage collector only ever sees initialized words.
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	// [lo, hi) is the current run of frame offsets accumulated for one
	// zeroing pass; adjacent/near runs are merged before emitting.
	hi := int64(0)
	lo := hi
	// ax/x0 record whether AX (resp. X0) already holds zero, so
	// successive zerorange calls can avoid re-zeroing the register.
	ax := uint32(0)
	x0 := uint32(0)

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

		// Merge this variable into the pending range if the gap to it is
		// small (at most two register widths); a slightly longer zeroing
		// run is cheaper than a second zeroing sequence.
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &ax, &x0)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &ax, &x0)
}
    65  
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
// See runtime/mkduff.go.
const (
	dzBlocks    = 16 // number of MOV/ADD blocks
	dzBlockLen  = 4  // number of clears per block
	dzBlockSize = 19 // size of instructions in a single block
	dzMovSize   = 4  // size of single MOV instruction w/ offset
	dzAddSize   = 4  // size of single ADD instruction
	dzClearStep = 16 // number of bytes cleared by each MOV instruction

	dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
	dzSize     = dzBlocks * dzBlockSize   // total byte size of the DUFFZERO routine
)
    79  
    80  // dzOff returns the offset for a jump into DUFFZERO.
    81  // b is the number of bytes to zero.
    82  func dzOff(b int64) int64 {
    83  	off := int64(dzSize)
    84  	off -= b / dzClearLen * dzBlockSize
    85  	tailLen := b % dzClearLen
    86  	if tailLen >= dzClearStep {
    87  		off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
    88  	}
    89  	return off
    90  }
    91  
    92  // duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
    93  // b is the number of bytes to zero.
    94  func dzDI(b int64) int64 {
    95  	tailLen := b % dzClearLen
    96  	if tailLen < dzClearStep {
    97  		return 0
    98  	}
    99  	tailSteps := tailLen / dzClearStep
   100  	return -dzClearStep * (dzBlockLen - tailSteps)
   101  }
   102  
// zerorange emits instructions after p that zero the stack words in
// [frame+lo, frame+hi). ax and x0 record whether AX / X0 already hold
// zero so repeated calls can skip re-zeroing them. Returns the last
// instruction emitted.
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}

	if cnt%int64(gc.Widthreg) != 0 {
		// should only happen with nacl
		if cnt%int64(gc.Widthptr) != 0 {
			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
		}
		if *ax == 0 {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		// Clear the odd pointer-sized word, then fall through to the
		// register-width strategies below.
		p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
		lo += int64(gc.Widthptr)
		cnt -= int64(gc.Widthptr)
	}

	if cnt == 8 {
		// Exactly one quadword: a single MOVQ from AX.
		if *ax == 0 {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
	} else if cnt <= int64(8*gc.Widthreg) {
		// Small range: unrolled 16-byte MOVUPS stores from zeroed X0.
		if *x0 == 0 {
			p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
			*x0 = 1
		}

		for i := int64(0); i < cnt/16; i++ {
			p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
		}

		// An 8-byte remainder is covered by one final overlapping
		// 16-byte store ending exactly at the top of the range.
		if cnt%16 != 0 {
			p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
		// Medium range: jump into runtime.duffzero. dzDI/dzOff compute
		// the DI pre-adjustment and entry offset for a partial first block.
		if *x0 == 0 {
			p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
			*x0 = 1
		}

		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))

		// Duffzero clears in 16-byte units; mop up a trailing 8 bytes
		// with an overlapping store relative to the final DI.
		if cnt%16 != 0 {
			p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
		}
	} else {
		// Large range: REP STOSQ with quadword count in CX, base in DI.
		if *ax == 0 {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}

		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
		p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
	}

	return p
}
   169  
   170  func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
   171  	q := gc.Ctxt.NewProg()
   172  	gc.Clearp(q)
   173  	q.As = int16(as)
   174  	q.Lineno = p.Lineno
   175  	q.From.Type = int16(ftype)
   176  	q.From.Reg = int16(freg)
   177  	q.From.Offset = foffset
   178  	q.To.Type = int16(ttype)
   179  	q.To.Reg = int16(treg)
   180  	q.To.Offset = toffset
   181  	q.Link = p.Link
   182  	p.Link = q
   183  	return q
   184  }
   185  
// panicdiv caches the Node for runtime.panicdivide; created lazily in dodiv.
var panicdiv *gc.Node
   187  
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := false
	if gc.Issigned[t.Etype] {
		check = true
		// A constant operand can rule out the minint / -1 combination,
		// making the runtime check unnecessary.
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = false
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = false
		}
	}

	// Widen 8/16-bit operands to 32 bits; the widened divide cannot
	// hit the minint / -1 trap, so no check is needed either.
	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = false
	}

	a := optoas(op, t)

	// n3 holds the divisor; the dividend is generated into AX.
	var n3 gc.Node
	gc.Regalloc(&n3, t0, nil)
	var ax gc.Node
	var oldax gc.Node
	// Evaluate the more complex operand first.
	if nl.Ullman >= nr.Ullman {
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
		gc.Cgen(nr, &n3)
		gc.Regfree(&ax)
	} else {
		gc.Cgen(nr, &n3)
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
	}

	if t != t0 {
		// Convert
		ax1 := ax

		n31 := n3
		ax.Type = t
		n3.Type = t
		gmove(&ax1, &ax)
		gmove(&n31, &n3)
	}

	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)

		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	var p2 *obj.Prog
	if check {
		// Guard the minint / -1 case explicitly: the hardware would
		// trap, but the language defines the results.
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &ax)

			gmove(&ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)

			gmove(&n4, res)
		}

		// Jump over the real divide; patched to gc.Pc below.
		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	var olddx gc.Node
	var dx gc.Node
	savex(x86.REG_DX, &dx, &olddx, res, t)
	if !gc.Issigned[t.Etype] {
		// Unsigned divide: the high half (DX) is zero.
		gc.Nodconst(&n4, t, 0)
		gmove(&n4, &dx)
	} else {
		// Signed divide: extend the dividend into DX.
		gins(optoas(gc.OEXTEND, t), nil, nil)
	}
	// Divide by n3: quotient lands in AX, remainder in DX.
	gins(a, &n3, nil)
	gc.Regfree(&n3)
	if op == gc.ODIV {
		gmove(&ax, res)
	} else {
		gmove(&dx, res)
	}
	restx(&dx, &olddx)
	if check {
		gc.Patch(p2, gc.Pc)
	}
	restx(&ax, &oldax)
}
   314  
/*
 * register dr is one of the special ones (AX, CX, DI, SI, etc.).
 * we need to use it.  if it is already allocated as a temporary
 * (r > 1; can only happen if a routine like sgen passed a
 * special as cgen's res and then cgen used regalloc to reuse
 * it as its own temporary), then move it for now to another
 * register.  caller must call restx to move it back.
 * the move is not necessary if dr == res, because res is
 * known to be dead.
 */
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := uint8(gc.GetReg(dr)) // current allocation count for dr

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{} // zero Op marks "nothing saved" for restx

	gc.Nodreg(x, t, dr)
	if r > 1 && !gc.Samereg(x, res) {
		// Spill the full 64-bit register into a fresh temporary.
		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
		x.Type = gc.Types[gc.TINT64]
		gmove(x, oldx)
		x.Type = t
		oldx.Etype = r // squirrel away old r value
		gc.SetReg(dr, 1)
	}
}
   342  
   343  func restx(x *gc.Node, oldx *gc.Node) {
   344  	if oldx.Op != 0 {
   345  		x.Type = gc.Types[gc.TINT64]
   346  		gc.SetReg(int(x.Reg), int(oldx.Etype))
   347  		gmove(oldx, x)
   348  		gc.Regfree(oldx)
   349  	}
   350  }
   351  
/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	t := nl.Type
	a := optoas(gc.OHMUL, t)
	// Multiply commutes: evaluate the more complex operand first.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	var ax gc.Node
	gc.Nodreg(&ax, t, x86.REG_AX)
	// One-operand multiply of AX by n2; the high half of the product
	// is read out of DX below (AH for the byte form).
	gmove(&n1, &ax)
	gins(a, &n2, nil)
	gc.Regfree(&n2)
	gc.Regfree(&n1)

	var dx gc.Node
	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)

		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx) // move the high byte from AH into DX
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
   386  
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	// Constant shift count: no CX juggling or bounds branch needed.
	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	// Operands containing function calls are spilled to temporaries so
	// the calls cannot clobber the registers set up below.
	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	rcx := gc.GetReg(x86.REG_CX) // is CX currently allocated?
	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)

	// If CX was live and is not the destination, preserve its value
	// in a temporary; restored after the shift.
	var oldcx gc.Node
	if rcx > 0 && !gc.Samereg(&cx, res) {
		gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
		gmove(&cx, &oldcx)
	}

	cx.Type = tcount

	var n2 gc.Node
	if gc.Samereg(&cx, res) {
		gc.Regalloc(&n2, nl.Type, nil)
	} else {
		gc.Regalloc(&n2, nl.Type, res)
	}
	// Evaluate the more complex operand first.
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			// Signed right shift by >= width: shift by width-1
			// so the sign bit fills the result.
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			// Unsigned or left shift by >= width yields 0.
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	// Restore CX if it was saved above.
	if oldcx.Op != 0 {
		cx.Type = gc.Types[gc.TUINT64]
		gmove(&oldcx, &cx)
		gc.Regfree(&oldcx)
	}

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
   504  
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 * Reports false when the operation is not a byte multiply, so the
 * caller can fall back to the normal path.
 */
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	// generate operands in "8-bit" registers.
	var n1b gc.Node
	gc.Regalloc(&n1b, nl.Type, res)

	gc.Cgen(nl, &n1b)
	var n2b gc.Node
	gc.Regalloc(&n2b, nr.Type, nil)
	gc.Cgen(nr, &n2b)

	// perform full-width multiplication.
	t := gc.Types[gc.TUINT64]

	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT64]
	}
	// View the same physical registers at full width for the multiply.
	var n1 gc.Node
	gc.Nodreg(&n1, t, int(n1b.Reg))
	var n2 gc.Node
	gc.Nodreg(&n2, t, int(n2b.Reg))
	a := optoas(op, t)
	gins(a, &n2, &n1)

	// truncate.
	gmove(&n1, res)

	gc.Regfree(&n1b)
	gc.Regfree(&n2b)
	return true
}
   550  
// clearfat zeroes the "fat" (multi-word) value nl in place, choosing a
// strategy by size: componentgen for simple small types, REP STOSQ for
// very large values (or large values under NaCl), DUFFZERO for medium
// values, and inline stores via clearfat_tail for the rest.
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	w := nl.Type.Width

	if w > 1024 || (gc.Nacl && w >= 64) {
		// Large: AX = 0, CX = quadword count, then REP STOSQ.
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var ax gc.Node
		var oldax gc.Node
		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
		gconreg(x86.AMOVL, 0, x86.REG_AX)
		gconreg(movptr, w/8, x86.REG_CX)

		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+

		// Clear the sub-quadword remainder through DI.
		if w%8 != 0 {
			n1.Op = gc.OINDREG
			clearfat_tail(&n1, w%8)
		}

		restx(&n1, &oldn1)
		restx(&ax, &oldax)
		return
	}

	if w >= 64 {
		// Medium: zero X0 and jump into runtime.duffzero.
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var vec_zero gc.Node
		var old_x0 gc.Node
		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
		gins(x86.AXORPS, &vec_zero, &vec_zero)

		// Pre-adjust DI for a partial first block; see dzDI/dzOff.
		if di := dzDI(w); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(w)

		// Finish a non-multiple-of-16 size with one overlapping
		// MOVUPS ending exactly at the top of the object.
		if w%16 != 0 {
			n1.Op = gc.OINDREG
			n1.Xoffset -= 16 - w%16
			gins(x86.AMOVUPS, &vec_zero, &n1)
		}

		restx(&vec_zero, &old_x0)
		restx(&n1, &oldn1)
		return
	}

	// NOTE: Must use agen, not igen, so that optimizer sees address
	// being taken. We are not writing on field boundaries.
	var n1 gc.Node
	gc.Agenr(nl, &n1, nil)
	n1.Op = gc.OINDREG

	clearfat_tail(&n1, w)

	gc.Regfree(&n1)
}
   629  
// clearfat_tail zeroes b bytes at the register-indirect location n1
// (an OINDREG node), advancing n1.Xoffset as it goes. It uses 16-byte
// MOVUPS stores while possible, then progressively narrower MOVs,
// deliberately emitting overlapping stores to save instructions.
func clearfat_tail(n1 *gc.Node, b int64) {
	if b >= 16 {
		var vec_zero gc.Node
		gc.Regalloc(&vec_zero, gc.Types[gc.TFLOAT64], nil)
		gins(x86.AXORPS, &vec_zero, &vec_zero)

		for b >= 16 {
			gins(x86.AMOVUPS, &vec_zero, n1)
			n1.Xoffset += 16
			b -= 16
		}

		// MOVUPS X0, off(base) is a few bytes shorter than MOV 0, off(base)
		if b != 0 {
			// One overlapping store covering the final b bytes.
			n1.Xoffset -= 16 - b
			gins(x86.AMOVUPS, &vec_zero, n1)
		}

		gc.Regfree(&vec_zero)
		return
	}

	// Write sequence of MOV 0, off(base) instead of using STOSQ.
	// The hope is that although the code will be slightly longer,
	// the MOVs will have no dependencies and pipeline better
	// than the unrolled STOSQ loop.
	var z gc.Node
	gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
	if b >= 8 {
		n1.Type = z.Type
		gins(x86.AMOVQ, &z, n1)
		n1.Xoffset += 8
		b -= 8

		if b != 0 {
			// Overlapping 8-byte store for the remainder.
			n1.Xoffset -= 8 - b
			gins(x86.AMOVQ, &z, n1)
		}
		return
	}

	if b >= 4 {
		gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
		n1.Type = z.Type
		gins(x86.AMOVL, &z, n1)
		n1.Xoffset += 4
		b -= 4

		if b != 0 {
			// Overlapping 4-byte store for the remainder.
			n1.Xoffset -= 4 - b
			gins(x86.AMOVL, &z, n1)
		}
		return
	}

	if b >= 2 {
		gc.Nodconst(&z, gc.Types[gc.TUINT16], 0)
		n1.Type = z.Type
		gins(x86.AMOVW, &z, n1)
		n1.Xoffset += 2
		b -= 2
	}

	// Remaining 0 or 1 bytes: byte stores.
	gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
	for b > 0 {
		n1.Type = z.Type
		gins(x86.AMOVB, &z, n1)
		n1.Xoffset++
		b--
	}

}
   702  
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}

		// check is
		//	CMP arg, $0
		//	JNE 2(PC) (likely)
		//	MOV AX, 0
		p1 = gc.Ctxt.NewProg()

		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		// Splice p1 and p2 into the list: p -> p1 -> p2 -> old p.Link.
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		// Rewrite the CHECKNIL itself into CMP arg, $0.
		p.As = int16(cmpptr)
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		// Branch over the faulting store when the pointer is non-nil.
		p1.As = x86.AJNE
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = 1 // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		// if possible, since we know arg is 0, use 0(arg),
		// which will be shorter to encode than plain 0.
		p2.As = x86.AMOVL

		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = x86.REG_AX
		if regtyp(&p.From) {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = p.From.Reg
		} else {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = x86.REG_NONE
		}

		p2.To.Offset = 0
	}
}
   760  
   761  // addr += index*width if possible.
   762  func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
   763  	switch width {
   764  	case 1, 2, 4, 8:
   765  		p1 := gins(x86.ALEAQ, index, addr)
   766  		p1.From.Type = obj.TYPE_MEM
   767  		p1.From.Scale = int16(width)
   768  		p1.From.Index = p1.From.Reg
   769  		p1.From.Reg = p1.To.Reg
   770  		return true
   771  	}
   772  	return false
   773  }
   774  
// res = runtime.getg()
// getg loads the g pointer via the TLS pseudo-register: the first MOV
// materializes the TLS base into a register, the second loads through
// it using a TLS-indexed memory operand, and the result is moved to res.
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Regalloc(&n1, res.Type, res)
	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
	// MOV TLS, n1
	p := gins(mov, nil, &n1)
	p.From.Type = obj.TYPE_REG
	p.From.Reg = x86.REG_TLS
	// MOV (n1)(TLS*1), n1
	p = gins(mov, nil, &n1)
	p.From = p.To
	p.From.Type = obj.TYPE_MEM
	p.From.Index = x86.REG_TLS
	p.From.Scale = 1
	gmove(&n1, res)
	gc.Regfree(&n1)
}