github.com/gocuntian/go@v0.0.0-20160610041250-fee02d270bf8/src/cmd/compile/internal/amd64/ssa.go

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package amd64
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  
    11  	"cmd/compile/internal/gc"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/internal/obj"
    14  	"cmd/internal/obj/x86"
    15  )
    16  
    17  // Smallest possible faulting page at address zero.
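        // Loads and stores with offsets in [0, minZeroPage) are assumed to fault
        // whenever the base pointer is nil; the nil-check lowering in ssaGenValue
        // below relies on this to elide explicit checks.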
    18  const minZeroPage = 4096
    19  
    20  // ssaRegToReg maps ssa register numbers to obj register numbers.
    21  var ssaRegToReg = []int16{
    22  	x86.REG_AX,
    23  	x86.REG_CX,
    24  	x86.REG_DX,
    25  	x86.REG_BX,
    26  	x86.REG_SP,
    27  	x86.REG_BP,
    28  	x86.REG_SI,
    29  	x86.REG_DI,
    30  	x86.REG_R8,
    31  	x86.REG_R9,
    32  	x86.REG_R10,
    33  	x86.REG_R11,
    34  	x86.REG_R12,
    35  	x86.REG_R13,
    36  	x86.REG_R14,
    37  	x86.REG_R15,
    38  	x86.REG_X0,
    39  	x86.REG_X1,
    40  	x86.REG_X2,
    41  	x86.REG_X3,
    42  	x86.REG_X4,
    43  	x86.REG_X5,
    44  	x86.REG_X6,
    45  	x86.REG_X7,
    46  	x86.REG_X8,
    47  	x86.REG_X9,
    48  	x86.REG_X10,
    49  	x86.REG_X11,
    50  	x86.REG_X12,
    51  	x86.REG_X13,
    52  	x86.REG_X14,
    53  	x86.REG_X15,
    54  	0, // SB isn't a real register.  We fill an Addr.Reg field with 0 in this case.
    55  }
    56  
    57  // ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
    58  func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
    59  	flive := b.FlagsLiveAtEnd
    60  	if b.Control != nil && b.Control.Type.IsFlags() {
    61  		flive = true
    62  	}
    63  	for i := len(b.Values) - 1; i >= 0; i-- {
    64  		v := b.Values[i]
    65  		if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
    66  			// The "mark" is any non-nil Aux value.
    67  			v.Aux = v
    68  		}
    69  		if v.Type.IsFlags() {
    70  			flive = false
    71  		}
    72  		for _, a := range v.Args {
    73  			if a.Type.IsFlags() {
    74  				flive = true
    75  			}
    76  		}
    77  	}
    78  }
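        // The mark matters because the assembler rewrites MOV $0, reg into the
        // shorter XOR reg, reg unless PRESERVEFLAGS is set (see the MOVQconst case
        // in ssaGenValue), and XOR clobbers the flags. A sketch of the hazard being
        // avoided, with purely illustrative registers:
        //
        //	CMPQ AX, BX    // sets flags
        //	MOVQ $0, CX    // must stay a MOV while the flags are live
        //	JLT  target    // consumes flags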
    79  
    80  // loadByType returns the load instruction of the given type.
    81  func loadByType(t ssa.Type) obj.As {
    82  	// Avoid partial register write
    83  	if !t.IsFloat() && t.Size() <= 2 {
    84  		if t.Size() == 1 {
    85  			return x86.AMOVBLZX
    86  		} else {
    87  			return x86.AMOVWLZX
    88  		}
    89  	}
    90  	// Otherwise, there's no difference between load and store opcodes.
    91  	return storeByType(t)
    92  }
    93  
    94  // storeByType returns the store instruction of the given type.
    95  func storeByType(t ssa.Type) obj.As {
    96  	width := t.Size()
    97  	if t.IsFloat() {
    98  		switch width {
    99  		case 4:
   100  			return x86.AMOVSS
   101  		case 8:
   102  			return x86.AMOVSD
   103  		}
   104  	} else {
   105  		switch width {
   106  		case 1:
   107  			return x86.AMOVB
   108  		case 2:
   109  			return x86.AMOVW
   110  		case 4:
   111  			return x86.AMOVL
   112  		case 8:
   113  			return x86.AMOVQ
   114  		}
   115  	}
   116  	panic("bad store type")
   117  }
   118  
   119  // moveByType returns the reg->reg move instruction of the given type.
   120  func moveByType(t ssa.Type) obj.As {
   121  	if t.IsFloat() {
   122  		// Moving the whole sse2 register is faster
   123  		// than moving just the correct low portion of it.
   124  		// There is no xmm->xmm move with a 1-byte opcode,
   125  		// so use movups, which has a 2-byte opcode.
   126  		return x86.AMOVUPS
   127  	} else {
   128  		switch t.Size() {
   129  		case 1:
   130  			// Avoids partial register write
   131  			return x86.AMOVL
   132  		case 2:
   133  			return x86.AMOVL
   134  		case 4:
   135  			return x86.AMOVL
   136  		case 8:
   137  			return x86.AMOVQ
   138  		case 16:
   139  			return x86.AMOVUPS // int128s are in SSE registers
   140  		default:
   141  			panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
   142  		}
   143  	}
   144  }
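        // Rough examples of the mappings above (illustrative, not exhaustive):
        //
        //	1-byte int:  load MOVBLZX, store MOVB, reg->reg move MOVL (avoids partial register writes)
        //	8-byte int:  MOVQ for load, store, and move
        //	float64:     load/store MOVSD, reg->reg move MOVUPS (whole XMM register)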
   145  
   146  // opregreg emits instructions for
   147  //     dest := dest(To) op src(From)
   148  // and also returns the created obj.Prog so it
   149  // may be further adjusted (offset, scale, etc).
   150  func opregreg(op obj.As, dest, src int16) *obj.Prog {
   151  	p := gc.Prog(op)
   152  	p.From.Type = obj.TYPE_REG
   153  	p.To.Type = obj.TYPE_REG
   154  	p.To.Reg = dest
   155  	p.From.Reg = src
   156  	return p
   157  }
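        // For example, opregreg(x86.AADDQ, x86.REG_AX, x86.REG_BX) emits ADDQ BX, AX,
        // i.e. AX += BX (the registers here are purely illustrative).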
   158  
   159  func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
   160  	s.SetLineno(v.Line)
   161  	switch v.Op {
   162  	case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
   163  		r := gc.SSARegNum(v)
   164  		r1 := gc.SSARegNum(v.Args[0])
   165  		r2 := gc.SSARegNum(v.Args[1])
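        		// A sketch of the three forms chosen below (registers illustrative;
        		// ADDL/LEAL are used for the 32-bit op):
        		//	r == r1:   ADDQ r2, r
        		//	r == r2:   ADDQ r1, r
        		//	otherwise: LEAQ (r1)(r2*1), r   // add without clobbering r1 or r2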
   166  		switch {
   167  		case r == r1:
   168  			p := gc.Prog(v.Op.Asm())
   169  			p.From.Type = obj.TYPE_REG
   170  			p.From.Reg = r2
   171  			p.To.Type = obj.TYPE_REG
   172  			p.To.Reg = r
   173  		case r == r2:
   174  			p := gc.Prog(v.Op.Asm())
   175  			p.From.Type = obj.TYPE_REG
   176  			p.From.Reg = r1
   177  			p.To.Type = obj.TYPE_REG
   178  			p.To.Reg = r
   179  		default:
   180  			var asm obj.As
   181  			if v.Op == ssa.OpAMD64ADDQ {
   182  				asm = x86.ALEAQ
   183  			} else {
   184  				asm = x86.ALEAL
   185  			}
   186  			p := gc.Prog(asm)
   187  			p.From.Type = obj.TYPE_MEM
   188  			p.From.Reg = r1
   189  			p.From.Scale = 1
   190  			p.From.Index = r2
   191  			p.To.Type = obj.TYPE_REG
   192  			p.To.Reg = r
   193  		}
   194  	// 2-address opcode arithmetic
   195  	case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL,
   196  		ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
   197  		ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
   198  		ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
   199  		ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
   200  		ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
   201  		ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
   202  		ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB,
   203  		ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
   204  		ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
   205  		ssa.OpAMD64PXOR:
   206  		r := gc.SSARegNum(v)
   207  		if r != gc.SSARegNum(v.Args[0]) {
   208  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   209  		}
   210  		opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
   211  
   212  	case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW,
   213  		ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU,
   214  		ssa.OpAMD64MODQ, ssa.OpAMD64MODL, ssa.OpAMD64MODW,
   215  		ssa.OpAMD64MODQU, ssa.OpAMD64MODLU, ssa.OpAMD64MODWU:
   216  
   217  		// Arg[0] is already in AX as it's the only register we allow
   218  		// and AX is the only output
   219  		x := gc.SSARegNum(v.Args[1])
   220  
   221  		// The CPU faults on signed overflow, which occurs when the most
   222  		// negative int is divided by -1.
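        		// Roughly, the sequence emitted below for a signed DIVQ is
        		// (x is the divisor register; labels are symbolic):
        		//
        		//	CMPQ  x, $-1
        		//	JEQ   fixup      // skip IDIV when dividing by -1
        		//	CQO              // sign-extend AX into DX
        		//	IDIVQ x
        		//	JMP   done
        		// fixup:
        		//	NEGQ  AX         // for MOD, DX is zeroed instead
        		// done: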
   223  		var j *obj.Prog
   224  		if v.Op == ssa.OpAMD64DIVQ || v.Op == ssa.OpAMD64DIVL ||
   225  			v.Op == ssa.OpAMD64DIVW || v.Op == ssa.OpAMD64MODQ ||
   226  			v.Op == ssa.OpAMD64MODL || v.Op == ssa.OpAMD64MODW {
   227  
   228  			var c *obj.Prog
   229  			switch v.Op {
   230  			case ssa.OpAMD64DIVQ, ssa.OpAMD64MODQ:
   231  				c = gc.Prog(x86.ACMPQ)
   232  				j = gc.Prog(x86.AJEQ)
   233  				// go ahead and sign extend to save doing it later
   234  				gc.Prog(x86.ACQO)
   235  
   236  			case ssa.OpAMD64DIVL, ssa.OpAMD64MODL:
   237  				c = gc.Prog(x86.ACMPL)
   238  				j = gc.Prog(x86.AJEQ)
   239  				gc.Prog(x86.ACDQ)
   240  
   241  			case ssa.OpAMD64DIVW, ssa.OpAMD64MODW:
   242  				c = gc.Prog(x86.ACMPW)
   243  				j = gc.Prog(x86.AJEQ)
   244  				gc.Prog(x86.ACWD)
   245  			}
   246  			c.From.Type = obj.TYPE_REG
   247  			c.From.Reg = x
   248  			c.To.Type = obj.TYPE_CONST
   249  			c.To.Offset = -1
   250  
   251  			j.To.Type = obj.TYPE_BRANCH
   252  
   253  		}
   254  
   255  		// For unsigned ints, we extend into DX by zeroing it;
   256  		// signed ints were sign extended above.
   257  		if v.Op == ssa.OpAMD64DIVQU || v.Op == ssa.OpAMD64MODQU ||
   258  			v.Op == ssa.OpAMD64DIVLU || v.Op == ssa.OpAMD64MODLU ||
   259  			v.Op == ssa.OpAMD64DIVWU || v.Op == ssa.OpAMD64MODWU {
   260  			c := gc.Prog(x86.AXORQ)
   261  			c.From.Type = obj.TYPE_REG
   262  			c.From.Reg = x86.REG_DX
   263  			c.To.Type = obj.TYPE_REG
   264  			c.To.Reg = x86.REG_DX
   265  		}
   266  
   267  		p := gc.Prog(v.Op.Asm())
   268  		p.From.Type = obj.TYPE_REG
   269  		p.From.Reg = x
   270  
   271  		// Signed division: the rest of the divisor == -1 check.
   272  		if j != nil {
   273  			j2 := gc.Prog(obj.AJMP)
   274  			j2.To.Type = obj.TYPE_BRANCH
   275  
   276  			var n *obj.Prog
   277  			if v.Op == ssa.OpAMD64DIVQ || v.Op == ssa.OpAMD64DIVL ||
   278  				v.Op == ssa.OpAMD64DIVW {
   279  				// n / -1 == -n
   280  				n = gc.Prog(x86.ANEGQ)
   281  				n.To.Type = obj.TYPE_REG
   282  				n.To.Reg = x86.REG_AX
   283  			} else {
   284  				// n % -1 == 0
   285  				n = gc.Prog(x86.AXORQ)
   286  				n.From.Type = obj.TYPE_REG
   287  				n.From.Reg = x86.REG_DX
   288  				n.To.Type = obj.TYPE_REG
   289  				n.To.Reg = x86.REG_DX
   290  			}
   291  
   292  			j.To.Val = n
   293  			j2.To.Val = s.Pc()
   294  		}
   295  
   296  	case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULW, ssa.OpAMD64HMULB,
   297  		ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU, ssa.OpAMD64HMULWU, ssa.OpAMD64HMULBU:
   298  		// The frontend rewrites constant division by 8/16/32-bit integers
   299  		// into HMUL by a constant.
   300  		// SSA rewrites generate the 64-bit versions.
   301  
   302  		// Arg[0] is already in AX as it's the only register we allow
   303  		// and DX is the only output we care about (the high bits)
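        		// e.g. HMULQ emits the one-operand IMULQ x, which leaves the high
        		// half of AX*x in DX; the unsigned variants use MULQ and friends.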
   304  		p := gc.Prog(v.Op.Asm())
   305  		p.From.Type = obj.TYPE_REG
   306  		p.From.Reg = gc.SSARegNum(v.Args[1])
   307  
   308  		// IMULB puts the high portion in AH instead of DL,
   309  		// so move it to DL for consistency
   310  		if v.Type.Size() == 1 {
   311  			m := gc.Prog(x86.AMOVB)
   312  			m.From.Type = obj.TYPE_REG
   313  			m.From.Reg = x86.REG_AH
   314  			m.To.Type = obj.TYPE_REG
   315  			m.To.Reg = x86.REG_DX
   316  		}
   317  
   318  	case ssa.OpAMD64AVGQU:
   319  		// compute (x+y)/2 unsigned.
   320  		// Do a 64-bit add, the overflow goes into the carry.
   321  		// Shift right once and pull the carry back into the 63rd bit.
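        		// i.e. roughly:
        		//	ADDQ y, r    // r = x+y, with the 65th bit landing in CF
        		//	RCRQ $1, r   // rotate CF back in as bit 63: r = (x+y)>>1 unsigned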
   322  		r := gc.SSARegNum(v)
   323  		if r != gc.SSARegNum(v.Args[0]) {
   324  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   325  		}
   326  		p := gc.Prog(x86.AADDQ)
   327  		p.From.Type = obj.TYPE_REG
   328  		p.To.Type = obj.TYPE_REG
   329  		p.To.Reg = r
   330  		p.From.Reg = gc.SSARegNum(v.Args[1])
   331  		p = gc.Prog(x86.ARCRQ)
   332  		p.From.Type = obj.TYPE_CONST
   333  		p.From.Offset = 1
   334  		p.To.Type = obj.TYPE_REG
   335  		p.To.Reg = r
   336  
   337  	case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
   338  		r := gc.SSARegNum(v)
   339  		a := gc.SSARegNum(v.Args[0])
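        		// A sketch of the forms chosen below (c is the constant;
        		// 32-bit ops use INCL/DECL/ADDL/LEAL):
        		//	r == a && c == 1   -> INCQ r
        		//	r == a && c == -1  -> DECQ r
        		//	r == a             -> ADDQ $c, r
        		//	otherwise          -> LEAQ c(a), r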
   340  		if r == a {
   341  			if v.AuxInt == 1 {
   342  				var asm obj.As
   343  				// The software optimization manual recommends add $1,reg,
   344  				// but inc/dec is 1 byte smaller. ICC always uses inc;
   345  				// Clang/GCC choose depending on flags, but prefer add.
   346  				// Experiments show that inc/dec is both a little faster
   347  				// and makes the binary a little smaller.
   348  				if v.Op == ssa.OpAMD64ADDQconst {
   349  					asm = x86.AINCQ
   350  				} else {
   351  					asm = x86.AINCL
   352  				}
   353  				p := gc.Prog(asm)
   354  				p.To.Type = obj.TYPE_REG
   355  				p.To.Reg = r
   356  				return
   357  			}
   358  			if v.AuxInt == -1 {
   359  				var asm obj.As
   360  				if v.Op == ssa.OpAMD64ADDQconst {
   361  					asm = x86.ADECQ
   362  				} else {
   363  					asm = x86.ADECL
   364  				}
   365  				p := gc.Prog(asm)
   366  				p.To.Type = obj.TYPE_REG
   367  				p.To.Reg = r
   368  				return
   369  			}
   370  			p := gc.Prog(v.Op.Asm())
   371  			p.From.Type = obj.TYPE_CONST
   372  			p.From.Offset = v.AuxInt
   373  			p.To.Type = obj.TYPE_REG
   374  			p.To.Reg = r
   375  			return
   376  		}
   377  		var asm obj.As
   378  		if v.Op == ssa.OpAMD64ADDQconst {
   379  			asm = x86.ALEAQ
   380  		} else {
   381  			asm = x86.ALEAL
   382  		}
   383  		p := gc.Prog(asm)
   384  		p.From.Type = obj.TYPE_MEM
   385  		p.From.Reg = a
   386  		p.From.Offset = v.AuxInt
   387  		p.To.Type = obj.TYPE_REG
   388  		p.To.Reg = r
   389  
   390  	case ssa.OpAMD64CMOVQEQconst, ssa.OpAMD64CMOVLEQconst, ssa.OpAMD64CMOVWEQconst,
   391  		ssa.OpAMD64CMOVQNEconst, ssa.OpAMD64CMOVLNEconst, ssa.OpAMD64CMOVWNEconst:
   392  		r := gc.SSARegNum(v)
   393  		if r != gc.SSARegNum(v.Args[0]) {
   394  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   395  		}
   396  
   397  		// Constant into AX
   398  		p := gc.Prog(moveByType(v.Type))
   399  		p.From.Type = obj.TYPE_CONST
   400  		p.From.Offset = v.AuxInt
   401  		p.To.Type = obj.TYPE_REG
   402  		p.To.Reg = x86.REG_AX
   403  
   404  		p = gc.Prog(v.Op.Asm())
   405  		p.From.Type = obj.TYPE_REG
   406  		p.From.Reg = x86.REG_AX
   407  		p.To.Type = obj.TYPE_REG
   408  		p.To.Reg = r
   409  
   410  	case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
   411  		r := gc.SSARegNum(v)
   412  		if r != gc.SSARegNum(v.Args[0]) {
   413  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   414  		}
   415  		p := gc.Prog(v.Op.Asm())
   416  		p.From.Type = obj.TYPE_CONST
   417  		p.From.Offset = v.AuxInt
   418  		p.To.Type = obj.TYPE_REG
   419  		p.To.Reg = r
   420  		// TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2;
   421  		// then we won't need to use resultInArg0 for these ops.
   422  		//p.From3 = new(obj.Addr)
   423  		//p.From3.Type = obj.TYPE_REG
   424  		//p.From3.Reg = gc.SSARegNum(v.Args[0])
   425  
   426  	case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
   427  		ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst,
   428  		ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
   429  		ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
   430  		ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
   431  		ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
   432  		ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
   433  		ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
   434  		r := gc.SSARegNum(v)
   435  		if r != gc.SSARegNum(v.Args[0]) {
   436  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   437  		}
   438  		p := gc.Prog(v.Op.Asm())
   439  		p.From.Type = obj.TYPE_CONST
   440  		p.From.Offset = v.AuxInt
   441  		p.To.Type = obj.TYPE_REG
   442  		p.To.Reg = r
   443  	case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
   444  		r := gc.SSARegNum(v)
   445  		p := gc.Prog(v.Op.Asm())
   446  		p.From.Type = obj.TYPE_REG
   447  		p.From.Reg = r
   448  		p.To.Type = obj.TYPE_REG
   449  		p.To.Reg = r
   450  	case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
   451  		r := gc.SSARegNum(v.Args[0])
   452  		i := gc.SSARegNum(v.Args[1])
   453  		p := gc.Prog(x86.ALEAQ)
   454  		switch v.Op {
   455  		case ssa.OpAMD64LEAQ1:
   456  			p.From.Scale = 1
   457  			if i == x86.REG_SP {
   458  				r, i = i, r
   459  			}
   460  		case ssa.OpAMD64LEAQ2:
   461  			p.From.Scale = 2
   462  		case ssa.OpAMD64LEAQ4:
   463  			p.From.Scale = 4
   464  		case ssa.OpAMD64LEAQ8:
   465  			p.From.Scale = 8
   466  		}
   467  		p.From.Type = obj.TYPE_MEM
   468  		p.From.Reg = r
   469  		p.From.Index = i
   470  		gc.AddAux(&p.From, v)
   471  		p.To.Type = obj.TYPE_REG
   472  		p.To.Reg = gc.SSARegNum(v)
   473  	case ssa.OpAMD64LEAQ:
   474  		p := gc.Prog(x86.ALEAQ)
   475  		p.From.Type = obj.TYPE_MEM
   476  		p.From.Reg = gc.SSARegNum(v.Args[0])
   477  		gc.AddAux(&p.From, v)
   478  		p.To.Type = obj.TYPE_REG
   479  		p.To.Reg = gc.SSARegNum(v)
   480  	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
   481  		ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB:
   482  		opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[0]))
   483  	case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
   484  		// The Go assembler has swapped operands for UCOMISx relative to CMP;
   485  		// we must account for that right here.
   486  		opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]))
   487  	case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
   488  		p := gc.Prog(v.Op.Asm())
   489  		p.From.Type = obj.TYPE_REG
   490  		p.From.Reg = gc.SSARegNum(v.Args[0])
   491  		p.To.Type = obj.TYPE_CONST
   492  		p.To.Offset = v.AuxInt
   493  	case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
   494  		p := gc.Prog(v.Op.Asm())
   495  		p.From.Type = obj.TYPE_CONST
   496  		p.From.Offset = v.AuxInt
   497  		p.To.Type = obj.TYPE_REG
   498  		p.To.Reg = gc.SSARegNum(v.Args[0])
   499  	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
   500  		x := gc.SSARegNum(v)
   501  		p := gc.Prog(v.Op.Asm())
   502  		p.From.Type = obj.TYPE_CONST
   503  		p.From.Offset = v.AuxInt
   504  		p.To.Type = obj.TYPE_REG
   505  		p.To.Reg = x
   506  		// If flags are live at this instruction, suppress the
   507  		// MOV $0,AX -> XOR AX,AX optimization.
   508  		if v.Aux != nil {
   509  			p.Mark |= x86.PRESERVEFLAGS
   510  		}
   511  	case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
   512  		x := gc.SSARegNum(v)
   513  		p := gc.Prog(v.Op.Asm())
   514  		p.From.Type = obj.TYPE_FCONST
   515  		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
   516  		p.To.Type = obj.TYPE_REG
   517  		p.To.Reg = x
   518  	case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
   519  		p := gc.Prog(v.Op.Asm())
   520  		p.From.Type = obj.TYPE_MEM
   521  		p.From.Reg = gc.SSARegNum(v.Args[0])
   522  		gc.AddAux(&p.From, v)
   523  		p.To.Type = obj.TYPE_REG
   524  		p.To.Reg = gc.SSARegNum(v)
   525  	case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
   526  		p := gc.Prog(v.Op.Asm())
   527  		p.From.Type = obj.TYPE_MEM
   528  		p.From.Reg = gc.SSARegNum(v.Args[0])
   529  		gc.AddAux(&p.From, v)
   530  		p.From.Scale = 8
   531  		p.From.Index = gc.SSARegNum(v.Args[1])
   532  		p.To.Type = obj.TYPE_REG
   533  		p.To.Reg = gc.SSARegNum(v)
   534  	case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
   535  		p := gc.Prog(v.Op.Asm())
   536  		p.From.Type = obj.TYPE_MEM
   537  		p.From.Reg = gc.SSARegNum(v.Args[0])
   538  		gc.AddAux(&p.From, v)
   539  		p.From.Scale = 4
   540  		p.From.Index = gc.SSARegNum(v.Args[1])
   541  		p.To.Type = obj.TYPE_REG
   542  		p.To.Reg = gc.SSARegNum(v)
   543  	case ssa.OpAMD64MOVWloadidx2:
   544  		p := gc.Prog(v.Op.Asm())
   545  		p.From.Type = obj.TYPE_MEM
   546  		p.From.Reg = gc.SSARegNum(v.Args[0])
   547  		gc.AddAux(&p.From, v)
   548  		p.From.Scale = 2
   549  		p.From.Index = gc.SSARegNum(v.Args[1])
   550  		p.To.Type = obj.TYPE_REG
   551  		p.To.Reg = gc.SSARegNum(v)
   552  	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1:
   553  		r := gc.SSARegNum(v.Args[0])
   554  		i := gc.SSARegNum(v.Args[1])
   555  		if i == x86.REG_SP {
   556  			r, i = i, r
   557  		}
   558  		p := gc.Prog(v.Op.Asm())
   559  		p.From.Type = obj.TYPE_MEM
   560  		p.From.Reg = r
   561  		p.From.Scale = 1
   562  		p.From.Index = i
   563  		gc.AddAux(&p.From, v)
   564  		p.To.Type = obj.TYPE_REG
   565  		p.To.Reg = gc.SSARegNum(v)
   566  	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
   567  		p := gc.Prog(v.Op.Asm())
   568  		p.From.Type = obj.TYPE_REG
   569  		p.From.Reg = gc.SSARegNum(v.Args[1])
   570  		p.To.Type = obj.TYPE_MEM
   571  		p.To.Reg = gc.SSARegNum(v.Args[0])
   572  		gc.AddAux(&p.To, v)
   573  	case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
   574  		p := gc.Prog(v.Op.Asm())
   575  		p.From.Type = obj.TYPE_REG
   576  		p.From.Reg = gc.SSARegNum(v.Args[2])
   577  		p.To.Type = obj.TYPE_MEM
   578  		p.To.Reg = gc.SSARegNum(v.Args[0])
   579  		p.To.Scale = 8
   580  		p.To.Index = gc.SSARegNum(v.Args[1])
   581  		gc.AddAux(&p.To, v)
   582  	case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
   583  		p := gc.Prog(v.Op.Asm())
   584  		p.From.Type = obj.TYPE_REG
   585  		p.From.Reg = gc.SSARegNum(v.Args[2])
   586  		p.To.Type = obj.TYPE_MEM
   587  		p.To.Reg = gc.SSARegNum(v.Args[0])
   588  		p.To.Scale = 4
   589  		p.To.Index = gc.SSARegNum(v.Args[1])
   590  		gc.AddAux(&p.To, v)
   591  	case ssa.OpAMD64MOVWstoreidx2:
   592  		p := gc.Prog(v.Op.Asm())
   593  		p.From.Type = obj.TYPE_REG
   594  		p.From.Reg = gc.SSARegNum(v.Args[2])
   595  		p.To.Type = obj.TYPE_MEM
   596  		p.To.Reg = gc.SSARegNum(v.Args[0])
   597  		p.To.Scale = 2
   598  		p.To.Index = gc.SSARegNum(v.Args[1])
   599  		gc.AddAux(&p.To, v)
   600  	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1:
   601  		r := gc.SSARegNum(v.Args[0])
   602  		i := gc.SSARegNum(v.Args[1])
   603  		if i == x86.REG_SP {
   604  			r, i = i, r
   605  		}
   606  		p := gc.Prog(v.Op.Asm())
   607  		p.From.Type = obj.TYPE_REG
   608  		p.From.Reg = gc.SSARegNum(v.Args[2])
   609  		p.To.Type = obj.TYPE_MEM
   610  		p.To.Reg = r
   611  		p.To.Scale = 1
   612  		p.To.Index = i
   613  		gc.AddAux(&p.To, v)
   614  	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
   615  		p := gc.Prog(v.Op.Asm())
   616  		p.From.Type = obj.TYPE_CONST
   617  		sc := v.AuxValAndOff()
   618  		p.From.Offset = sc.Val()
   619  		p.To.Type = obj.TYPE_MEM
   620  		p.To.Reg = gc.SSARegNum(v.Args[0])
   621  		gc.AddAux2(&p.To, v, sc.Off())
   622  	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
   623  		p := gc.Prog(v.Op.Asm())
   624  		p.From.Type = obj.TYPE_CONST
   625  		sc := v.AuxValAndOff()
   626  		p.From.Offset = sc.Val()
   627  		r := gc.SSARegNum(v.Args[0])
   628  		i := gc.SSARegNum(v.Args[1])
   629  		switch v.Op {
   630  		case ssa.OpAMD64MOVBstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx1:
   631  			p.To.Scale = 1
   632  			if i == x86.REG_SP {
   633  				r, i = i, r
   634  			}
   635  		case ssa.OpAMD64MOVWstoreconstidx2:
   636  			p.To.Scale = 2
   637  		case ssa.OpAMD64MOVLstoreconstidx4:
   638  			p.To.Scale = 4
   639  		case ssa.OpAMD64MOVQstoreconstidx8:
   640  			p.To.Scale = 8
   641  		}
   642  		p.To.Type = obj.TYPE_MEM
   643  		p.To.Reg = r
   644  		p.To.Index = i
   645  		gc.AddAux2(&p.To, v, sc.Off())
   646  	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
   647  		ssa.OpAMD64CVTSL2SS, ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSQ2SD,
   648  		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
   649  		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
   650  		opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
   651  	case ssa.OpAMD64DUFFZERO:
   652  		p := gc.Prog(obj.ADUFFZERO)
   653  		p.To.Type = obj.TYPE_ADDR
   654  		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
   655  		p.To.Offset = v.AuxInt
   656  	case ssa.OpAMD64MOVOconst:
   657  		if v.AuxInt != 0 {
   658  			v.Unimplementedf("MOVOconst can only do constant=0")
   659  		}
   660  		r := gc.SSARegNum(v)
   661  		opregreg(x86.AXORPS, r, r)
   662  	case ssa.OpAMD64DUFFCOPY:
   663  		p := gc.Prog(obj.ADUFFCOPY)
   664  		p.To.Type = obj.TYPE_ADDR
   665  		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
   666  		p.To.Offset = v.AuxInt
   667  
   668  	case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
   669  		if v.Type.IsMemory() {
   670  			return
   671  		}
   672  		x := gc.SSARegNum(v.Args[0])
   673  		y := gc.SSARegNum(v)
   674  		if x != y {
   675  			opregreg(moveByType(v.Type), y, x)
   676  		}
   677  	case ssa.OpLoadReg:
   678  		if v.Type.IsFlags() {
   679  			v.Unimplementedf("load flags not implemented: %v", v.LongString())
   680  			return
   681  		}
   682  		p := gc.Prog(loadByType(v.Type))
   683  		n, off := gc.AutoVar(v.Args[0])
   684  		p.From.Type = obj.TYPE_MEM
   685  		p.From.Node = n
   686  		p.From.Sym = gc.Linksym(n.Sym)
   687  		p.From.Offset = off
   688  		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
   689  			p.From.Name = obj.NAME_PARAM
   690  			p.From.Offset += n.Xoffset
   691  		} else {
   692  			p.From.Name = obj.NAME_AUTO
   693  		}
   694  		p.To.Type = obj.TYPE_REG
   695  		p.To.Reg = gc.SSARegNum(v)
   696  
   697  	case ssa.OpStoreReg:
   698  		if v.Type.IsFlags() {
   699  			v.Unimplementedf("store flags not implemented: %v", v.LongString())
   700  			return
   701  		}
   702  		p := gc.Prog(storeByType(v.Type))
   703  		p.From.Type = obj.TYPE_REG
   704  		p.From.Reg = gc.SSARegNum(v.Args[0])
   705  		n, off := gc.AutoVar(v)
   706  		p.To.Type = obj.TYPE_MEM
   707  		p.To.Node = n
   708  		p.To.Sym = gc.Linksym(n.Sym)
   709  		p.To.Offset = off
   710  		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
   711  			p.To.Name = obj.NAME_PARAM
   712  			p.To.Offset += n.Xoffset
   713  		} else {
   714  			p.To.Name = obj.NAME_AUTO
   715  		}
   716  	case ssa.OpPhi:
   717  		// just check to make sure regalloc and stackalloc did it right
   718  		if v.Type.IsMemory() {
   719  			return
   720  		}
   721  		f := v.Block.Func
   722  		loc := f.RegAlloc[v.ID]
   723  		for _, a := range v.Args {
   724  			if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
   725  				v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
   726  			}
   727  		}
   728  	case ssa.OpInitMem:
   729  		// memory arg needs no code
   730  	case ssa.OpArg:
   731  		// input args need no code
   732  	case ssa.OpAMD64LoweredGetClosurePtr:
   733  		// Output is hardwired to DX only,
   734  		// and DX contains the closure pointer on
   735  		// closure entry, and this "instruction"
   736  		// is scheduled to the very beginning
   737  		// of the entry block.
   738  	case ssa.OpAMD64LoweredGetG:
   739  		r := gc.SSARegNum(v)
   740  		// See the comments in cmd/internal/obj/x86/obj6.go
   741  		// near CanUse1InsnTLS for a detailed explanation of these instructions.
   742  		if x86.CanUse1InsnTLS(gc.Ctxt) {
   743  			// MOVQ (TLS), r
   744  			p := gc.Prog(x86.AMOVQ)
   745  			p.From.Type = obj.TYPE_MEM
   746  			p.From.Reg = x86.REG_TLS
   747  			p.To.Type = obj.TYPE_REG
   748  			p.To.Reg = r
   749  		} else {
   750  			// MOVQ TLS, r
   751  			// MOVQ (r)(TLS*1), r
   752  			p := gc.Prog(x86.AMOVQ)
   753  			p.From.Type = obj.TYPE_REG
   754  			p.From.Reg = x86.REG_TLS
   755  			p.To.Type = obj.TYPE_REG
   756  			p.To.Reg = r
   757  			q := gc.Prog(x86.AMOVQ)
   758  			q.From.Type = obj.TYPE_MEM
   759  			q.From.Reg = r
   760  			q.From.Index = x86.REG_TLS
   761  			q.From.Scale = 1
   762  			q.To.Type = obj.TYPE_REG
   763  			q.To.Reg = r
   764  		}
   765  	case ssa.OpAMD64CALLstatic:
   766  		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
   767  			// Deferred calls will appear to be returning to
   768  			// the CALL deferreturn(SB) that we are about to emit.
   769  			// However, the stack trace code will show the line
   770  			// of the instruction byte before the return PC.
   771  			// To avoid that being an unrelated instruction,
   772  			// insert an actual hardware NOP that will have the right line number.
   773  			// This is different from obj.ANOP, which is a virtual no-op
   774  			// that doesn't make it into the instruction stream.
   775  			ginsnop()
   776  		}
   777  		p := gc.Prog(obj.ACALL)
   778  		p.To.Type = obj.TYPE_MEM
   779  		p.To.Name = obj.NAME_EXTERN
   780  		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
   781  		if gc.Maxarg < v.AuxInt {
   782  			gc.Maxarg = v.AuxInt
   783  		}
   784  	case ssa.OpAMD64CALLclosure:
   785  		p := gc.Prog(obj.ACALL)
   786  		p.To.Type = obj.TYPE_REG
   787  		p.To.Reg = gc.SSARegNum(v.Args[0])
   788  		if gc.Maxarg < v.AuxInt {
   789  			gc.Maxarg = v.AuxInt
   790  		}
   791  	case ssa.OpAMD64CALLdefer:
   792  		p := gc.Prog(obj.ACALL)
   793  		p.To.Type = obj.TYPE_MEM
   794  		p.To.Name = obj.NAME_EXTERN
   795  		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
   796  		if gc.Maxarg < v.AuxInt {
   797  			gc.Maxarg = v.AuxInt
   798  		}
   799  	case ssa.OpAMD64CALLgo:
   800  		p := gc.Prog(obj.ACALL)
   801  		p.To.Type = obj.TYPE_MEM
   802  		p.To.Name = obj.NAME_EXTERN
   803  		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
   804  		if gc.Maxarg < v.AuxInt {
   805  			gc.Maxarg = v.AuxInt
   806  		}
   807  	case ssa.OpAMD64CALLinter:
   808  		p := gc.Prog(obj.ACALL)
   809  		p.To.Type = obj.TYPE_REG
   810  		p.To.Reg = gc.SSARegNum(v.Args[0])
   811  		if gc.Maxarg < v.AuxInt {
   812  			gc.Maxarg = v.AuxInt
   813  		}
   814  	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
   815  		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
   816  		ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
   817  		r := gc.SSARegNum(v)
   818  		if r != gc.SSARegNum(v.Args[0]) {
   819  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   820  		}
   821  		p := gc.Prog(v.Op.Asm())
   822  		p.To.Type = obj.TYPE_REG
   823  		p.To.Reg = r
   824  	case ssa.OpAMD64BSFQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSFW,
   825  		ssa.OpAMD64BSRQ, ssa.OpAMD64BSRL, ssa.OpAMD64BSRW,
   826  		ssa.OpAMD64SQRTSD:
   827  		p := gc.Prog(v.Op.Asm())
   828  		p.From.Type = obj.TYPE_REG
   829  		p.From.Reg = gc.SSARegNum(v.Args[0])
   830  		p.To.Type = obj.TYPE_REG
   831  		p.To.Reg = gc.SSARegNum(v)
   832  	case ssa.OpSP, ssa.OpSB:
   833  		// nothing to do
   834  	case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
   835  		ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
   836  		ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
   837  		ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
   838  		ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
   839  		ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
   840  		ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
   841  		p := gc.Prog(v.Op.Asm())
   842  		p.To.Type = obj.TYPE_REG
   843  		p.To.Reg = gc.SSARegNum(v)
   844  
   845  	case ssa.OpAMD64SETNEF:
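        		// Floating-point "not equal" is NE || unordered: UCOMISx reports NaN
        		// operands via PF, so the result below is SETNE r; SETPS AX; ORL AX, r.
        		// (SETEQF below is the dual: SETEQ masked with SETPC via ANDL.)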
   846  		p := gc.Prog(v.Op.Asm())
   847  		p.To.Type = obj.TYPE_REG
   848  		p.To.Reg = gc.SSARegNum(v)
   849  		q := gc.Prog(x86.ASETPS)
   850  		q.To.Type = obj.TYPE_REG
   851  		q.To.Reg = x86.REG_AX
   852  		// ORL avoids a partial register write and is smaller than the ORQ used by the old compiler.
   853  		opregreg(x86.AORL, gc.SSARegNum(v), x86.REG_AX)
   854  
   855  	case ssa.OpAMD64SETEQF:
   856  		p := gc.Prog(v.Op.Asm())
   857  		p.To.Type = obj.TYPE_REG
   858  		p.To.Reg = gc.SSARegNum(v)
   859  		q := gc.Prog(x86.ASETPC)
   860  		q.To.Type = obj.TYPE_REG
   861  		q.To.Reg = x86.REG_AX
   862  		// ANDL avoids a partial register write and is smaller than the ANDQ used by the old compiler.
   863  		opregreg(x86.AANDL, gc.SSARegNum(v), x86.REG_AX)
   864  
   865  	case ssa.OpAMD64InvertFlags:
   866  		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
   867  	case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
   868  		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
   869  	case ssa.OpAMD64REPSTOSQ:
   870  		gc.Prog(x86.AREP)
   871  		gc.Prog(x86.ASTOSQ)
   872  	case ssa.OpAMD64REPMOVSQ:
   873  		gc.Prog(x86.AREP)
   874  		gc.Prog(x86.AMOVSQ)
   875  	case ssa.OpVarDef:
   876  		gc.Gvardef(v.Aux.(*gc.Node))
   877  	case ssa.OpVarKill:
   878  		gc.Gvarkill(v.Aux.(*gc.Node))
   879  	case ssa.OpVarLive:
   880  		gc.Gvarlive(v.Aux.(*gc.Node))
   881  	case ssa.OpKeepAlive:
   882  		if !v.Args[0].Type.IsPtrShaped() {
   883  			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
   884  		}
   885  		n, off := gc.AutoVar(v.Args[0])
   886  		if n == nil {
   887  			v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0])
   888  		}
   889  		if off != 0 {
   890  			v.Fatalf("KeepAlive with non-zero offset spill location %s:%d", n, off)
   891  		}
   892  		gc.Gvarlive(n)
   893  	case ssa.OpAMD64LoweredNilCheck:
   894  		// Optimization - if the subsequent block has a load or store
   895  		// at the same address, we don't need to issue this instruction.
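        		// e.g. if the successor starts with MOVQ 8(ptr), r for the same ptr,
        		// that load already faults when ptr is nil (8 < minZeroPage), so the
        		// scan below can drop the explicit check.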
   896  		mem := v.Args[1]
   897  		for _, w := range v.Block.Succs[0].Block().Values {
   898  			if w.Op == ssa.OpPhi {
   899  				if w.Type.IsMemory() {
   900  					mem = w
   901  				}
   902  				continue
   903  			}
   904  			if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
   905  				// w doesn't use a store - can't be a memory op.
   906  				continue
   907  			}
   908  			if w.Args[len(w.Args)-1] != mem {
   909  				v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
   910  			}
   911  			switch w.Op {
   912  			case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload,
   913  				ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore,
   914  				ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
   915  				ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVOload,
   916  				ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVOstore:
   917  				if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
   918  					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
   919  						gc.Warnl(v.Line, "removed nil check")
   920  					}
   921  					return
   922  				}
   923  			case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
   924  				off := ssa.ValAndOff(v.AuxInt).Off()
   925  				if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
   926  					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
   927  						gc.Warnl(v.Line, "removed nil check")
   928  					}
   929  					return
   930  				}
   931  			}
   932  			if w.Type.IsMemory() {
   933  				if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
   934  					// these ops are OK
   935  					mem = w
   936  					continue
   937  				}
   938  				// We can't delay the nil check past the next store.
   939  				break
   940  			}
   941  		}
   942  		// Issue a load which will fault if the input is nil.
   943  		// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
   944  		// Should we use the 3-byte TESTB $0, (reg) instead? It is larger
   945  		// but doesn't have a false dependency on AX.
   946  		// Or maybe allocate an output register and use MOVL (reg),reg2 ?
   947  		// That trades clobbering flags for clobbering a register.
   948  		p := gc.Prog(x86.ATESTB)
   949  		p.From.Type = obj.TYPE_REG
   950  		p.From.Reg = x86.REG_AX
   951  		p.To.Type = obj.TYPE_MEM
   952  		p.To.Reg = gc.SSARegNum(v.Args[0])
   953  		gc.AddAux(&p.To, v)
   954  		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
   955  			gc.Warnl(v.Line, "generated nil check")
   956  		}
   957  	default:
   958  		v.Unimplementedf("genValue not implemented: %s", v.LongString())
   959  	}
   960  }
   961  
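        // blockJump gives, for each conditional block kind, the conditional jump
        // taken to Succs[0] (asm) and its inverse (invasm); ssaGenBlock emits
        // invasm when Succs[0] is the fallthrough block and asm otherwise.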
   962  var blockJump = [...]struct {
   963  	asm, invasm obj.As
   964  }{
   965  	ssa.BlockAMD64EQ:  {x86.AJEQ, x86.AJNE},
   966  	ssa.BlockAMD64NE:  {x86.AJNE, x86.AJEQ},
   967  	ssa.BlockAMD64LT:  {x86.AJLT, x86.AJGE},
   968  	ssa.BlockAMD64GE:  {x86.AJGE, x86.AJLT},
   969  	ssa.BlockAMD64LE:  {x86.AJLE, x86.AJGT},
   970  	ssa.BlockAMD64GT:  {x86.AJGT, x86.AJLE},
   971  	ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
   972  	ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
   973  	ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
   974  	ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
   975  	ssa.BlockAMD64ORD: {x86.AJPC, x86.AJPS},
   976  	ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
   977  }
   978  
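        // The floating-point EQ/NE blocks need two conditional jumps because
        // UCOMISx reports unordered (NaN) operands via PF. Roughly, each row below
        // lists the jumps SSAGenFPJump emits for a given fallthrough successor,
        // with Index naming the successor each jump targets.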
   979  var eqfJumps = [2][2]gc.FloatingEQNEJump{
   980  	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
   981  	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
   982  }
   983  var nefJumps = [2][2]gc.FloatingEQNEJump{
   984  	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
   985  	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
   986  }
   987  
   988  func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
   989  	s.SetLineno(b.Line)
   990  
   991  	switch b.Kind {
   992  	case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
   993  		if b.Succs[0].Block() != next {
   994  			p := gc.Prog(obj.AJMP)
   995  			p.To.Type = obj.TYPE_BRANCH
   996  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   997  		}
   998  	case ssa.BlockDefer:
   999  		// defer returns in AX:
  1000  		// 0 if we should continue executing,
  1001  		// 1 if we should jump to the deferreturn call.
  1002  		p := gc.Prog(x86.ATESTL)
  1003  		p.From.Type = obj.TYPE_REG
  1004  		p.From.Reg = x86.REG_AX
  1005  		p.To.Type = obj.TYPE_REG
  1006  		p.To.Reg = x86.REG_AX
  1007  		p = gc.Prog(x86.AJNE)
  1008  		p.To.Type = obj.TYPE_BRANCH
  1009  		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
  1010  		if b.Succs[0].Block() != next {
  1011  			p := gc.Prog(obj.AJMP)
  1012  			p.To.Type = obj.TYPE_BRANCH
  1013  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1014  		}
  1015  	case ssa.BlockExit:
  1016  		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
  1017  	case ssa.BlockRet:
  1018  		gc.Prog(obj.ARET)
  1019  	case ssa.BlockRetJmp:
  1020  		p := gc.Prog(obj.AJMP)
  1021  		p.To.Type = obj.TYPE_MEM
  1022  		p.To.Name = obj.NAME_EXTERN
  1023  		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
  1024  
  1025  	case ssa.BlockAMD64EQF:
  1026  		gc.SSAGenFPJump(s, b, next, &eqfJumps)
  1027  
  1028  	case ssa.BlockAMD64NEF:
  1029  		gc.SSAGenFPJump(s, b, next, &nefJumps)
  1030  
  1031  	case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
  1032  		ssa.BlockAMD64LT, ssa.BlockAMD64GE,
  1033  		ssa.BlockAMD64LE, ssa.BlockAMD64GT,
  1034  		ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
  1035  		ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
  1036  		jmp := blockJump[b.Kind]
  1037  		likely := b.Likely
  1038  		var p *obj.Prog
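        		// If one successor is the fallthrough block, a single conditional jump
        		// suffices (inverted when falling through to Succs[0]); otherwise emit
        		// the conditional jump to Succs[0] plus an unconditional JMP to Succs[1].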
  1039  		switch next {
  1040  		case b.Succs[0].Block():
  1041  			p = gc.Prog(jmp.invasm)
  1042  			likely *= -1
  1043  			p.To.Type = obj.TYPE_BRANCH
  1044  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
  1045  		case b.Succs[1].Block():
  1046  			p = gc.Prog(jmp.asm)
  1047  			p.To.Type = obj.TYPE_BRANCH
  1048  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1049  		default:
  1050  			p = gc.Prog(jmp.asm)
  1051  			p.To.Type = obj.TYPE_BRANCH
  1052  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1053  			q := gc.Prog(obj.AJMP)
  1054  			q.To.Type = obj.TYPE_BRANCH
  1055  			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
  1056  		}
  1057  
  1058  		// liblink reorders the instruction stream as it sees fit.
  1059  		// Pass along what we know so liblink can make use of it.
  1060  		// TODO: Once we've fully switched to SSA,
  1061  		// make liblink leave our output alone.
  1062  		switch likely {
  1063  		case ssa.BranchUnlikely:
  1064  			p.From.Type = obj.TYPE_CONST
  1065  			p.From.Offset = 0
  1066  		case ssa.BranchLikely:
  1067  			p.From.Type = obj.TYPE_CONST
  1068  			p.From.Offset = 1
  1069  		}
  1070  
  1071  	default:
  1072  		b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
  1073  	}
  1074  }