github.com/euank/go@v0.0.0-20160829210321-495514729181/src/cmd/compile/internal/arm/ssa.go (about)

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package arm
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  
    11  	"cmd/compile/internal/gc"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/internal/obj"
    14  	"cmd/internal/obj/arm"
    15  )
    16  
// ssaRegToReg maps SSA-allocator register numbers (the slice index) to the
// obj/arm hardware register codes used when emitting Progs. The order is
// significant and must not change: integer registers first, then floating
// point registers, then the flags pseudo-register and SB.
var ssaRegToReg = []int16{
	arm.REG_R0,
	arm.REG_R1,
	arm.REG_R2,
	arm.REG_R3,
	arm.REG_R4,
	arm.REG_R5,
	arm.REG_R6,
	arm.REG_R7,
	arm.REG_R8,
	arm.REG_R9,
	arm.REGG, // aka R10
	arm.REG_R11,
	arm.REG_R12,
	arm.REGSP, // aka R13
	arm.REG_R14,
	arm.REG_R15,

	arm.REG_F0,
	arm.REG_F1,
	arm.REG_F2,
	arm.REG_F3,
	arm.REG_F4,
	arm.REG_F5,
	arm.REG_F6,
	arm.REG_F7,
	arm.REG_F8,
	arm.REG_F9,
	arm.REG_F10,
	arm.REG_F11,
	arm.REG_F12,
	arm.REG_F13,
	arm.REG_F14,
	arm.REG_F15,

	arm.REG_CPSR, // flag
	0,            // SB isn't a real register.  We fill an Addr.Reg field with 0 in this case.
}
    55  
// minZeroPage is the size of the smallest possible faulting page at
// address zero; see ../../../../runtime/internal/sys/arch_arm.go.
// Loads/stores with a nil base and an offset below this bound are known
// to fault, which lets ssaGenValue elide explicit nil checks for them.
const minZeroPage = 4096
    59  
    60  // loadByType returns the load instruction of the given type.
    61  func loadByType(t ssa.Type) obj.As {
    62  	if t.IsFloat() {
    63  		switch t.Size() {
    64  		case 4:
    65  			return arm.AMOVF
    66  		case 8:
    67  			return arm.AMOVD
    68  		}
    69  	} else {
    70  		switch t.Size() {
    71  		case 1:
    72  			if t.IsSigned() {
    73  				return arm.AMOVB
    74  			} else {
    75  				return arm.AMOVBU
    76  			}
    77  		case 2:
    78  			if t.IsSigned() {
    79  				return arm.AMOVH
    80  			} else {
    81  				return arm.AMOVHU
    82  			}
    83  		case 4:
    84  			return arm.AMOVW
    85  		}
    86  	}
    87  	panic("bad load type")
    88  }
    89  
    90  // storeByType returns the store instruction of the given type.
    91  func storeByType(t ssa.Type) obj.As {
    92  	if t.IsFloat() {
    93  		switch t.Size() {
    94  		case 4:
    95  			return arm.AMOVF
    96  		case 8:
    97  			return arm.AMOVD
    98  		}
    99  	} else {
   100  		switch t.Size() {
   101  		case 1:
   102  			return arm.AMOVB
   103  		case 2:
   104  			return arm.AMOVH
   105  		case 4:
   106  			return arm.AMOVW
   107  		}
   108  	}
   109  	panic("bad store type")
   110  }
   111  
   112  // shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
   113  type shift int64
   114  
   115  // copied from ../../../internal/obj/util.go:/TYPE_SHIFT
   116  func (v shift) String() string {
   117  	op := "<<>>->@>"[((v>>5)&3)<<1:]
   118  	if v&(1<<4) != 0 {
   119  		// register shift
   120  		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
   121  	} else {
   122  		// constant shift
   123  		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
   124  	}
   125  }
   126  
   127  // makeshift encodes a register shifted by a constant
   128  func makeshift(reg int16, typ int64, s int64) shift {
   129  	return shift(int64(reg&0xf) | typ | (s&31)<<7)
   130  }
   131  
   132  // genshift generates a Prog for r = r0 op (r1 shifted by s)
   133  func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
   134  	p := gc.Prog(as)
   135  	p.From.Type = obj.TYPE_SHIFT
   136  	p.From.Offset = int64(makeshift(r1, typ, s))
   137  	p.Reg = r0
   138  	if r != 0 {
   139  		p.To.Type = obj.TYPE_REG
   140  		p.To.Reg = r
   141  	}
   142  	return p
   143  }
   144  
   145  // makeregshift encodes a register shifted by a register
   146  func makeregshift(r1 int16, typ int64, r2 int16) shift {
   147  	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
   148  }
   149  
   150  // genregshift generates a Prog for r = r0 op (r1 shifted by r2)
   151  func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
   152  	p := gc.Prog(as)
   153  	p.From.Type = obj.TYPE_SHIFT
   154  	p.From.Offset = int64(makeregshift(r1, typ, r2))
   155  	p.Reg = r0
   156  	if r != 0 {
   157  		p.To.Type = obj.TYPE_REG
   158  		p.To.Reg = r
   159  	}
   160  	return p
   161  }
   162  
   163  func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
   164  	s.SetLineno(v.Line)
   165  	switch v.Op {
   166  	case ssa.OpInitMem:
   167  		// memory arg needs no code
   168  	case ssa.OpArg:
   169  		// input args need no code
   170  	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
   171  		// nothing to do
   172  	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
   173  		if v.Type.IsMemory() {
   174  			return
   175  		}
   176  		x := gc.SSARegNum(v.Args[0])
   177  		y := gc.SSARegNum(v)
   178  		if x == y {
   179  			return
   180  		}
   181  		as := arm.AMOVW
   182  		if v.Type.IsFloat() {
   183  			switch v.Type.Size() {
   184  			case 4:
   185  				as = arm.AMOVF
   186  			case 8:
   187  				as = arm.AMOVD
   188  			default:
   189  				panic("bad float size")
   190  			}
   191  		}
   192  		p := gc.Prog(as)
   193  		p.From.Type = obj.TYPE_REG
   194  		p.From.Reg = x
   195  		p.To.Type = obj.TYPE_REG
   196  		p.To.Reg = y
   197  	case ssa.OpARMMOVWnop:
   198  		if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
   199  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   200  		}
   201  		// nothing to do
   202  	case ssa.OpLoadReg:
   203  		if v.Type.IsFlags() {
   204  			v.Unimplementedf("load flags not implemented: %v", v.LongString())
   205  			return
   206  		}
   207  		p := gc.Prog(loadByType(v.Type))
   208  		n, off := gc.AutoVar(v.Args[0])
   209  		p.From.Type = obj.TYPE_MEM
   210  		p.From.Node = n
   211  		p.From.Sym = gc.Linksym(n.Sym)
   212  		p.From.Offset = off
   213  		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
   214  			p.From.Name = obj.NAME_PARAM
   215  			p.From.Offset += n.Xoffset
   216  		} else {
   217  			p.From.Name = obj.NAME_AUTO
   218  		}
   219  		p.To.Type = obj.TYPE_REG
   220  		p.To.Reg = gc.SSARegNum(v)
   221  	case ssa.OpPhi:
   222  		gc.CheckLoweredPhi(v)
   223  	case ssa.OpStoreReg:
   224  		if v.Type.IsFlags() {
   225  			v.Unimplementedf("store flags not implemented: %v", v.LongString())
   226  			return
   227  		}
   228  		p := gc.Prog(storeByType(v.Type))
   229  		p.From.Type = obj.TYPE_REG
   230  		p.From.Reg = gc.SSARegNum(v.Args[0])
   231  		n, off := gc.AutoVar(v)
   232  		p.To.Type = obj.TYPE_MEM
   233  		p.To.Node = n
   234  		p.To.Sym = gc.Linksym(n.Sym)
   235  		p.To.Offset = off
   236  		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
   237  			p.To.Name = obj.NAME_PARAM
   238  			p.To.Offset += n.Xoffset
   239  		} else {
   240  			p.To.Name = obj.NAME_AUTO
   241  		}
   242  	case ssa.OpARMDIV,
   243  		ssa.OpARMDIVU,
   244  		ssa.OpARMMOD,
   245  		ssa.OpARMMODU:
   246  		// Note: for software division the assembler rewrite these
   247  		// instructions to sequence of instructions:
   248  		// - it puts numerator in R11 and denominator in g.m.divmod
   249  		//	and call (say) _udiv
   250  		// - _udiv saves R0-R3 on stack and call udiv, restores R0-R3
   251  		//	before return
   252  		// - udiv does the actual work
   253  		//TODO: set approperiate regmasks and call udiv directly?
   254  		// need to be careful for negative case
   255  		// Or, as soft div is already expensive, we don't care?
   256  		fallthrough
   257  	case ssa.OpARMADD,
   258  		ssa.OpARMADC,
   259  		ssa.OpARMSUB,
   260  		ssa.OpARMSBC,
   261  		ssa.OpARMRSB,
   262  		ssa.OpARMAND,
   263  		ssa.OpARMOR,
   264  		ssa.OpARMXOR,
   265  		ssa.OpARMBIC,
   266  		ssa.OpARMMUL,
   267  		ssa.OpARMADDF,
   268  		ssa.OpARMADDD,
   269  		ssa.OpARMSUBF,
   270  		ssa.OpARMSUBD,
   271  		ssa.OpARMMULF,
   272  		ssa.OpARMMULD,
   273  		ssa.OpARMDIVF,
   274  		ssa.OpARMDIVD:
   275  		r := gc.SSARegNum(v)
   276  		r1 := gc.SSARegNum(v.Args[0])
   277  		r2 := gc.SSARegNum(v.Args[1])
   278  		p := gc.Prog(v.Op.Asm())
   279  		p.From.Type = obj.TYPE_REG
   280  		p.From.Reg = r2
   281  		p.Reg = r1
   282  		p.To.Type = obj.TYPE_REG
   283  		p.To.Reg = r
   284  	case ssa.OpARMADDS,
   285  		ssa.OpARMSUBS:
   286  		r := gc.SSARegNum0(v)
   287  		r1 := gc.SSARegNum(v.Args[0])
   288  		r2 := gc.SSARegNum(v.Args[1])
   289  		p := gc.Prog(v.Op.Asm())
   290  		p.Scond = arm.C_SBIT
   291  		p.From.Type = obj.TYPE_REG
   292  		p.From.Reg = r2
   293  		p.Reg = r1
   294  		p.To.Type = obj.TYPE_REG
   295  		p.To.Reg = r
   296  	case ssa.OpARMSLL,
   297  		ssa.OpARMSRL,
   298  		ssa.OpARMSRA:
   299  		r := gc.SSARegNum(v)
   300  		r1 := gc.SSARegNum(v.Args[0])
   301  		r2 := gc.SSARegNum(v.Args[1])
   302  		p := gc.Prog(v.Op.Asm())
   303  		p.From.Type = obj.TYPE_REG
   304  		p.From.Reg = r2
   305  		p.Reg = r1
   306  		p.To.Type = obj.TYPE_REG
   307  		p.To.Reg = r
   308  	case ssa.OpARMSRAcond:
   309  		// ARM shift instructions uses only the low-order byte of the shift amount
   310  		// generate conditional instructions to deal with large shifts
   311  		// flag is already set
   312  		// SRA.HS	$31, Rarg0, Rdst // shift 31 bits to get the sign bit
   313  		// SRA.LO	Rarg1, Rarg0, Rdst
   314  		r := gc.SSARegNum(v)
   315  		r1 := gc.SSARegNum(v.Args[0])
   316  		r2 := gc.SSARegNum(v.Args[1])
   317  		p := gc.Prog(arm.ASRA)
   318  		p.Scond = arm.C_SCOND_HS
   319  		p.From.Type = obj.TYPE_CONST
   320  		p.From.Offset = 31
   321  		p.Reg = r1
   322  		p.To.Type = obj.TYPE_REG
   323  		p.To.Reg = r
   324  		p = gc.Prog(arm.ASRA)
   325  		p.Scond = arm.C_SCOND_LO
   326  		p.From.Type = obj.TYPE_REG
   327  		p.From.Reg = r2
   328  		p.Reg = r1
   329  		p.To.Type = obj.TYPE_REG
   330  		p.To.Reg = r
   331  	case ssa.OpARMADDconst,
   332  		ssa.OpARMADCconst,
   333  		ssa.OpARMSUBconst,
   334  		ssa.OpARMSBCconst,
   335  		ssa.OpARMRSBconst,
   336  		ssa.OpARMRSCconst,
   337  		ssa.OpARMANDconst,
   338  		ssa.OpARMORconst,
   339  		ssa.OpARMXORconst,
   340  		ssa.OpARMBICconst,
   341  		ssa.OpARMSLLconst,
   342  		ssa.OpARMSRLconst,
   343  		ssa.OpARMSRAconst:
   344  		p := gc.Prog(v.Op.Asm())
   345  		p.From.Type = obj.TYPE_CONST
   346  		p.From.Offset = v.AuxInt
   347  		p.Reg = gc.SSARegNum(v.Args[0])
   348  		p.To.Type = obj.TYPE_REG
   349  		p.To.Reg = gc.SSARegNum(v)
   350  	case ssa.OpARMADDSconst,
   351  		ssa.OpARMSUBSconst,
   352  		ssa.OpARMRSBSconst:
   353  		p := gc.Prog(v.Op.Asm())
   354  		p.Scond = arm.C_SBIT
   355  		p.From.Type = obj.TYPE_CONST
   356  		p.From.Offset = v.AuxInt
   357  		p.Reg = gc.SSARegNum(v.Args[0])
   358  		p.To.Type = obj.TYPE_REG
   359  		p.To.Reg = gc.SSARegNum0(v)
   360  	case ssa.OpARMSRRconst:
   361  		genshift(arm.AMOVW, 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_RR, v.AuxInt)
   362  	case ssa.OpARMADDshiftLL,
   363  		ssa.OpARMADCshiftLL,
   364  		ssa.OpARMSUBshiftLL,
   365  		ssa.OpARMSBCshiftLL,
   366  		ssa.OpARMRSBshiftLL,
   367  		ssa.OpARMRSCshiftLL,
   368  		ssa.OpARMANDshiftLL,
   369  		ssa.OpARMORshiftLL,
   370  		ssa.OpARMXORshiftLL,
   371  		ssa.OpARMBICshiftLL:
   372  		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
   373  	case ssa.OpARMADDSshiftLL,
   374  		ssa.OpARMSUBSshiftLL,
   375  		ssa.OpARMRSBSshiftLL:
   376  		p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum0(v), arm.SHIFT_LL, v.AuxInt)
   377  		p.Scond = arm.C_SBIT
   378  	case ssa.OpARMADDshiftRL,
   379  		ssa.OpARMADCshiftRL,
   380  		ssa.OpARMSUBshiftRL,
   381  		ssa.OpARMSBCshiftRL,
   382  		ssa.OpARMRSBshiftRL,
   383  		ssa.OpARMRSCshiftRL,
   384  		ssa.OpARMANDshiftRL,
   385  		ssa.OpARMORshiftRL,
   386  		ssa.OpARMXORshiftRL,
   387  		ssa.OpARMBICshiftRL:
   388  		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
   389  	case ssa.OpARMADDSshiftRL,
   390  		ssa.OpARMSUBSshiftRL,
   391  		ssa.OpARMRSBSshiftRL:
   392  		p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum0(v), arm.SHIFT_LR, v.AuxInt)
   393  		p.Scond = arm.C_SBIT
   394  	case ssa.OpARMADDshiftRA,
   395  		ssa.OpARMADCshiftRA,
   396  		ssa.OpARMSUBshiftRA,
   397  		ssa.OpARMSBCshiftRA,
   398  		ssa.OpARMRSBshiftRA,
   399  		ssa.OpARMRSCshiftRA,
   400  		ssa.OpARMANDshiftRA,
   401  		ssa.OpARMORshiftRA,
   402  		ssa.OpARMXORshiftRA,
   403  		ssa.OpARMBICshiftRA:
   404  		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
   405  	case ssa.OpARMADDSshiftRA,
   406  		ssa.OpARMSUBSshiftRA,
   407  		ssa.OpARMRSBSshiftRA:
   408  		p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum0(v), arm.SHIFT_AR, v.AuxInt)
   409  		p.Scond = arm.C_SBIT
   410  	case ssa.OpARMMVNshiftLL:
   411  		genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
   412  	case ssa.OpARMMVNshiftRL:
   413  		genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
   414  	case ssa.OpARMMVNshiftRA:
   415  		genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
   416  	case ssa.OpARMMVNshiftLLreg:
   417  		genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL)
   418  	case ssa.OpARMMVNshiftRLreg:
   419  		genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR)
   420  	case ssa.OpARMMVNshiftRAreg:
   421  		genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR)
   422  	case ssa.OpARMADDshiftLLreg,
   423  		ssa.OpARMADCshiftLLreg,
   424  		ssa.OpARMSUBshiftLLreg,
   425  		ssa.OpARMSBCshiftLLreg,
   426  		ssa.OpARMRSBshiftLLreg,
   427  		ssa.OpARMRSCshiftLLreg,
   428  		ssa.OpARMANDshiftLLreg,
   429  		ssa.OpARMORshiftLLreg,
   430  		ssa.OpARMXORshiftLLreg,
   431  		ssa.OpARMBICshiftLLreg:
   432  		genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
   433  	case ssa.OpARMADDSshiftLLreg,
   434  		ssa.OpARMSUBSshiftLLreg,
   435  		ssa.OpARMRSBSshiftLLreg:
   436  		p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum0(v), arm.SHIFT_LL)
   437  		p.Scond = arm.C_SBIT
   438  	case ssa.OpARMADDshiftRLreg,
   439  		ssa.OpARMADCshiftRLreg,
   440  		ssa.OpARMSUBshiftRLreg,
   441  		ssa.OpARMSBCshiftRLreg,
   442  		ssa.OpARMRSBshiftRLreg,
   443  		ssa.OpARMRSCshiftRLreg,
   444  		ssa.OpARMANDshiftRLreg,
   445  		ssa.OpARMORshiftRLreg,
   446  		ssa.OpARMXORshiftRLreg,
   447  		ssa.OpARMBICshiftRLreg:
   448  		genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
   449  	case ssa.OpARMADDSshiftRLreg,
   450  		ssa.OpARMSUBSshiftRLreg,
   451  		ssa.OpARMRSBSshiftRLreg:
   452  		p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum0(v), arm.SHIFT_LR)
   453  		p.Scond = arm.C_SBIT
   454  	case ssa.OpARMADDshiftRAreg,
   455  		ssa.OpARMADCshiftRAreg,
   456  		ssa.OpARMSUBshiftRAreg,
   457  		ssa.OpARMSBCshiftRAreg,
   458  		ssa.OpARMRSBshiftRAreg,
   459  		ssa.OpARMRSCshiftRAreg,
   460  		ssa.OpARMANDshiftRAreg,
   461  		ssa.OpARMORshiftRAreg,
   462  		ssa.OpARMXORshiftRAreg,
   463  		ssa.OpARMBICshiftRAreg:
   464  		genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
   465  	case ssa.OpARMADDSshiftRAreg,
   466  		ssa.OpARMSUBSshiftRAreg,
   467  		ssa.OpARMRSBSshiftRAreg:
   468  		p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum0(v), arm.SHIFT_AR)
   469  		p.Scond = arm.C_SBIT
   470  	case ssa.OpARMHMUL,
   471  		ssa.OpARMHMULU:
   472  		// 32-bit high multiplication
   473  		p := gc.Prog(v.Op.Asm())
   474  		p.From.Type = obj.TYPE_REG
   475  		p.From.Reg = gc.SSARegNum(v.Args[0])
   476  		p.Reg = gc.SSARegNum(v.Args[1])
   477  		p.To.Type = obj.TYPE_REGREG
   478  		p.To.Reg = gc.SSARegNum(v)
   479  		p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
   480  	case ssa.OpARMMULLU:
   481  		// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
   482  		p := gc.Prog(v.Op.Asm())
   483  		p.From.Type = obj.TYPE_REG
   484  		p.From.Reg = gc.SSARegNum(v.Args[0])
   485  		p.Reg = gc.SSARegNum(v.Args[1])
   486  		p.To.Type = obj.TYPE_REGREG
   487  		p.To.Reg = gc.SSARegNum0(v)           // high 32-bit
   488  		p.To.Offset = int64(gc.SSARegNum1(v)) // low 32-bit
   489  	case ssa.OpARMMULA:
   490  		p := gc.Prog(v.Op.Asm())
   491  		p.From.Type = obj.TYPE_REG
   492  		p.From.Reg = gc.SSARegNum(v.Args[0])
   493  		p.Reg = gc.SSARegNum(v.Args[1])
   494  		p.To.Type = obj.TYPE_REGREG2
   495  		p.To.Reg = gc.SSARegNum(v)                   // result
   496  		p.To.Offset = int64(gc.SSARegNum(v.Args[2])) // addend
   497  	case ssa.OpARMMOVWconst:
   498  		p := gc.Prog(v.Op.Asm())
   499  		p.From.Type = obj.TYPE_CONST
   500  		p.From.Offset = v.AuxInt
   501  		p.To.Type = obj.TYPE_REG
   502  		p.To.Reg = gc.SSARegNum(v)
   503  	case ssa.OpARMMOVFconst,
   504  		ssa.OpARMMOVDconst:
   505  		p := gc.Prog(v.Op.Asm())
   506  		p.From.Type = obj.TYPE_FCONST
   507  		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
   508  		p.To.Type = obj.TYPE_REG
   509  		p.To.Reg = gc.SSARegNum(v)
   510  	case ssa.OpARMCMP,
   511  		ssa.OpARMCMN,
   512  		ssa.OpARMTST,
   513  		ssa.OpARMTEQ,
   514  		ssa.OpARMCMPF,
   515  		ssa.OpARMCMPD:
   516  		p := gc.Prog(v.Op.Asm())
   517  		p.From.Type = obj.TYPE_REG
   518  		// Special layout in ARM assembly
   519  		// Comparing to x86, the operands of ARM's CMP are reversed.
   520  		p.From.Reg = gc.SSARegNum(v.Args[1])
   521  		p.Reg = gc.SSARegNum(v.Args[0])
   522  	case ssa.OpARMCMPconst,
   523  		ssa.OpARMCMNconst,
   524  		ssa.OpARMTSTconst,
   525  		ssa.OpARMTEQconst:
   526  		// Special layout in ARM assembly
   527  		p := gc.Prog(v.Op.Asm())
   528  		p.From.Type = obj.TYPE_CONST
   529  		p.From.Offset = v.AuxInt
   530  		p.Reg = gc.SSARegNum(v.Args[0])
   531  	case ssa.OpARMCMPF0,
   532  		ssa.OpARMCMPD0:
   533  		p := gc.Prog(v.Op.Asm())
   534  		p.From.Type = obj.TYPE_REG
   535  		p.From.Reg = gc.SSARegNum(v.Args[0])
   536  	case ssa.OpARMCMPshiftLL:
   537  		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LL, v.AuxInt)
   538  	case ssa.OpARMCMPshiftRL:
   539  		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LR, v.AuxInt)
   540  	case ssa.OpARMCMPshiftRA:
   541  		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_AR, v.AuxInt)
   542  	case ssa.OpARMCMPshiftLLreg:
   543  		genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LL)
   544  	case ssa.OpARMCMPshiftRLreg:
   545  		genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LR)
   546  	case ssa.OpARMCMPshiftRAreg:
   547  		genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_AR)
   548  	case ssa.OpARMMOVWaddr:
   549  		p := gc.Prog(arm.AMOVW)
   550  		p.From.Type = obj.TYPE_ADDR
   551  		p.To.Type = obj.TYPE_REG
   552  		p.To.Reg = gc.SSARegNum(v)
   553  
   554  		var wantreg string
   555  		// MOVW $sym+off(base), R
   556  		// the assembler expands it as the following:
   557  		// - base is SP: add constant offset to SP (R13)
   558  		//               when constant is large, tmp register (R11) may be used
   559  		// - base is SB: load external address from constant pool (use relocation)
   560  		switch v.Aux.(type) {
   561  		default:
   562  			v.Fatalf("aux is of unknown type %T", v.Aux)
   563  		case *ssa.ExternSymbol:
   564  			wantreg = "SB"
   565  			gc.AddAux(&p.From, v)
   566  		case *ssa.ArgSymbol, *ssa.AutoSymbol:
   567  			wantreg = "SP"
   568  			gc.AddAux(&p.From, v)
   569  		case nil:
   570  			// No sym, just MOVW $off(SP), R
   571  			wantreg = "SP"
   572  			p.From.Reg = arm.REGSP
   573  			p.From.Offset = v.AuxInt
   574  		}
   575  		if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
   576  			v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
   577  		}
   578  
   579  	case ssa.OpARMMOVBload,
   580  		ssa.OpARMMOVBUload,
   581  		ssa.OpARMMOVHload,
   582  		ssa.OpARMMOVHUload,
   583  		ssa.OpARMMOVWload,
   584  		ssa.OpARMMOVFload,
   585  		ssa.OpARMMOVDload:
   586  		p := gc.Prog(v.Op.Asm())
   587  		p.From.Type = obj.TYPE_MEM
   588  		p.From.Reg = gc.SSARegNum(v.Args[0])
   589  		gc.AddAux(&p.From, v)
   590  		p.To.Type = obj.TYPE_REG
   591  		p.To.Reg = gc.SSARegNum(v)
   592  	case ssa.OpARMMOVBstore,
   593  		ssa.OpARMMOVHstore,
   594  		ssa.OpARMMOVWstore,
   595  		ssa.OpARMMOVFstore,
   596  		ssa.OpARMMOVDstore:
   597  		p := gc.Prog(v.Op.Asm())
   598  		p.From.Type = obj.TYPE_REG
   599  		p.From.Reg = gc.SSARegNum(v.Args[1])
   600  		p.To.Type = obj.TYPE_MEM
   601  		p.To.Reg = gc.SSARegNum(v.Args[0])
   602  		gc.AddAux(&p.To, v)
   603  	case ssa.OpARMMOVWloadidx:
   604  		// this is just shift 0 bits
   605  		fallthrough
   606  	case ssa.OpARMMOVWloadshiftLL:
   607  		p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
   608  		p.From.Reg = gc.SSARegNum(v.Args[0])
   609  	case ssa.OpARMMOVWloadshiftRL:
   610  		p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
   611  		p.From.Reg = gc.SSARegNum(v.Args[0])
   612  	case ssa.OpARMMOVWloadshiftRA:
   613  		p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
   614  		p.From.Reg = gc.SSARegNum(v.Args[0])
   615  	case ssa.OpARMMOVWstoreidx:
   616  		// this is just shift 0 bits
   617  		fallthrough
   618  	case ssa.OpARMMOVWstoreshiftLL:
   619  		p := gc.Prog(v.Op.Asm())
   620  		p.From.Type = obj.TYPE_REG
   621  		p.From.Reg = gc.SSARegNum(v.Args[2])
   622  		p.To.Type = obj.TYPE_SHIFT
   623  		p.To.Reg = gc.SSARegNum(v.Args[0])
   624  		p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LL, v.AuxInt))
   625  	case ssa.OpARMMOVWstoreshiftRL:
   626  		p := gc.Prog(v.Op.Asm())
   627  		p.From.Type = obj.TYPE_REG
   628  		p.From.Reg = gc.SSARegNum(v.Args[2])
   629  		p.To.Type = obj.TYPE_SHIFT
   630  		p.To.Reg = gc.SSARegNum(v.Args[0])
   631  		p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LR, v.AuxInt))
   632  	case ssa.OpARMMOVWstoreshiftRA:
   633  		p := gc.Prog(v.Op.Asm())
   634  		p.From.Type = obj.TYPE_REG
   635  		p.From.Reg = gc.SSARegNum(v.Args[2])
   636  		p.To.Type = obj.TYPE_SHIFT
   637  		p.To.Reg = gc.SSARegNum(v.Args[0])
   638  		p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_AR, v.AuxInt))
   639  	case ssa.OpARMMOVBreg,
   640  		ssa.OpARMMOVBUreg,
   641  		ssa.OpARMMOVHreg,
   642  		ssa.OpARMMOVHUreg:
   643  		a := v.Args[0]
   644  		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
   645  			a = a.Args[0]
   646  		}
   647  		if a.Op == ssa.OpLoadReg {
   648  			t := a.Type
   649  			switch {
   650  			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
   651  				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
   652  				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
   653  				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
   654  				// arg is a proper-typed load, already zero/sign-extended, don't extend again
   655  				if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
   656  					return
   657  				}
   658  				p := gc.Prog(arm.AMOVW)
   659  				p.From.Type = obj.TYPE_REG
   660  				p.From.Reg = gc.SSARegNum(v.Args[0])
   661  				p.To.Type = obj.TYPE_REG
   662  				p.To.Reg = gc.SSARegNum(v)
   663  				return
   664  			default:
   665  			}
   666  		}
   667  		fallthrough
   668  	case ssa.OpARMMVN,
   669  		ssa.OpARMSQRTD,
   670  		ssa.OpARMNEGF,
   671  		ssa.OpARMNEGD,
   672  		ssa.OpARMMOVWF,
   673  		ssa.OpARMMOVWD,
   674  		ssa.OpARMMOVFW,
   675  		ssa.OpARMMOVDW,
   676  		ssa.OpARMMOVFD,
   677  		ssa.OpARMMOVDF:
   678  		p := gc.Prog(v.Op.Asm())
   679  		p.From.Type = obj.TYPE_REG
   680  		p.From.Reg = gc.SSARegNum(v.Args[0])
   681  		p.To.Type = obj.TYPE_REG
   682  		p.To.Reg = gc.SSARegNum(v)
   683  	case ssa.OpARMMOVWUF,
   684  		ssa.OpARMMOVWUD,
   685  		ssa.OpARMMOVFWU,
   686  		ssa.OpARMMOVDWU:
   687  		p := gc.Prog(v.Op.Asm())
   688  		p.Scond = arm.C_UBIT
   689  		p.From.Type = obj.TYPE_REG
   690  		p.From.Reg = gc.SSARegNum(v.Args[0])
   691  		p.To.Type = obj.TYPE_REG
   692  		p.To.Reg = gc.SSARegNum(v)
   693  	case ssa.OpARMCMOVWHSconst:
   694  		p := gc.Prog(arm.AMOVW)
   695  		p.Scond = arm.C_SCOND_HS
   696  		p.From.Type = obj.TYPE_CONST
   697  		p.From.Offset = v.AuxInt
   698  		p.To.Type = obj.TYPE_REG
   699  		p.To.Reg = gc.SSARegNum(v)
   700  	case ssa.OpARMCMOVWLSconst:
   701  		p := gc.Prog(arm.AMOVW)
   702  		p.Scond = arm.C_SCOND_LS
   703  		p.From.Type = obj.TYPE_CONST
   704  		p.From.Offset = v.AuxInt
   705  		p.To.Type = obj.TYPE_REG
   706  		p.To.Reg = gc.SSARegNum(v)
   707  	case ssa.OpARMCALLstatic:
   708  		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
   709  			// Deferred calls will appear to be returning to
   710  			// the CALL deferreturn(SB) that we are about to emit.
   711  			// However, the stack trace code will show the line
   712  			// of the instruction byte before the return PC.
   713  			// To avoid that being an unrelated instruction,
   714  			// insert an actual hardware NOP that will have the right line number.
   715  			// This is different from obj.ANOP, which is a virtual no-op
   716  			// that doesn't make it into the instruction stream.
   717  			ginsnop()
   718  		}
   719  		p := gc.Prog(obj.ACALL)
   720  		p.To.Type = obj.TYPE_MEM
   721  		p.To.Name = obj.NAME_EXTERN
   722  		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
   723  		if gc.Maxarg < v.AuxInt {
   724  			gc.Maxarg = v.AuxInt
   725  		}
   726  	case ssa.OpARMCALLclosure:
   727  		p := gc.Prog(obj.ACALL)
   728  		p.To.Type = obj.TYPE_MEM
   729  		p.To.Offset = 0
   730  		p.To.Reg = gc.SSARegNum(v.Args[0])
   731  		if gc.Maxarg < v.AuxInt {
   732  			gc.Maxarg = v.AuxInt
   733  		}
   734  	case ssa.OpARMCALLdefer:
   735  		p := gc.Prog(obj.ACALL)
   736  		p.To.Type = obj.TYPE_MEM
   737  		p.To.Name = obj.NAME_EXTERN
   738  		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
   739  		if gc.Maxarg < v.AuxInt {
   740  			gc.Maxarg = v.AuxInt
   741  		}
   742  	case ssa.OpARMCALLgo:
   743  		p := gc.Prog(obj.ACALL)
   744  		p.To.Type = obj.TYPE_MEM
   745  		p.To.Name = obj.NAME_EXTERN
   746  		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
   747  		if gc.Maxarg < v.AuxInt {
   748  			gc.Maxarg = v.AuxInt
   749  		}
   750  	case ssa.OpARMCALLinter:
   751  		p := gc.Prog(obj.ACALL)
   752  		p.To.Type = obj.TYPE_MEM
   753  		p.To.Offset = 0
   754  		p.To.Reg = gc.SSARegNum(v.Args[0])
   755  		if gc.Maxarg < v.AuxInt {
   756  			gc.Maxarg = v.AuxInt
   757  		}
   758  	case ssa.OpARMDUFFZERO:
   759  		p := gc.Prog(obj.ADUFFZERO)
   760  		p.To.Type = obj.TYPE_MEM
   761  		p.To.Name = obj.NAME_EXTERN
   762  		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
   763  		p.To.Offset = v.AuxInt
   764  	case ssa.OpARMDUFFCOPY:
   765  		p := gc.Prog(obj.ADUFFCOPY)
   766  		p.To.Type = obj.TYPE_MEM
   767  		p.To.Name = obj.NAME_EXTERN
   768  		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
   769  		p.To.Offset = v.AuxInt
   770  	case ssa.OpARMLoweredNilCheck:
   771  		// Optimization - if the subsequent block has a load or store
   772  		// at the same address, we don't need to issue this instruction.
   773  		mem := v.Args[1]
   774  		for _, w := range v.Block.Succs[0].Block().Values {
   775  			if w.Op == ssa.OpPhi {
   776  				if w.Type.IsMemory() {
   777  					mem = w
   778  				}
   779  				continue
   780  			}
   781  			if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
   782  				// w doesn't use a store - can't be a memory op.
   783  				continue
   784  			}
   785  			if w.Args[len(w.Args)-1] != mem {
   786  				v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
   787  			}
   788  			switch w.Op {
   789  			case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload,
   790  				ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload,
   791  				ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore,
   792  				ssa.OpARMMOVFstore, ssa.OpARMMOVDstore:
   793  				// arg0 is ptr, auxint is offset
   794  				if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
   795  					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
   796  						gc.Warnl(v.Line, "removed nil check")
   797  					}
   798  					return
   799  				}
   800  			case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero:
   801  				// arg0 is ptr
   802  				if w.Args[0] == v.Args[0] {
   803  					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
   804  						gc.Warnl(v.Line, "removed nil check")
   805  					}
   806  					return
   807  				}
   808  			case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove:
   809  				// arg0 is dst ptr, arg1 is src ptr
   810  				if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
   811  					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
   812  						gc.Warnl(v.Line, "removed nil check")
   813  					}
   814  					return
   815  				}
   816  			default:
   817  			}
   818  			if w.Type.IsMemory() {
   819  				if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
   820  					// these ops are OK
   821  					mem = w
   822  					continue
   823  				}
   824  				// We can't delay the nil check past the next store.
   825  				break
   826  			}
   827  		}
   828  		// Issue a load which will fault if arg is nil.
   829  		p := gc.Prog(arm.AMOVB)
   830  		p.From.Type = obj.TYPE_MEM
   831  		p.From.Reg = gc.SSARegNum(v.Args[0])
   832  		gc.AddAux(&p.From, v)
   833  		p.To.Type = obj.TYPE_REG
   834  		p.To.Reg = arm.REGTMP
   835  		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
   836  			gc.Warnl(v.Line, "generated nil check")
   837  		}
   838  	case ssa.OpARMLoweredZero:
   839  		// MOVW.P	Rarg2, 4(R1)
   840  		// CMP	Rarg1, R1
   841  		// BLE	-2(PC)
   842  		// arg1 is the address of the last element to zero
   843  		// arg2 is known to be zero
   844  		// auxint is alignment
   845  		var sz int64
   846  		var mov obj.As
   847  		switch {
   848  		case v.AuxInt%4 == 0:
   849  			sz = 4
   850  			mov = arm.AMOVW
   851  		case v.AuxInt%2 == 0:
   852  			sz = 2
   853  			mov = arm.AMOVH
   854  		default:
   855  			sz = 1
   856  			mov = arm.AMOVB
   857  		}
   858  		p := gc.Prog(mov)
   859  		p.Scond = arm.C_PBIT
   860  		p.From.Type = obj.TYPE_REG
   861  		p.From.Reg = gc.SSARegNum(v.Args[2])
   862  		p.To.Type = obj.TYPE_MEM
   863  		p.To.Reg = arm.REG_R1
   864  		p.To.Offset = sz
   865  		p2 := gc.Prog(arm.ACMP)
   866  		p2.From.Type = obj.TYPE_REG
   867  		p2.From.Reg = gc.SSARegNum(v.Args[1])
   868  		p2.Reg = arm.REG_R1
   869  		p3 := gc.Prog(arm.ABLE)
   870  		p3.To.Type = obj.TYPE_BRANCH
   871  		gc.Patch(p3, p)
   872  	case ssa.OpARMLoweredMove:
   873  		// MOVW.P	4(R1), Rtmp
   874  		// MOVW.P	Rtmp, 4(R2)
   875  		// CMP	Rarg2, R1
   876  		// BLE	-3(PC)
   877  		// arg2 is the address of the last element of src
   878  		// auxint is alignment
   879  		var sz int64
   880  		var mov obj.As
   881  		switch {
   882  		case v.AuxInt%4 == 0:
   883  			sz = 4
   884  			mov = arm.AMOVW
   885  		case v.AuxInt%2 == 0:
   886  			sz = 2
   887  			mov = arm.AMOVH
   888  		default:
   889  			sz = 1
   890  			mov = arm.AMOVB
   891  		}
   892  		p := gc.Prog(mov)
   893  		p.Scond = arm.C_PBIT
   894  		p.From.Type = obj.TYPE_MEM
   895  		p.From.Reg = arm.REG_R1
   896  		p.From.Offset = sz
   897  		p.To.Type = obj.TYPE_REG
   898  		p.To.Reg = arm.REGTMP
   899  		p2 := gc.Prog(mov)
   900  		p2.Scond = arm.C_PBIT
   901  		p2.From.Type = obj.TYPE_REG
   902  		p2.From.Reg = arm.REGTMP
   903  		p2.To.Type = obj.TYPE_MEM
   904  		p2.To.Reg = arm.REG_R2
   905  		p2.To.Offset = sz
   906  		p3 := gc.Prog(arm.ACMP)
   907  		p3.From.Type = obj.TYPE_REG
   908  		p3.From.Reg = gc.SSARegNum(v.Args[2])
   909  		p3.Reg = arm.REG_R1
   910  		p4 := gc.Prog(arm.ABLE)
   911  		p4.To.Type = obj.TYPE_BRANCH
   912  		gc.Patch(p4, p)
   913  	case ssa.OpVarDef:
   914  		gc.Gvardef(v.Aux.(*gc.Node))
   915  	case ssa.OpVarKill:
   916  		gc.Gvarkill(v.Aux.(*gc.Node))
   917  	case ssa.OpVarLive:
   918  		gc.Gvarlive(v.Aux.(*gc.Node))
   919  	case ssa.OpKeepAlive:
   920  		if !v.Args[0].Type.IsPtrShaped() {
   921  			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
   922  		}
   923  		n, off := gc.AutoVar(v.Args[0])
   924  		if n == nil {
   925  			v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
   926  		}
   927  		if off != 0 {
   928  			v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
   929  		}
   930  		gc.Gvarlive(n)
   931  	case ssa.OpARMEqual,
   932  		ssa.OpARMNotEqual,
   933  		ssa.OpARMLessThan,
   934  		ssa.OpARMLessEqual,
   935  		ssa.OpARMGreaterThan,
   936  		ssa.OpARMGreaterEqual,
   937  		ssa.OpARMLessThanU,
   938  		ssa.OpARMLessEqualU,
   939  		ssa.OpARMGreaterThanU,
   940  		ssa.OpARMGreaterEqualU:
   941  		// generate boolean values
   942  		// use conditional move
   943  		p := gc.Prog(arm.AMOVW)
   944  		p.From.Type = obj.TYPE_CONST
   945  		p.From.Offset = 0
   946  		p.To.Type = obj.TYPE_REG
   947  		p.To.Reg = gc.SSARegNum(v)
   948  		p = gc.Prog(arm.AMOVW)
   949  		p.Scond = condBits[v.Op]
   950  		p.From.Type = obj.TYPE_CONST
   951  		p.From.Offset = 1
   952  		p.To.Type = obj.TYPE_REG
   953  		p.To.Reg = gc.SSARegNum(v)
   954  	case ssa.OpSelect0, ssa.OpSelect1:
   955  		// nothing to do
   956  	case ssa.OpARMLoweredGetClosurePtr:
   957  		// Closure pointer is R7 (arm.REGCTXT).
   958  		gc.CheckLoweredGetClosurePtr(v)
   959  	case ssa.OpARMFlagEQ,
   960  		ssa.OpARMFlagLT_ULT,
   961  		ssa.OpARMFlagLT_UGT,
   962  		ssa.OpARMFlagGT_ULT,
   963  		ssa.OpARMFlagGT_UGT:
   964  		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
   965  	case ssa.OpARMInvertFlags:
   966  		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
   967  	default:
   968  		v.Unimplementedf("genValue not implemented: %s", v.LongString())
   969  	}
   970  }
   971  
   972  var condBits = map[ssa.Op]uint8{
   973  	ssa.OpARMEqual:         arm.C_SCOND_EQ,
   974  	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
   975  	ssa.OpARMLessThan:      arm.C_SCOND_LT,
   976  	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
   977  	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
   978  	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
   979  	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
   980  	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
   981  	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
   982  	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
   983  }
   984  
   985  var blockJump = map[ssa.BlockKind]struct {
   986  	asm, invasm obj.As
   987  }{
   988  	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
   989  	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
   990  	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
   991  	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
   992  	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
   993  	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
   994  	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
   995  	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
   996  	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
   997  	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
   998  }
   999  
  1000  func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
  1001  	s.SetLineno(b.Line)
  1002  
  1003  	switch b.Kind {
  1004  	case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
  1005  		if b.Succs[0].Block() != next {
  1006  			p := gc.Prog(obj.AJMP)
  1007  			p.To.Type = obj.TYPE_BRANCH
  1008  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1009  		}
  1010  
  1011  	case ssa.BlockDefer:
  1012  		// defer returns in R0:
  1013  		// 0 if we should continue executing
  1014  		// 1 if we should jump to deferreturn call
  1015  		p := gc.Prog(arm.ACMP)
  1016  		p.From.Type = obj.TYPE_CONST
  1017  		p.From.Offset = 0
  1018  		p.Reg = arm.REG_R0
  1019  		p = gc.Prog(arm.ABNE)
  1020  		p.To.Type = obj.TYPE_BRANCH
  1021  		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
  1022  		if b.Succs[0].Block() != next {
  1023  			p := gc.Prog(obj.AJMP)
  1024  			p.To.Type = obj.TYPE_BRANCH
  1025  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1026  		}
  1027  
  1028  	case ssa.BlockExit:
  1029  		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
  1030  
  1031  	case ssa.BlockRet:
  1032  		gc.Prog(obj.ARET)
  1033  
  1034  	case ssa.BlockRetJmp:
  1035  		p := gc.Prog(obj.ARET)
  1036  		p.To.Type = obj.TYPE_MEM
  1037  		p.To.Name = obj.NAME_EXTERN
  1038  		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
  1039  
  1040  	case ssa.BlockARMEQ, ssa.BlockARMNE,
  1041  		ssa.BlockARMLT, ssa.BlockARMGE,
  1042  		ssa.BlockARMLE, ssa.BlockARMGT,
  1043  		ssa.BlockARMULT, ssa.BlockARMUGT,
  1044  		ssa.BlockARMULE, ssa.BlockARMUGE:
  1045  		jmp := blockJump[b.Kind]
  1046  		var p *obj.Prog
  1047  		switch next {
  1048  		case b.Succs[0].Block():
  1049  			p = gc.Prog(jmp.invasm)
  1050  			p.To.Type = obj.TYPE_BRANCH
  1051  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
  1052  		case b.Succs[1].Block():
  1053  			p = gc.Prog(jmp.asm)
  1054  			p.To.Type = obj.TYPE_BRANCH
  1055  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1056  		default:
  1057  			p = gc.Prog(jmp.asm)
  1058  			p.To.Type = obj.TYPE_BRANCH
  1059  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1060  			q := gc.Prog(obj.AJMP)
  1061  			q.To.Type = obj.TYPE_BRANCH
  1062  			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
  1063  		}
  1064  
  1065  	default:
  1066  		b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
  1067  	}
  1068  }