github.com/bir3/gocompiler@v0.9.2202/src/cmd/compile/internal/arm/ssa.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"fmt"
	"github.com/bir3/gocompiler/src/internal/buildcfg"
	"math"
	"math/bits"

	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/logopt"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ssa"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ssagen"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
	"github.com/bir3/gocompiler/src/cmd/internal/obj"
	"github.com/bir3/gocompiler/src/cmd/internal/obj/arm"
)

// loadByType returns the load instruction of the given type.
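// For example, a signed 1-byte integer is loaded with MOVB (sign-extending),
// an unsigned one with MOVBU (zero-extending).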
func loadByType(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm.AMOVB
			} else {
				return arm.AMOVBU
			}
		case 2:
			if t.IsSigned() {
				return arm.AMOVH
			} else {
				return arm.AMOVHU
			}
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad load type")
}

// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return arm.AMOVB
		case 2:
			return arm.AMOVH
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad store type")
}

// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
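// The layout follows the ARM shifted-register operand encoding, as decoded by
// String and built by makeshift/makeregshift below:
//
//	bits 0-3	Rm, the register being shifted
//	bit 4		0 for a constant shift, 1 for a register shift
//	bits 5-6	the shift type: 00 is <<, 01 is >>, 10 is ->, 11 is @>
//	bits 7-11	the shift amount, for a constant shift
//	bits 8-11	Rs, the register holding the shift amount, for a register shift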
type shift int64

// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
func (v shift) String() string {
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// register shift
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	} else {
		// constant shift
		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
	}
}

// makeshift encodes a register shifted by a constant.
func makeshift(v *ssa.Value, reg int16, typ int64, s int64) shift {
	if s < 0 || s >= 32 {
		v.Fatalf("shift out of range: %d", s)
	}
	return shift(int64(reg&0xf) | typ | (s&31)<<7)
}

// genshift generates a Prog for r = r0 op (r1 shifted by n).
func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeshift(v, r1, typ, n))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}
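// For example, genshift(s, v, arm.AADD, r0, r1, r, arm.SHIFT_LL, 2) assembles
// roughly as "ADD R(r1)<<2, R(r0), R(r)"; with r == 0 the destination is left
// unset, which is how the CMP-style ops below use it.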

// makeregshift encodes a register shifted by a register.
func makeregshift(r1 int16, typ int64, r2 int16) shift {
	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}

// genregshift generates a Prog for r = r0 op (r1 shifted by r2).
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeregshift(r1, typ, r2))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}
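// For example, genregshift(s, arm.AADD, r0, r1, r2, r, arm.SHIFT_LL) assembles
// roughly as "ADD R(r1)<<R(r2), R(r0), R(r)".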

// getBFC finds a (lsb, width) pair for BFC.
// lsb must be in [0, 31], width must be in [1, 32 - lsb].
// It returns (0xffffffff, 0) if v does not have the binary form 0...01...10...0.
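// For example, getBFC(0x000ffff0) returns (4, 16): the set bits form a
// contiguous run of width 16 starting at bit 4.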
func getBFC(v uint32) (uint32, uint32) {
	var m, l uint32
	// BFC is not applicable to zero
	if v == 0 {
		return 0xffffffff, 0
	}
	// find the lowest set bit, for example l=2 for 0x3ffffffc
	l = uint32(bits.TrailingZeros32(v))
	// m-1 is the highest set bit index, for example m=30 for 0x3ffffffc
	m = 32 - uint32(bits.LeadingZeros32(v))
	// check that v has the binary form 0...01...10...0
	if (1<<m)-(1<<l) == v {
		// m > l must hold for non-zero v
		return l, m - l
	}
	// invalid
	return 0xffffffff, 0
}

func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	switch v.Op {
	case ssa.OpCopy, ssa.OpARMMOVWreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x == y {
			return
		}
		as := arm.AMOVW
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = arm.AMOVF
			case 8:
				as = arm.AMOVD
			default:
				panic("bad float size")
			}
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpARMMOVWnop:
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		ssagen.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddrAuto(&p.To, v)
	case ssa.OpARMADD,
		ssa.OpARMADC,
		ssa.OpARMSUB,
		ssa.OpARMSBC,
		ssa.OpARMRSB,
		ssa.OpARMAND,
		ssa.OpARMOR,
		ssa.OpARMXOR,
		ssa.OpARMBIC,
		ssa.OpARMMUL,
		ssa.OpARMADDF,
		ssa.OpARMADDD,
		ssa.OpARMSUBF,
		ssa.OpARMSUBD,
		ssa.OpARMSLL,
		ssa.OpARMSRL,
		ssa.OpARMSRA,
		ssa.OpARMMULF,
		ssa.OpARMMULD,
		ssa.OpARMNMULF,
		ssa.OpARMNMULD,
		ssa.OpARMDIVF,
		ssa.OpARMDIVD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSRR:
		genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR)
	case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD:
		r := v.Reg()
		r0 := v.Args[0].Reg()
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		if r != r0 {
			v.Fatalf("result and addend are not in the same register: %v", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDS,
		ssa.OpARMSUBS:
		r := v.Reg0()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSRAcond:
		// ARM shift instructions use only the low-order byte of the shift amount,
		// so generate conditional instructions to deal with large shifts.
		// The flags are already set:
		// SRA.HS	$31, Rarg0, Rdst // shift 31 bits to get the sign bit
		// SRA.LO	Rarg1, Rarg0, Rdst
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 31
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p = s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_LO
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMBFX, ssa.OpARMBFXU:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt >> 8
		p.AddRestSourceConst(v.AuxInt & 0xff)
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMANDconst, ssa.OpARMBICconst:
		// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
		// BFC is only available on ARMv7, and its result and source are in the same register
		if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() {
			var val uint32
			if v.Op == ssa.OpARMANDconst {
				val = ^uint32(v.AuxInt)
			} else { // BICconst
				val = uint32(v.AuxInt)
			}
			lsb, width := getBFC(val)
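			// For example, ANDconst with AuxInt 0xfff0000f gives
			// val = ^0xfff0000f = 0x000ffff0, so lsb=4 and width=16,
			// and the BFC below clears bits 4-19 in place.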
			// omit BFC where ARM's 12-bit modified immediate encoding suffices
			if 8 < width && width < 24 {
				p := s.Prog(arm.ABFC)
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = int64(width)
				p.AddRestSourceConst(int64(lsb))
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				break
			}
		}
		// fall back to ordinary form
		fallthrough
	case ssa.OpARMADDconst,
		ssa.OpARMADCconst,
		ssa.OpARMSUBconst,
		ssa.OpARMSBCconst,
		ssa.OpARMRSBconst,
		ssa.OpARMRSCconst,
		ssa.OpARMORconst,
		ssa.OpARMXORconst,
		ssa.OpARMSLLconst,
		ssa.OpARMSRLconst,
		ssa.OpARMSRAconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMADDSconst,
		ssa.OpARMSUBSconst,
		ssa.OpARMRSBSconst:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpARMSRRconst:
		genshift(s, v, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMADDshiftLL,
		ssa.OpARMADCshiftLL,
		ssa.OpARMSUBshiftLL,
		ssa.OpARMSBCshiftLL,
		ssa.OpARMRSBshiftLL,
		ssa.OpARMRSCshiftLL,
		ssa.OpARMANDshiftLL,
		ssa.OpARMORshiftLL,
		ssa.OpARMXORshiftLL,
		ssa.OpARMBICshiftLL:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMADDSshiftLL,
		ssa.OpARMSUBSshiftLL,
		ssa.OpARMRSBSshiftLL:
		p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRL,
		ssa.OpARMADCshiftRL,
		ssa.OpARMSUBshiftRL,
		ssa.OpARMSBCshiftRL,
		ssa.OpARMRSBshiftRL,
		ssa.OpARMRSCshiftRL,
		ssa.OpARMANDshiftRL,
		ssa.OpARMORshiftRL,
		ssa.OpARMXORshiftRL,
		ssa.OpARMBICshiftRL:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMADDSshiftRL,
		ssa.OpARMSUBSshiftRL,
		ssa.OpARMRSBSshiftRL:
		p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRA,
		ssa.OpARMADCshiftRA,
		ssa.OpARMSUBshiftRA,
		ssa.OpARMSBCshiftRA,
		ssa.OpARMRSBshiftRA,
		ssa.OpARMRSCshiftRA,
		ssa.OpARMANDshiftRA,
		ssa.OpARMORshiftRA,
		ssa.OpARMXORshiftRA,
		ssa.OpARMBICshiftRA:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMADDSshiftRA,
		ssa.OpARMSUBSshiftRA,
		ssa.OpARMRSBSshiftRA:
		p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMXORshiftRR:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMMVNshiftLL:
		genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMMVNshiftRL:
		genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMMVNshiftRA:
		genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMMVNshiftLLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMMVNshiftRLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMMVNshiftRAreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDshiftLLreg,
		ssa.OpARMADCshiftLLreg,
		ssa.OpARMSUBshiftLLreg,
		ssa.OpARMSBCshiftLLreg,
		ssa.OpARMRSBshiftLLreg,
		ssa.OpARMRSCshiftLLreg,
		ssa.OpARMANDshiftLLreg,
		ssa.OpARMORshiftLLreg,
		ssa.OpARMXORshiftLLreg,
		ssa.OpARMBICshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMADDSshiftLLreg,
		ssa.OpARMSUBSshiftLLreg,
		ssa.OpARMRSBSshiftLLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRLreg,
		ssa.OpARMADCshiftRLreg,
		ssa.OpARMSUBshiftRLreg,
		ssa.OpARMSBCshiftRLreg,
		ssa.OpARMRSBshiftRLreg,
		ssa.OpARMRSCshiftRLreg,
		ssa.OpARMANDshiftRLreg,
		ssa.OpARMORshiftRLreg,
		ssa.OpARMXORshiftRLreg,
		ssa.OpARMBICshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMADDSshiftRLreg,
		ssa.OpARMSUBSshiftRLreg,
		ssa.OpARMRSBSshiftRLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRAreg,
		ssa.OpARMADCshiftRAreg,
		ssa.OpARMSUBshiftRAreg,
		ssa.OpARMSBCshiftRAreg,
		ssa.OpARMRSBshiftRAreg,
		ssa.OpARMRSCshiftRAreg,
		ssa.OpARMANDshiftRAreg,
		ssa.OpARMORshiftRAreg,
		ssa.OpARMXORshiftRAreg,
		ssa.OpARMBICshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDSshiftRAreg,
		ssa.OpARMSUBSshiftRAreg,
		ssa.OpARMRSBSshiftRAreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMHMUL,
		ssa.OpARMHMULU:
		// 32-bit high multiplication
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg()
		p.To.Offset = arm.REGTMP // throw away the low 32 bits into the tmp register
	case ssa.OpARMMULLU:
		// 32-bit multiplication with a 64-bit result: the high 32 bits go to out0, the low 32 bits to out1
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg0()           // high 32 bits
		p.To.Offset = int64(v.Reg1()) // low 32 bits
	case ssa.OpARMMULA, ssa.OpARMMULS:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG2
		p.To.Reg = v.Reg()                   // result
		p.To.Offset = int64(v.Args[2].Reg()) // addend
	case ssa.OpARMMOVWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVFconst,
		ssa.OpARMMOVDconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMP,
		ssa.OpARMCMN,
		ssa.OpARMTST,
		ssa.OpARMTEQ,
		ssa.OpARMCMPF,
		ssa.OpARMCMPD:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		// Special layout in ARM assembly:
		// compared with x86, the operands of ARM's CMP are reversed.
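		// The flags are thus set for Args[0] - Args[1].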
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPconst,
		ssa.OpARMCMNconst,
		ssa.OpARMTSTconst,
		ssa.OpARMTEQconst:
		// Special layout in ARM assembly
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPF0,
		ssa.OpARMCMPD0:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
		genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
	case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
	case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
	case ssa.OpARMMOVWaddr:
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

		var wantreg string
		// MOVW $sym+off(base), R
		// the assembler expands it as follows:
		// - base is SP: add the constant offset to SP (R13);
		//               when the constant is large, the tmp register (R11) may be used
		// - base is SB: load the external address from the constant pool (using a relocation)
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *obj.LSym:
			wantreg = "SB"
			ssagen.AddAux(&p.From, v)
		case *ir.Name:
			wantreg = "SP"
			ssagen.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}

	case ssa.OpARMMOVBload,
		ssa.OpARMMOVBUload,
		ssa.OpARMMOVHload,
		ssa.OpARMMOVHUload,
		ssa.OpARMMOVWload,
		ssa.OpARMMOVFload,
		ssa.OpARMMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVBstore,
		ssa.OpARMMOVHstore,
		ssa.OpARMMOVWstore,
		ssa.OpARMMOVFstore,
		ssa.OpARMMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
		// an indexed load is just a shifted load with a shift of 0 bits
		fallthrough
	case ssa.OpARMMOVWloadshiftLL:
		p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRL:
		p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRA:
		p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
		// an indexed store is just a shifted store with a shift of 0 bits
		fallthrough
	case ssa.OpARMMOVWstoreshiftLL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
	case ssa.OpARMMOVBreg,
		ssa.OpARMMOVBUreg,
		ssa.OpARMMOVHreg,
		ssa.OpARMMOVHUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
				// arg is a properly typed load, already zero/sign-extended; don't extend again
				if v.Reg() == v.Args[0].Reg() {
					return
				}
				p := s.Prog(arm.AMOVW)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = v.Args[0].Reg()
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				return
			default:
			}
		}
		if buildcfg.GOARM.Version >= 6 {
			// generate the more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 and ARMv7
			genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
			return
		}
		fallthrough
	case ssa.OpARMMVN,
		ssa.OpARMCLZ,
		ssa.OpARMREV,
		ssa.OpARMREV16,
		ssa.OpARMRBIT,
		ssa.OpARMSQRTF,
		ssa.OpARMSQRTD,
		ssa.OpARMNEGF,
		ssa.OpARMNEGD,
		ssa.OpARMABSD,
		ssa.OpARMMOVWF,
		ssa.OpARMMOVWD,
		ssa.OpARMMOVFW,
		ssa.OpARMMOVDW,
		ssa.OpARMMOVFD,
		ssa.OpARMMOVDF:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVWUF,
		ssa.OpARMMOVWUD,
		ssa.OpARMMOVFWU,
		ssa.OpARMMOVDWU:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_UBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWHSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWLSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_LS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
		s.Call(v)
	case ssa.OpARMCALLtail:
		s.TailCall(v)
	case ssa.OpARMCALLudiv:
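		// 32-bit ARM has no guaranteed hardware divide instruction, so
		// unsigned division is done by calling the runtime's udiv helper.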
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Udiv
	case ssa.OpARMLoweredWB:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		// AuxInt encodes how many buffer entries we need.
		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
	case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
		s.UseArgs(8) // space used in callee args area by assembly stubs
	case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
		s.UseArgs(12) // space used in callee args area by assembly stubs
	case ssa.OpARMDUFFZERO:
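		// Jump into the runtime's Duff's-device zeroing routine; AuxInt
		// selects the entry point, and hence how much gets zeroed.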
		p := s.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Duffzero
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := s.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Duffcopy
		p.To.Offset = v.AuxInt
	case ssa.OpARMLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := s.Prog(arm.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		if logopt.Enabled() {
			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
		}
		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			base.WarnfAt(v.Pos, "generated nil check")
		}
	case ssa.OpARMLoweredZero:
		// MOVW.P	Rarg2, 4(R1)
		// CMP	Rarg1, R1
		// BLE	-2(PC)
		// arg1 is the address of the last element to zero
		// arg2 is known to be zero
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REG_R1
		p.To.Offset = sz
		p2 := s.Prog(arm.ACMP)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Args[1].Reg()
		p2.Reg = arm.REG_R1
		p3 := s.Prog(arm.ABLE)
		p3.To.Type = obj.TYPE_BRANCH
		p3.To.SetTarget(p)
	case ssa.OpARMLoweredMove:
		// MOVW.P	4(R1), Rtmp
		// MOVW.P	Rtmp, 4(R2)
		// CMP	Rarg2, R1
		// BLE	-3(PC)
		// arg2 is the address of the last element of src
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = arm.REG_R1
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		p2 := s.Prog(mov)
		p2.Scond = arm.C_PBIT
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = arm.REG_R2
		p2.To.Offset = sz
		p3 := s.Prog(arm.ACMP)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[2].Reg()
		p3.Reg = arm.REG_R1
		p4 := s.Prog(arm.ABLE)
		p4.To.Type = obj.TYPE_BRANCH
		p4.To.SetTarget(p)
	case ssa.OpARMEqual,
		ssa.OpARMNotEqual,
		ssa.OpARMLessThan,
		ssa.OpARMLessEqual,
		ssa.OpARMGreaterThan,
		ssa.OpARMGreaterEqual,
		ssa.OpARMLessThanU,
		ssa.OpARMLessEqualU,
		ssa.OpARMGreaterThanU,
		ssa.OpARMGreaterEqualU:
		// generate boolean values using conditional moves
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p = s.Prog(arm.AMOVW)
		p.Scond = condBits[v.Op]
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMLoweredGetClosurePtr:
		// Closure pointer is R7 (arm.REGCTXT).
		ssagen.CheckLoweredGetClosurePtr(v)
	case ssa.OpARMLoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMLoweredGetCallerPC:
		p := s.Prog(obj.AGETCALLERPC)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMFlagConstant:
		v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
	case ssa.OpARMInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpClobber, ssa.OpClobberReg:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

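// condBits maps an SSA comparison op to the ARM condition code under which its
// boolean result is 1.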
var condBits = map[ssa.Op]uint8{
	ssa.OpARMEqual:         arm.C_SCOND_EQ,
	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
	ssa.OpARMLessThan:      arm.C_SCOND_LT,
	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
}

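// blockJump maps a conditional block kind to its branch instruction (asm) and
// the inverted branch (invasm) used when the true successor is the
// fallthrough block.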
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARMEQ:     {arm.ABEQ, arm.ABNE},
	ssa.BlockARMNE:     {arm.ABNE, arm.ABEQ},
	ssa.BlockARMLT:     {arm.ABLT, arm.ABGE},
	ssa.BlockARMGE:     {arm.ABGE, arm.ABLT},
	ssa.BlockARMLE:     {arm.ABLE, arm.ABGT},
	ssa.BlockARMGT:     {arm.ABGT, arm.ABLE},
	ssa.BlockARMULT:    {arm.ABLO, arm.ABHS},
	ssa.BlockARMUGE:    {arm.ABHS, arm.ABLO},
	ssa.BlockARMUGT:    {arm.ABHI, arm.ABLS},
	ssa.BlockARMULE:    {arm.ABLS, arm.ABHI},
	ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL},
	ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
}

// leJumps models a 'LEnoov' ('<=' without overflow checking) branch.
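// Each entry is a pair of conditional branches emitted by s.CombJump, with
// Index selecting the successor each branch targets; the row is chosen by
// which successor is the fallthrough block.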
var leJumps = [2][2]ssagen.IndexJump{
	{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
	{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}

// gtJumps models a 'GTnoov' ('>' without overflow checking) branch.
var gtJumps = [2][2]ssagen.IndexJump{
	{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
	{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}

func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockDefer:
		// defer returns in R0:
		// 0 if we should continue executing,
		// 1 if we should jump to the deferreturn call.
		p := s.Prog(arm.ACMP)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.Reg = arm.REG_R0
		p = s.Prog(arm.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockExit, ssa.BlockRetJmp:

	case ssa.BlockRet:
		s.Prog(obj.ARET)

	case ssa.BlockARMEQ, ssa.BlockARMNE,
		ssa.BlockARMLT, ssa.BlockARMGE,
		ssa.BlockARMLE, ssa.BlockARMGT,
		ssa.BlockARMULT, ssa.BlockARMUGT,
		ssa.BlockARMULE, ssa.BlockARMUGE,
		ssa.BlockARMLTnoov, ssa.BlockARMGEnoov:
		jmp := blockJump[b.Kind]
		switch next {
		case b.Succs[0].Block():
			s.Br(jmp.invasm, b.Succs[1].Block())
		case b.Succs[1].Block():
			s.Br(jmp.asm, b.Succs[0].Block())
		default:
			if b.Likely != ssa.BranchUnlikely {
				s.Br(jmp.asm, b.Succs[0].Block())
				s.Br(obj.AJMP, b.Succs[1].Block())
			} else {
				s.Br(jmp.invasm, b.Succs[1].Block())
				s.Br(obj.AJMP, b.Succs[0].Block())
			}
		}

	case ssa.BlockARMLEnoov:
		s.CombJump(b, next, &leJumps)

	case ssa.BlockARMGTnoov:
		s.CombJump(b, next, &gtJumps)

	default:
		b.Fatalf("branch not implemented: %s", b.LongString())
	}
}