// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"fmt"
	"math"
	"math/bits"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
	"cmd/internal/objabi"
)

// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm.AMOVB
			} else {
				return arm.AMOVBU
			}
		case 2:
			if t.IsSigned() {
				return arm.AMOVH
			} else {
				return arm.AMOVHU
			}
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad load type")
}

// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return arm.AMOVB
		case 2:
			return arm.AMOVH
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad store type")
}
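
// Note that stores, unlike loads, need no signed/unsigned variants:
// storing the low 1, 2, or 4 bytes of a register is the same operation
// regardless of sign.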

// shift is used as the Offset in obj.TYPE_SHIFT operands to encode shifted register operands
type shift int64

// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
func (v shift) String() string {
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// register shift
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	} else {
		// constant shift
		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
	}
}

// makeshift encodes a register shifted by a constant
func makeshift(reg int16, typ int64, s int64) shift {
	return shift(int64(reg&0xf) | typ | (s&31)<<7)
}
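
// For example (a sketch of the encoding, using a bare register number for
// illustration): makeshift(1, arm.SHIFT_LL, 2) packs the register in bits
// 0-3, the shift type in bits 5-6, and the amount 2 in bits 7-11, giving
// 0x101, which String renders as "R1<<2".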

// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeshift(r1, typ, n))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}

// makeregshift encodes a register shifted by a register
func makeregshift(r1 int16, typ int64, r2 int16) shift {
	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}
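
// For example (a sketch, per the masks above): makeregshift(1, arm.SHIFT_LR, 2)
// sets bit 4 (the register-shift flag) and packs the shift register in bits
// 8-11, giving 0x231, which String renders as "R1>>R2".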

// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeregshift(r1, typ, r2))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}
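
// In both genshift and genregshift, passing r == 0 leaves the destination
// operand unset; the comparison cases below (CMP/CMN/TST/TEQ with shifted
// operands) rely on this, since those instructions have no destination register.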

// getBFC finds a (lsb, width) pair for BFC.
// lsb must be in [0, 31], width must be in [1, 32 - lsb].
// It returns (0xffffffff, 0) if v is not of the binary form 0...01...10...0.
func getBFC(v uint32) (uint32, uint32) {
	var m, l uint32
	// BFC is not applicable to zero
	if v == 0 {
		return 0xffffffff, 0
	}
	// find the lowest set bit, for example l=2 for 0x3ffffffc
	l = uint32(bits.TrailingZeros32(v))
	// m-1 is the index of the highest set bit, for example m=30 for 0x3ffffffc
	m = 32 - uint32(bits.LeadingZeros32(v))
	// check whether v has the binary form 0...01...10...0
	if (1<<m)-(1<<l) == v {
		// m > l must hold for non-zero v
		return l, m - l
	}
	// invalid
	return 0xffffffff, 0
}
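
// For example, getBFC(0x3ffffffc) finds l=2 and m=30; since
// (1<<30)-(1<<2) == 0x3ffffffc, it returns (2, 28), meaning BFC can clear
// the 28-bit field starting at bit 2. getBFC(0xff00ff) fails the form check
// and returns (0xffffffff, 0).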

func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpCopy, ssa.OpARMMOVWreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x == y {
			return
		}
		as := arm.AMOVW
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = arm.AMOVF
			case 8:
				as = arm.AMOVD
			default:
				panic("bad float size")
			}
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpARMMOVWnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpARMADD,
		ssa.OpARMADC,
		ssa.OpARMSUB,
		ssa.OpARMSBC,
		ssa.OpARMRSB,
		ssa.OpARMAND,
		ssa.OpARMOR,
		ssa.OpARMXOR,
		ssa.OpARMBIC,
		ssa.OpARMMUL,
		ssa.OpARMADDF,
		ssa.OpARMADDD,
		ssa.OpARMSUBF,
		ssa.OpARMSUBD,
		ssa.OpARMMULF,
		ssa.OpARMMULD,
		ssa.OpARMNMULF,
		ssa.OpARMNMULD,
		ssa.OpARMDIVF,
		ssa.OpARMDIVD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD:
		r := v.Reg()
		r0 := v.Args[0].Reg()
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		if r != r0 {
			v.Fatalf("result and addend are not in the same register: %v", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDS,
		ssa.OpARMSUBS:
		r := v.Reg0()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSLL,
		ssa.OpARMSRL,
		ssa.OpARMSRA:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSRAcond:
		// ARM shift instructions use only the low-order byte of the shift amount,
		// so generate conditional instructions to deal with large shifts.
		// The flags are already set:
		// SRA.HS	$31, Rarg0, Rdst // shift 31 bits to get the sign bit
		// SRA.LO	Rarg1, Rarg0, Rdst
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 31
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p = s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_LO
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMBFX, ssa.OpARMBFXU:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt >> 8
		p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMANDconst, ssa.OpARMBICconst:
		// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
		// BFC is only available on ARMv7, and its result and source are in the same register
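		// For example, ANDconst with AuxInt 0xff0000ff inverts to val 0x00ffff00;
		// getBFC returns (8, 16), so a single BFC clears those 16 bits in place
		// (a worked example of the rewrite below, not an extra special case).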
		if objabi.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
			var val uint32
			if v.Op == ssa.OpARMANDconst {
				val = ^uint32(v.AuxInt)
			} else { // BICconst
				val = uint32(v.AuxInt)
			}
			lsb, width := getBFC(val)
			// omit BFC when the constant is encodable as an ordinary imm12 operand
			if 8 < width && width < 24 {
				p := s.Prog(arm.ABFC)
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = int64(width)
				p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)})
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				break
			}
		}
		// fall back to ordinary form
		fallthrough
	case ssa.OpARMADDconst,
		ssa.OpARMADCconst,
		ssa.OpARMSUBconst,
		ssa.OpARMSBCconst,
		ssa.OpARMRSBconst,
		ssa.OpARMRSCconst,
		ssa.OpARMORconst,
		ssa.OpARMXORconst,
		ssa.OpARMSLLconst,
		ssa.OpARMSRLconst,
		ssa.OpARMSRAconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMADDSconst,
		ssa.OpARMSUBSconst,
		ssa.OpARMRSBSconst:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpARMSRRconst:
		genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMADDshiftLL,
		ssa.OpARMADCshiftLL,
		ssa.OpARMSUBshiftLL,
		ssa.OpARMSBCshiftLL,
		ssa.OpARMRSBshiftLL,
		ssa.OpARMRSCshiftLL,
		ssa.OpARMANDshiftLL,
		ssa.OpARMORshiftLL,
		ssa.OpARMXORshiftLL,
		ssa.OpARMBICshiftLL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMADDSshiftLL,
		ssa.OpARMSUBSshiftLL,
		ssa.OpARMRSBSshiftLL:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRL,
		ssa.OpARMADCshiftRL,
		ssa.OpARMSUBshiftRL,
		ssa.OpARMSBCshiftRL,
		ssa.OpARMRSBshiftRL,
		ssa.OpARMRSCshiftRL,
		ssa.OpARMANDshiftRL,
		ssa.OpARMORshiftRL,
		ssa.OpARMXORshiftRL,
		ssa.OpARMBICshiftRL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMADDSshiftRL,
		ssa.OpARMSUBSshiftRL,
		ssa.OpARMRSBSshiftRL:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRA,
		ssa.OpARMADCshiftRA,
		ssa.OpARMSUBshiftRA,
		ssa.OpARMSBCshiftRA,
		ssa.OpARMRSBshiftRA,
		ssa.OpARMRSCshiftRA,
		ssa.OpARMANDshiftRA,
		ssa.OpARMORshiftRA,
		ssa.OpARMXORshiftRA,
		ssa.OpARMBICshiftRA:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMADDSshiftRA,
		ssa.OpARMSUBSshiftRA,
		ssa.OpARMRSBSshiftRA:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMXORshiftRR:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMMVNshiftLL:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMMVNshiftRL:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMMVNshiftRA:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMMVNshiftLLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMMVNshiftRLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMMVNshiftRAreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDshiftLLreg,
		ssa.OpARMADCshiftLLreg,
		ssa.OpARMSUBshiftLLreg,
		ssa.OpARMSBCshiftLLreg,
		ssa.OpARMRSBshiftLLreg,
		ssa.OpARMRSCshiftLLreg,
		ssa.OpARMANDshiftLLreg,
		ssa.OpARMORshiftLLreg,
		ssa.OpARMXORshiftLLreg,
		ssa.OpARMBICshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMADDSshiftLLreg,
		ssa.OpARMSUBSshiftLLreg,
		ssa.OpARMRSBSshiftLLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRLreg,
		ssa.OpARMADCshiftRLreg,
		ssa.OpARMSUBshiftRLreg,
		ssa.OpARMSBCshiftRLreg,
		ssa.OpARMRSBshiftRLreg,
		ssa.OpARMRSCshiftRLreg,
		ssa.OpARMANDshiftRLreg,
		ssa.OpARMORshiftRLreg,
		ssa.OpARMXORshiftRLreg,
		ssa.OpARMBICshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMADDSshiftRLreg,
		ssa.OpARMSUBSshiftRLreg,
		ssa.OpARMRSBSshiftRLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRAreg,
		ssa.OpARMADCshiftRAreg,
		ssa.OpARMSUBshiftRAreg,
		ssa.OpARMSBCshiftRAreg,
		ssa.OpARMRSBshiftRAreg,
		ssa.OpARMRSCshiftRAreg,
		ssa.OpARMANDshiftRAreg,
		ssa.OpARMORshiftRAreg,
		ssa.OpARMXORshiftRAreg,
		ssa.OpARMBICshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDSshiftRAreg,
		ssa.OpARMSUBSshiftRAreg,
		ssa.OpARMRSBSshiftRAreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMHMUL,
		ssa.OpARMHMULU:
		// 32-bit high multiplication
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg()
		p.To.Offset = arm.REGTMP // throw away the low 32 bits into the tmp register
	case ssa.OpARMMULLU:
		// 32-bit multiplication with a 64-bit result: the high 32 bits go in out0, the low 32 bits in out1
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg0()           // high 32 bits
		p.To.Offset = int64(v.Reg1()) // low 32 bits
	case ssa.OpARMMULA, ssa.OpARMMULS:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG2
		p.To.Reg = v.Reg()                   // result
		p.To.Offset = int64(v.Args[2].Reg()) // addend
	case ssa.OpARMMOVWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVFconst,
		ssa.OpARMMOVDconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMP,
		ssa.OpARMCMN,
		ssa.OpARMTST,
		ssa.OpARMTEQ,
		ssa.OpARMCMPF,
		ssa.OpARMCMPD:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		// Special layout in ARM assembly:
		// compared to x86, the operands of ARM's CMP are reversed.
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPconst,
		ssa.OpARMCMNconst,
		ssa.OpARMTSTconst,
		ssa.OpARMTEQconst:
		// Special layout in ARM assembly
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPF0,
		ssa.OpARMCMPD0:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
	case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
	case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
	case ssa.OpARMMOVWaddr:
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

		var wantreg string
		// MOVW $sym+off(base), R
		// the assembler expands it as follows:
		// - base is SP: add the constant offset to SP (R13);
		//               when the constant is large, the tmp register (R11) may be used
		// - base is SB: load the external address from the constant pool (using a relocation)
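		// For example (a sketch of the expansion, assuming a small offset):
		// MOVW $x-8(SP), R2 is assembled roughly as ADD $off, R13, R2, while
		// MOVW $sym(SB), R2 loads sym's address from the literal pool via a relocation.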
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *obj.LSym:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *gc.Node:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}

	case ssa.OpARMMOVBload,
		ssa.OpARMMOVBUload,
		ssa.OpARMMOVHload,
		ssa.OpARMMOVHUload,
		ssa.OpARMMOVWload,
		ssa.OpARMMOVFload,
		ssa.OpARMMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVBstore,
		ssa.OpARMMOVHstore,
		ssa.OpARMMOVWstore,
		ssa.OpARMMOVFstore,
		ssa.OpARMMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
		// these are just shifts by 0 bits
		fallthrough
	case ssa.OpARMMOVWloadshiftLL:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRL:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRA:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
		// these are just shifts by 0 bits
		fallthrough
	case ssa.OpARMMOVWstoreshiftLL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
	case ssa.OpARMMOVBreg,
		ssa.OpARMMOVBUreg,
		ssa.OpARMMOVHreg,
		ssa.OpARMMOVHUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
				// arg is a proper-typed load, already zero/sign-extended, don't extend again
				if v.Reg() == v.Args[0].Reg() {
					return
				}
				p := s.Prog(arm.AMOVW)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = v.Args[0].Reg()
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				return
			default:
			}
		}
		if objabi.GOARM >= 6 {
			// generate the more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
			genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
			return
		}
		fallthrough
	case ssa.OpARMMVN,
		ssa.OpARMCLZ,
		ssa.OpARMREV,
		ssa.OpARMRBIT,
		ssa.OpARMSQRTD,
		ssa.OpARMNEGF,
		ssa.OpARMNEGD,
		ssa.OpARMMOVWF,
		ssa.OpARMMOVWD,
		ssa.OpARMMOVFW,
		ssa.OpARMMOVDW,
		ssa.OpARMMOVFD,
		ssa.OpARMMOVDF:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVWUF,
		ssa.OpARMMOVWUD,
		ssa.OpARMMOVFWU,
		ssa.OpARMMOVDWU:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_UBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWHSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWLSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_LS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
		s.Call(v)
	case ssa.OpARMCALLudiv:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Udiv
	case ssa.OpARMLoweredWB:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = v.Aux.(*obj.LSym)
	case ssa.OpARMDUFFZERO:
		p := s.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Duffzero
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := s.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Duffcopy
		p.To.Offset = v.AuxInt
	case ssa.OpARMLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := s.Prog(arm.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpARMLoweredZero:
		// MOVW.P	Rarg2, 4(R1)
		// CMP	Rarg1, R1
		// BLE	-2(PC)
		// arg1 is the address of the last element to zero
		// arg2 is known to be zero
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REG_R1
		p.To.Offset = sz
		p2 := s.Prog(arm.ACMP)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Args[1].Reg()
		p2.Reg = arm.REG_R1
		p3 := s.Prog(arm.ABLE)
		p3.To.Type = obj.TYPE_BRANCH
		gc.Patch(p3, p)
	case ssa.OpARMLoweredMove:
		// MOVW.P	4(R1), Rtmp
		// MOVW.P	Rtmp, 4(R2)
		// CMP	Rarg2, R1
		// BLE	-3(PC)
		// arg2 is the address of the last element of src
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = arm.REG_R1
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		p2 := s.Prog(mov)
		p2.Scond = arm.C_PBIT
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = arm.REG_R2
		p2.To.Offset = sz
		p3 := s.Prog(arm.ACMP)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[2].Reg()
		p3.Reg = arm.REG_R1
		p4 := s.Prog(arm.ABLE)
		p4.To.Type = obj.TYPE_BRANCH
		gc.Patch(p4, p)
	case ssa.OpARMEqual,
		ssa.OpARMNotEqual,
		ssa.OpARMLessThan,
		ssa.OpARMLessEqual,
		ssa.OpARMGreaterThan,
		ssa.OpARMGreaterEqual,
		ssa.OpARMLessThanU,
		ssa.OpARMLessEqualU,
		ssa.OpARMGreaterThanU,
		ssa.OpARMGreaterEqualU:
		// generate boolean values
		// use conditional move
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p = s.Prog(arm.AMOVW)
		p.Scond = condBits[v.Op]
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
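		// e.g. for OpARMEqual this emits: MOVW $0, Rd; MOVW.EQ $1, Rd
		// (the second move is conditional on the already-set flags).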
	case ssa.OpARMLoweredGetClosurePtr:
		// Closure pointer is R7 (arm.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpARMLoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -gc.Ctxt.FixedFrameSize()
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMLoweredGetCallerPC:
		p := s.Prog(obj.AGETCALLERPC)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMFlagEQ,
		ssa.OpARMFlagLT_ULT,
		ssa.OpARMFlagLT_UGT,
		ssa.OpARMFlagGT_ULT,
		ssa.OpARMFlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpARMInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

var condBits = map[ssa.Op]uint8{
	ssa.OpARMEqual:         arm.C_SCOND_EQ,
	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
	ssa.OpARMLessThan:      arm.C_SCOND_LT,
	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
}

var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
}
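// asm is the branch taken when the block's condition holds; invasm is its
// negation, used by ssaGenBlock below when Succs[0] is the fallthrough block.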

func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockDefer:
		// defer returns in R0:
		// 0 if we should continue executing,
		// 1 if we should jump to the deferreturn call
		p := s.Prog(arm.ACMP)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.Reg = arm.REG_R0
		p = s.Prog(arm.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here

	case ssa.BlockRet:
		s.Prog(obj.ARET)

	case ssa.BlockRetJmp:
		p := s.Prog(obj.ARET)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)

	case ssa.BlockARMEQ, ssa.BlockARMNE,
		ssa.BlockARMLT, ssa.BlockARMGE,
		ssa.BlockARMLE, ssa.BlockARMGT,
		ssa.BlockARMULT, ssa.BlockARMUGT,
		ssa.BlockARMULE, ssa.BlockARMUGE:
		jmp := blockJump[b.Kind]
		switch next {
		case b.Succs[0].Block():
			s.Br(jmp.invasm, b.Succs[1].Block())
		case b.Succs[1].Block():
			s.Br(jmp.asm, b.Succs[0].Block())
		default:
			if b.Likely != ssa.BranchUnlikely {
				s.Br(jmp.asm, b.Succs[0].Block())
				s.Br(obj.AJMP, b.Succs[1].Block())
			} else {
				s.Br(jmp.invasm, b.Succs[1].Block())
				s.Br(obj.AJMP, b.Succs[0].Block())
			}
		}

	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}