github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/cmd/compile/internal/arm/ssa.go

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package arm
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  
    11  	"cmd/compile/internal/gc"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/compile/internal/types"
    14  	"cmd/internal/obj"
    15  	"cmd/internal/obj/arm"
    16  	"cmd/internal/objabi"
    17  )
    18  
    19  // loadByType returns the load instruction of the given type.
    20  func loadByType(t *types.Type) obj.As {
    21  	if t.IsFloat() {
    22  		switch t.Size() {
    23  		case 4:
    24  			return arm.AMOVF
    25  		case 8:
    26  			return arm.AMOVD
    27  		}
    28  	} else {
    29  		switch t.Size() {
    30  		case 1:
    31  			if t.IsSigned() {
    32  				return arm.AMOVB
    33  			} else {
    34  				return arm.AMOVBU
    35  			}
    36  		case 2:
    37  			if t.IsSigned() {
    38  				return arm.AMOVH
    39  			} else {
    40  				return arm.AMOVHU
    41  			}
    42  		case 4:
    43  			return arm.AMOVW
    44  		}
    45  	}
    46  	panic("bad load type")
    47  }
    48  
    49  // storeByType returns the store instruction of the given type.
    50  func storeByType(t *types.Type) obj.As {
    51  	if t.IsFloat() {
    52  		switch t.Size() {
    53  		case 4:
    54  			return arm.AMOVF
    55  		case 8:
    56  			return arm.AMOVD
    57  		}
    58  	} else {
    59  		switch t.Size() {
    60  		case 1:
    61  			return arm.AMOVB
    62  		case 2:
    63  			return arm.AMOVH
    64  		case 4:
    65  			return arm.AMOVW
    66  		}
    67  	}
    68  	panic("bad store type")
    69  }
    70  
     71  // The shift type is used as the Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
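         // The encoding (decoded by String below) is:
         //	bits 0-3:  the register to be shifted
         //	bit 4:     0 for a constant shift, 1 for a register shift
         //	bits 5-6:  the shift type (arm.SHIFT_LL, SHIFT_LR, SHIFT_AR or SHIFT_RR)
         //	bits 7-11: the shift amount, for a constant shift
         //	bits 8-11: the register holding the shift amount, for a register shift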
    72  type shift int64
    73  
    74  // copied from ../../../internal/obj/util.go:/TYPE_SHIFT
    75  func (v shift) String() string {
    76  	op := "<<>>->@>"[((v>>5)&3)<<1:]
    77  	if v&(1<<4) != 0 {
    78  		// register shift
    79  		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
    80  	} else {
    81  		// constant shift
    82  		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
    83  	}
    84  }
    85  
    86  // makeshift encodes a register shifted by a constant
    87  func makeshift(reg int16, typ int64, s int64) shift {
    88  	return shift(int64(reg&0xf) | typ | (s&31)<<7)
    89  }
    90  
    91  // genshift generates a Prog for r = r0 op (r1 shifted by n)
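         // If r is 0 the destination operand is omitted; that form is used by the
         // compare ops (CMP, CMN, TST, TEQ), which only set the condition flags.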
    92  func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
    93  	p := s.Prog(as)
    94  	p.From.Type = obj.TYPE_SHIFT
    95  	p.From.Offset = int64(makeshift(r1, typ, n))
    96  	p.Reg = r0
    97  	if r != 0 {
    98  		p.To.Type = obj.TYPE_REG
    99  		p.To.Reg = r
   100  	}
   101  	return p
   102  }
   103  
   104  // makeregshift encodes a register shifted by a register
   105  func makeregshift(r1 int16, typ int64, r2 int16) shift {
   106  	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
   107  }
   108  
   109  // genregshift generates a Prog for r = r0 op (r1 shifted by r2)
   110  func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
   111  	p := s.Prog(as)
   112  	p.From.Type = obj.TYPE_SHIFT
   113  	p.From.Offset = int64(makeregshift(r1, typ, r2))
   114  	p.Reg = r0
   115  	if r != 0 {
   116  		p.To.Type = obj.TYPE_REG
   117  		p.To.Reg = r
   118  	}
   119  	return p
   120  }
   121  
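         // ssaGenValue emits the machine instructions (obj.Progs) for a single SSA value.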
   122  func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
   123  	switch v.Op {
   124  	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
   125  		if v.Type.IsMemory() {
   126  			return
   127  		}
   128  		x := v.Args[0].Reg()
   129  		y := v.Reg()
   130  		if x == y {
   131  			return
   132  		}
   133  		as := arm.AMOVW
   134  		if v.Type.IsFloat() {
   135  			switch v.Type.Size() {
   136  			case 4:
   137  				as = arm.AMOVF
   138  			case 8:
   139  				as = arm.AMOVD
   140  			default:
   141  				panic("bad float size")
   142  			}
   143  		}
   144  		p := s.Prog(as)
   145  		p.From.Type = obj.TYPE_REG
   146  		p.From.Reg = x
   147  		p.To.Type = obj.TYPE_REG
   148  		p.To.Reg = y
   149  	case ssa.OpARMMOVWnop:
   150  		if v.Reg() != v.Args[0].Reg() {
   151  			v.Fatalf("input[0] and output not in same register %s", v.LongString())
   152  		}
   153  		// nothing to do
   154  	case ssa.OpLoadReg:
   155  		if v.Type.IsFlags() {
   156  			v.Fatalf("load flags not implemented: %v", v.LongString())
   157  			return
   158  		}
   159  		p := s.Prog(loadByType(v.Type))
   160  		gc.AddrAuto(&p.From, v.Args[0])
   161  		p.To.Type = obj.TYPE_REG
   162  		p.To.Reg = v.Reg()
   163  	case ssa.OpStoreReg:
   164  		if v.Type.IsFlags() {
   165  			v.Fatalf("store flags not implemented: %v", v.LongString())
   166  			return
   167  		}
   168  		p := s.Prog(storeByType(v.Type))
   169  		p.From.Type = obj.TYPE_REG
   170  		p.From.Reg = v.Args[0].Reg()
   171  		gc.AddrAuto(&p.To, v)
   172  	case ssa.OpARMADD,
   173  		ssa.OpARMADC,
   174  		ssa.OpARMSUB,
   175  		ssa.OpARMSBC,
   176  		ssa.OpARMRSB,
   177  		ssa.OpARMAND,
   178  		ssa.OpARMOR,
   179  		ssa.OpARMXOR,
   180  		ssa.OpARMBIC,
   181  		ssa.OpARMMUL,
   182  		ssa.OpARMADDF,
   183  		ssa.OpARMADDD,
   184  		ssa.OpARMSUBF,
   185  		ssa.OpARMSUBD,
   186  		ssa.OpARMMULF,
   187  		ssa.OpARMMULD,
   188  		ssa.OpARMNMULF,
   189  		ssa.OpARMNMULD,
   190  		ssa.OpARMDIVF,
   191  		ssa.OpARMDIVD:
   192  		r := v.Reg()
   193  		r1 := v.Args[0].Reg()
   194  		r2 := v.Args[1].Reg()
   195  		p := s.Prog(v.Op.Asm())
   196  		p.From.Type = obj.TYPE_REG
   197  		p.From.Reg = r2
   198  		p.Reg = r1
   199  		p.To.Type = obj.TYPE_REG
   200  		p.To.Reg = r
   201  	case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD:
   202  		r := v.Reg()
   203  		r0 := v.Args[0].Reg()
   204  		r1 := v.Args[1].Reg()
   205  		r2 := v.Args[2].Reg()
   206  		if r != r0 {
   207  			v.Fatalf("result and addend are not in the same register: %v", v.LongString())
   208  		}
   209  		p := s.Prog(v.Op.Asm())
   210  		p.From.Type = obj.TYPE_REG
   211  		p.From.Reg = r2
   212  		p.Reg = r1
   213  		p.To.Type = obj.TYPE_REG
   214  		p.To.Reg = r
   215  	case ssa.OpARMADDS,
   216  		ssa.OpARMSUBS:
   217  		r := v.Reg0()
   218  		r1 := v.Args[0].Reg()
   219  		r2 := v.Args[1].Reg()
   220  		p := s.Prog(v.Op.Asm())
   221  		p.Scond = arm.C_SBIT
   222  		p.From.Type = obj.TYPE_REG
   223  		p.From.Reg = r2
   224  		p.Reg = r1
   225  		p.To.Type = obj.TYPE_REG
   226  		p.To.Reg = r
   227  	case ssa.OpARMSLL,
   228  		ssa.OpARMSRL,
   229  		ssa.OpARMSRA:
   230  		r := v.Reg()
   231  		r1 := v.Args[0].Reg()
   232  		r2 := v.Args[1].Reg()
   233  		p := s.Prog(v.Op.Asm())
   234  		p.From.Type = obj.TYPE_REG
   235  		p.From.Reg = r2
   236  		p.Reg = r1
   237  		p.To.Type = obj.TYPE_REG
   238  		p.To.Reg = r
   239  	case ssa.OpARMSRAcond:
    240  	// ARM shift instructions use only the low-order byte of the shift amount,
    241  	// so generate conditional instructions to deal with large shifts.
    242  	// The condition flags have already been set:
   243  		// SRA.HS	$31, Rarg0, Rdst // shift 31 bits to get the sign bit
   244  		// SRA.LO	Rarg1, Rarg0, Rdst
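         	// That is, if the flags report a large shift amount (HS), shift by 31 so only
         	// the sign bit remains; otherwise (LO) shift by the amount in Rarg1.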
   245  		r := v.Reg()
   246  		r1 := v.Args[0].Reg()
   247  		r2 := v.Args[1].Reg()
   248  		p := s.Prog(arm.ASRA)
   249  		p.Scond = arm.C_SCOND_HS
   250  		p.From.Type = obj.TYPE_CONST
   251  		p.From.Offset = 31
   252  		p.Reg = r1
   253  		p.To.Type = obj.TYPE_REG
   254  		p.To.Reg = r
   255  		p = s.Prog(arm.ASRA)
   256  		p.Scond = arm.C_SCOND_LO
   257  		p.From.Type = obj.TYPE_REG
   258  		p.From.Reg = r2
   259  		p.Reg = r1
   260  		p.To.Type = obj.TYPE_REG
   261  		p.To.Reg = r
   262  	case ssa.OpARMBFX, ssa.OpARMBFXU:
   263  		p := s.Prog(v.Op.Asm())
   264  		p.From.Type = obj.TYPE_CONST
   265  		p.From.Offset = v.AuxInt >> 8
   266  		p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
   267  		p.Reg = v.Args[0].Reg()
   268  		p.To.Type = obj.TYPE_REG
   269  		p.To.Reg = v.Reg()
   270  	case ssa.OpARMADDconst,
   271  		ssa.OpARMADCconst,
   272  		ssa.OpARMSUBconst,
   273  		ssa.OpARMSBCconst,
   274  		ssa.OpARMRSBconst,
   275  		ssa.OpARMRSCconst,
   276  		ssa.OpARMANDconst,
   277  		ssa.OpARMORconst,
   278  		ssa.OpARMXORconst,
   279  		ssa.OpARMBICconst,
   280  		ssa.OpARMSLLconst,
   281  		ssa.OpARMSRLconst,
   282  		ssa.OpARMSRAconst:
   283  		p := s.Prog(v.Op.Asm())
   284  		p.From.Type = obj.TYPE_CONST
   285  		p.From.Offset = v.AuxInt
   286  		p.Reg = v.Args[0].Reg()
   287  		p.To.Type = obj.TYPE_REG
   288  		p.To.Reg = v.Reg()
   289  	case ssa.OpARMADDSconst,
   290  		ssa.OpARMSUBSconst,
   291  		ssa.OpARMRSBSconst:
   292  		p := s.Prog(v.Op.Asm())
   293  		p.Scond = arm.C_SBIT
   294  		p.From.Type = obj.TYPE_CONST
   295  		p.From.Offset = v.AuxInt
   296  		p.Reg = v.Args[0].Reg()
   297  		p.To.Type = obj.TYPE_REG
   298  		p.To.Reg = v.Reg0()
   299  	case ssa.OpARMSRRconst:
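         		// rotate right by a constant: MOVW Rarg0@>c, Rdst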
   300  		genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
   301  	case ssa.OpARMADDshiftLL,
   302  		ssa.OpARMADCshiftLL,
   303  		ssa.OpARMSUBshiftLL,
   304  		ssa.OpARMSBCshiftLL,
   305  		ssa.OpARMRSBshiftLL,
   306  		ssa.OpARMRSCshiftLL,
   307  		ssa.OpARMANDshiftLL,
   308  		ssa.OpARMORshiftLL,
   309  		ssa.OpARMXORshiftLL,
   310  		ssa.OpARMBICshiftLL:
   311  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
   312  	case ssa.OpARMADDSshiftLL,
   313  		ssa.OpARMSUBSshiftLL,
   314  		ssa.OpARMRSBSshiftLL:
   315  		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
   316  		p.Scond = arm.C_SBIT
   317  	case ssa.OpARMADDshiftRL,
   318  		ssa.OpARMADCshiftRL,
   319  		ssa.OpARMSUBshiftRL,
   320  		ssa.OpARMSBCshiftRL,
   321  		ssa.OpARMRSBshiftRL,
   322  		ssa.OpARMRSCshiftRL,
   323  		ssa.OpARMANDshiftRL,
   324  		ssa.OpARMORshiftRL,
   325  		ssa.OpARMXORshiftRL,
   326  		ssa.OpARMBICshiftRL:
   327  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
   328  	case ssa.OpARMADDSshiftRL,
   329  		ssa.OpARMSUBSshiftRL,
   330  		ssa.OpARMRSBSshiftRL:
   331  		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
   332  		p.Scond = arm.C_SBIT
   333  	case ssa.OpARMADDshiftRA,
   334  		ssa.OpARMADCshiftRA,
   335  		ssa.OpARMSUBshiftRA,
   336  		ssa.OpARMSBCshiftRA,
   337  		ssa.OpARMRSBshiftRA,
   338  		ssa.OpARMRSCshiftRA,
   339  		ssa.OpARMANDshiftRA,
   340  		ssa.OpARMORshiftRA,
   341  		ssa.OpARMXORshiftRA,
   342  		ssa.OpARMBICshiftRA:
   343  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
   344  	case ssa.OpARMADDSshiftRA,
   345  		ssa.OpARMSUBSshiftRA,
   346  		ssa.OpARMRSBSshiftRA:
   347  		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
   348  		p.Scond = arm.C_SBIT
   349  	case ssa.OpARMXORshiftRR:
   350  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
   351  	case ssa.OpARMMVNshiftLL:
   352  		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
   353  	case ssa.OpARMMVNshiftRL:
   354  		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
   355  	case ssa.OpARMMVNshiftRA:
   356  		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
   357  	case ssa.OpARMMVNshiftLLreg:
   358  		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
   359  	case ssa.OpARMMVNshiftRLreg:
   360  		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
   361  	case ssa.OpARMMVNshiftRAreg:
   362  		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
   363  	case ssa.OpARMADDshiftLLreg,
   364  		ssa.OpARMADCshiftLLreg,
   365  		ssa.OpARMSUBshiftLLreg,
   366  		ssa.OpARMSBCshiftLLreg,
   367  		ssa.OpARMRSBshiftLLreg,
   368  		ssa.OpARMRSCshiftLLreg,
   369  		ssa.OpARMANDshiftLLreg,
   370  		ssa.OpARMORshiftLLreg,
   371  		ssa.OpARMXORshiftLLreg,
   372  		ssa.OpARMBICshiftLLreg:
   373  		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
   374  	case ssa.OpARMADDSshiftLLreg,
   375  		ssa.OpARMSUBSshiftLLreg,
   376  		ssa.OpARMRSBSshiftLLreg:
   377  		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
   378  		p.Scond = arm.C_SBIT
   379  	case ssa.OpARMADDshiftRLreg,
   380  		ssa.OpARMADCshiftRLreg,
   381  		ssa.OpARMSUBshiftRLreg,
   382  		ssa.OpARMSBCshiftRLreg,
   383  		ssa.OpARMRSBshiftRLreg,
   384  		ssa.OpARMRSCshiftRLreg,
   385  		ssa.OpARMANDshiftRLreg,
   386  		ssa.OpARMORshiftRLreg,
   387  		ssa.OpARMXORshiftRLreg,
   388  		ssa.OpARMBICshiftRLreg:
   389  		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
   390  	case ssa.OpARMADDSshiftRLreg,
   391  		ssa.OpARMSUBSshiftRLreg,
   392  		ssa.OpARMRSBSshiftRLreg:
   393  		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
   394  		p.Scond = arm.C_SBIT
   395  	case ssa.OpARMADDshiftRAreg,
   396  		ssa.OpARMADCshiftRAreg,
   397  		ssa.OpARMSUBshiftRAreg,
   398  		ssa.OpARMSBCshiftRAreg,
   399  		ssa.OpARMRSBshiftRAreg,
   400  		ssa.OpARMRSCshiftRAreg,
   401  		ssa.OpARMANDshiftRAreg,
   402  		ssa.OpARMORshiftRAreg,
   403  		ssa.OpARMXORshiftRAreg,
   404  		ssa.OpARMBICshiftRAreg:
   405  		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
   406  	case ssa.OpARMADDSshiftRAreg,
   407  		ssa.OpARMSUBSshiftRAreg,
   408  		ssa.OpARMRSBSshiftRAreg:
   409  		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
   410  		p.Scond = arm.C_SBIT
   411  	case ssa.OpARMHMUL,
   412  		ssa.OpARMHMULU:
   413  		// 32-bit high multiplication
   414  		p := s.Prog(v.Op.Asm())
   415  		p.From.Type = obj.TYPE_REG
   416  		p.From.Reg = v.Args[0].Reg()
   417  		p.Reg = v.Args[1].Reg()
   418  		p.To.Type = obj.TYPE_REGREG
   419  		p.To.Reg = v.Reg()
    420  		p.To.Offset = arm.REGTMP // discard the low 32 bits into the tmp register
   421  	case ssa.OpARMMULLU:
    422  		// 32-bit multiplication with a 64-bit result: high 32 bits in out0, low 32 bits in out1
   423  		p := s.Prog(v.Op.Asm())
   424  		p.From.Type = obj.TYPE_REG
   425  		p.From.Reg = v.Args[0].Reg()
   426  		p.Reg = v.Args[1].Reg()
   427  		p.To.Type = obj.TYPE_REGREG
   428  		p.To.Reg = v.Reg0()           // high 32-bit
   429  		p.To.Offset = int64(v.Reg1()) // low 32-bit
   430  	case ssa.OpARMMULA, ssa.OpARMMULS:
   431  		p := s.Prog(v.Op.Asm())
   432  		p.From.Type = obj.TYPE_REG
   433  		p.From.Reg = v.Args[0].Reg()
   434  		p.Reg = v.Args[1].Reg()
   435  		p.To.Type = obj.TYPE_REGREG2
   436  		p.To.Reg = v.Reg()                   // result
   437  		p.To.Offset = int64(v.Args[2].Reg()) // addend
   438  	case ssa.OpARMMOVWconst:
   439  		p := s.Prog(v.Op.Asm())
   440  		p.From.Type = obj.TYPE_CONST
   441  		p.From.Offset = v.AuxInt
   442  		p.To.Type = obj.TYPE_REG
   443  		p.To.Reg = v.Reg()
   444  	case ssa.OpARMMOVFconst,
   445  		ssa.OpARMMOVDconst:
   446  		p := s.Prog(v.Op.Asm())
   447  		p.From.Type = obj.TYPE_FCONST
   448  		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
   449  		p.To.Type = obj.TYPE_REG
   450  		p.To.Reg = v.Reg()
   451  	case ssa.OpARMCMP,
   452  		ssa.OpARMCMN,
   453  		ssa.OpARMTST,
   454  		ssa.OpARMTEQ,
   455  		ssa.OpARMCMPF,
   456  		ssa.OpARMCMPD:
   457  		p := s.Prog(v.Op.Asm())
   458  		p.From.Type = obj.TYPE_REG
    459  		// Special operand layout in ARM assembly:
    460  		// compared to x86, the operands of ARM's CMP are reversed.
   461  		p.From.Reg = v.Args[1].Reg()
   462  		p.Reg = v.Args[0].Reg()
   463  	case ssa.OpARMCMPconst,
   464  		ssa.OpARMCMNconst,
   465  		ssa.OpARMTSTconst,
   466  		ssa.OpARMTEQconst:
   467  		// Special layout in ARM assembly
   468  		p := s.Prog(v.Op.Asm())
   469  		p.From.Type = obj.TYPE_CONST
   470  		p.From.Offset = v.AuxInt
   471  		p.Reg = v.Args[0].Reg()
   472  	case ssa.OpARMCMPF0,
   473  		ssa.OpARMCMPD0:
   474  		p := s.Prog(v.Op.Asm())
   475  		p.From.Type = obj.TYPE_REG
   476  		p.From.Reg = v.Args[0].Reg()
   477  	case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
   478  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
   479  	case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
   480  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
   481  	case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
   482  		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
   483  	case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
   484  		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
   485  	case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
   486  		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
   487  	case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
   488  		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
   489  	case ssa.OpARMMOVWaddr:
   490  		p := s.Prog(arm.AMOVW)
   491  		p.From.Type = obj.TYPE_ADDR
   492  		p.From.Reg = v.Args[0].Reg()
   493  		p.To.Type = obj.TYPE_REG
   494  		p.To.Reg = v.Reg()
   495  
   496  		var wantreg string
   497  		// MOVW $sym+off(base), R
    498  		// the assembler expands it as follows:
    499  		// - base is SP: add a constant offset to SP (R13);
    500  		//               when the constant is large, the tmp register (R11) may be used
    501  		// - base is SB: load the external address from the constant pool (via a relocation)
   502  		switch v.Aux.(type) {
   503  		default:
   504  			v.Fatalf("aux is of unknown type %T", v.Aux)
   505  		case *obj.LSym:
   506  			wantreg = "SB"
   507  			gc.AddAux(&p.From, v)
   508  		case *gc.Node:
   509  			wantreg = "SP"
   510  			gc.AddAux(&p.From, v)
   511  		case nil:
   512  			// No sym, just MOVW $off(SP), R
   513  			wantreg = "SP"
   514  			p.From.Offset = v.AuxInt
   515  		}
   516  		if reg := v.Args[0].RegName(); reg != wantreg {
   517  			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
   518  		}
   519  
   520  	case ssa.OpARMMOVBload,
   521  		ssa.OpARMMOVBUload,
   522  		ssa.OpARMMOVHload,
   523  		ssa.OpARMMOVHUload,
   524  		ssa.OpARMMOVWload,
   525  		ssa.OpARMMOVFload,
   526  		ssa.OpARMMOVDload:
   527  		p := s.Prog(v.Op.Asm())
   528  		p.From.Type = obj.TYPE_MEM
   529  		p.From.Reg = v.Args[0].Reg()
   530  		gc.AddAux(&p.From, v)
   531  		p.To.Type = obj.TYPE_REG
   532  		p.To.Reg = v.Reg()
   533  	case ssa.OpARMMOVBstore,
   534  		ssa.OpARMMOVHstore,
   535  		ssa.OpARMMOVWstore,
   536  		ssa.OpARMMOVFstore,
   537  		ssa.OpARMMOVDstore:
   538  		p := s.Prog(v.Op.Asm())
   539  		p.From.Type = obj.TYPE_REG
   540  		p.From.Reg = v.Args[1].Reg()
   541  		p.To.Type = obj.TYPE_MEM
   542  		p.To.Reg = v.Args[0].Reg()
   543  		gc.AddAux(&p.To, v)
   544  	case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
    545  		// this is just a shift by 0 bits
   546  		fallthrough
   547  	case ssa.OpARMMOVWloadshiftLL:
   548  		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
   549  		p.From.Reg = v.Args[0].Reg()
   550  	case ssa.OpARMMOVWloadshiftRL:
   551  		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
   552  		p.From.Reg = v.Args[0].Reg()
   553  	case ssa.OpARMMOVWloadshiftRA:
   554  		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
   555  		p.From.Reg = v.Args[0].Reg()
   556  	case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
    557  		// this is just a shift by 0 bits
   558  		fallthrough
   559  	case ssa.OpARMMOVWstoreshiftLL:
   560  		p := s.Prog(v.Op.Asm())
   561  		p.From.Type = obj.TYPE_REG
   562  		p.From.Reg = v.Args[2].Reg()
   563  		p.To.Type = obj.TYPE_SHIFT
   564  		p.To.Reg = v.Args[0].Reg()
   565  		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
   566  	case ssa.OpARMMOVWstoreshiftRL:
   567  		p := s.Prog(v.Op.Asm())
   568  		p.From.Type = obj.TYPE_REG
   569  		p.From.Reg = v.Args[2].Reg()
   570  		p.To.Type = obj.TYPE_SHIFT
   571  		p.To.Reg = v.Args[0].Reg()
   572  		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
   573  	case ssa.OpARMMOVWstoreshiftRA:
   574  		p := s.Prog(v.Op.Asm())
   575  		p.From.Type = obj.TYPE_REG
   576  		p.From.Reg = v.Args[2].Reg()
   577  		p.To.Type = obj.TYPE_SHIFT
   578  		p.To.Reg = v.Args[0].Reg()
   579  		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
   580  	case ssa.OpARMMOVBreg,
   581  		ssa.OpARMMOVBUreg,
   582  		ssa.OpARMMOVHreg,
   583  		ssa.OpARMMOVHUreg:
   584  		a := v.Args[0]
   585  		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
   586  			a = a.Args[0]
   587  		}
   588  		if a.Op == ssa.OpLoadReg {
   589  			t := a.Type
   590  			switch {
   591  			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
   592  				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
   593  				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
   594  				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
    595  				// arg is a properly typed load, already zero/sign-extended; don't extend again
   596  				if v.Reg() == v.Args[0].Reg() {
   597  					return
   598  				}
   599  				p := s.Prog(arm.AMOVW)
   600  				p.From.Type = obj.TYPE_REG
   601  				p.From.Reg = v.Args[0].Reg()
   602  				p.To.Type = obj.TYPE_REG
   603  				p.To.Reg = v.Reg()
   604  				return
   605  			default:
   606  			}
   607  		}
   608  		if objabi.GOARM >= 6 {
   609  			// generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
   610  			genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
   611  			return
   612  		}
   613  		fallthrough
   614  	case ssa.OpARMMVN,
   615  		ssa.OpARMCLZ,
   616  		ssa.OpARMREV,
   617  		ssa.OpARMRBIT,
   618  		ssa.OpARMSQRTD,
   619  		ssa.OpARMNEGF,
   620  		ssa.OpARMNEGD,
   621  		ssa.OpARMMOVWF,
   622  		ssa.OpARMMOVWD,
   623  		ssa.OpARMMOVFW,
   624  		ssa.OpARMMOVDW,
   625  		ssa.OpARMMOVFD,
   626  		ssa.OpARMMOVDF:
   627  		p := s.Prog(v.Op.Asm())
   628  		p.From.Type = obj.TYPE_REG
   629  		p.From.Reg = v.Args[0].Reg()
   630  		p.To.Type = obj.TYPE_REG
   631  		p.To.Reg = v.Reg()
   632  	case ssa.OpARMMOVWUF,
   633  		ssa.OpARMMOVWUD,
   634  		ssa.OpARMMOVFWU,
   635  		ssa.OpARMMOVDWU:
   636  		p := s.Prog(v.Op.Asm())
   637  		p.Scond = arm.C_UBIT
   638  		p.From.Type = obj.TYPE_REG
   639  		p.From.Reg = v.Args[0].Reg()
   640  		p.To.Type = obj.TYPE_REG
   641  		p.To.Reg = v.Reg()
   642  	case ssa.OpARMCMOVWHSconst:
   643  		p := s.Prog(arm.AMOVW)
   644  		p.Scond = arm.C_SCOND_HS
   645  		p.From.Type = obj.TYPE_CONST
   646  		p.From.Offset = v.AuxInt
   647  		p.To.Type = obj.TYPE_REG
   648  		p.To.Reg = v.Reg()
   649  	case ssa.OpARMCMOVWLSconst:
   650  		p := s.Prog(arm.AMOVW)
   651  		p.Scond = arm.C_SCOND_LS
   652  		p.From.Type = obj.TYPE_CONST
   653  		p.From.Offset = v.AuxInt
   654  		p.To.Type = obj.TYPE_REG
   655  		p.To.Reg = v.Reg()
   656  	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
   657  		s.Call(v)
   658  	case ssa.OpARMCALLudiv:
   659  		p := s.Prog(obj.ACALL)
   660  		p.To.Type = obj.TYPE_MEM
   661  		p.To.Name = obj.NAME_EXTERN
   662  		p.To.Sym = gc.Udiv
   663  	case ssa.OpARMLoweredWB:
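         		// Write barrier: call the runtime write-barrier routine named by v.Aux.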
   664  		p := s.Prog(obj.ACALL)
   665  		p.To.Type = obj.TYPE_MEM
   666  		p.To.Name = obj.NAME_EXTERN
   667  		p.To.Sym = v.Aux.(*obj.LSym)
   668  	case ssa.OpARMDUFFZERO:
   669  		p := s.Prog(obj.ADUFFZERO)
   670  		p.To.Type = obj.TYPE_MEM
   671  		p.To.Name = obj.NAME_EXTERN
   672  		p.To.Sym = gc.Duffzero
   673  		p.To.Offset = v.AuxInt
   674  	case ssa.OpARMDUFFCOPY:
   675  		p := s.Prog(obj.ADUFFCOPY)
   676  		p.To.Type = obj.TYPE_MEM
   677  		p.To.Name = obj.NAME_EXTERN
   678  		p.To.Sym = gc.Duffcopy
   679  		p.To.Offset = v.AuxInt
   680  	case ssa.OpARMLoweredNilCheck:
   681  		// Issue a load which will fault if arg is nil.
   682  		p := s.Prog(arm.AMOVB)
   683  		p.From.Type = obj.TYPE_MEM
   684  		p.From.Reg = v.Args[0].Reg()
   685  		gc.AddAux(&p.From, v)
   686  		p.To.Type = obj.TYPE_REG
   687  		p.To.Reg = arm.REGTMP
   688  		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
   689  			gc.Warnl(v.Pos, "generated nil check")
   690  		}
   691  	case ssa.OpARMLoweredZero:
   692  		// MOVW.P	Rarg2, 4(R1)
   693  		// CMP	Rarg1, R1
   694  		// BLE	-2(PC)
   695  		// arg1 is the address of the last element to zero
   696  		// arg2 is known to be zero
   697  		// auxint is alignment
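         		// (the MOVW.P shown above is the word-aligned case; MOVH.P or MOVB.P with a
         		// correspondingly smaller post-increment is used when the alignment is 2 or 1)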
   698  		var sz int64
   699  		var mov obj.As
   700  		switch {
   701  		case v.AuxInt%4 == 0:
   702  			sz = 4
   703  			mov = arm.AMOVW
   704  		case v.AuxInt%2 == 0:
   705  			sz = 2
   706  			mov = arm.AMOVH
   707  		default:
   708  			sz = 1
   709  			mov = arm.AMOVB
   710  		}
   711  		p := s.Prog(mov)
   712  		p.Scond = arm.C_PBIT
   713  		p.From.Type = obj.TYPE_REG
   714  		p.From.Reg = v.Args[2].Reg()
   715  		p.To.Type = obj.TYPE_MEM
   716  		p.To.Reg = arm.REG_R1
   717  		p.To.Offset = sz
   718  		p2 := s.Prog(arm.ACMP)
   719  		p2.From.Type = obj.TYPE_REG
   720  		p2.From.Reg = v.Args[1].Reg()
   721  		p2.Reg = arm.REG_R1
   722  		p3 := s.Prog(arm.ABLE)
   723  		p3.To.Type = obj.TYPE_BRANCH
   724  		gc.Patch(p3, p)
   725  	case ssa.OpARMLoweredMove:
   726  		// MOVW.P	4(R1), Rtmp
   727  		// MOVW.P	Rtmp, 4(R2)
   728  		// CMP	Rarg2, R1
   729  		// BLE	-3(PC)
   730  		// arg2 is the address of the last element of src
   731  		// auxint is alignment
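         		// The loop post-increments R1 (src pointer) and R2 (dst pointer) in place;
         		// the element width follows the alignment, as in LoweredZero above.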
   732  		var sz int64
   733  		var mov obj.As
   734  		switch {
   735  		case v.AuxInt%4 == 0:
   736  			sz = 4
   737  			mov = arm.AMOVW
   738  		case v.AuxInt%2 == 0:
   739  			sz = 2
   740  			mov = arm.AMOVH
   741  		default:
   742  			sz = 1
   743  			mov = arm.AMOVB
   744  		}
   745  		p := s.Prog(mov)
   746  		p.Scond = arm.C_PBIT
   747  		p.From.Type = obj.TYPE_MEM
   748  		p.From.Reg = arm.REG_R1
   749  		p.From.Offset = sz
   750  		p.To.Type = obj.TYPE_REG
   751  		p.To.Reg = arm.REGTMP
   752  		p2 := s.Prog(mov)
   753  		p2.Scond = arm.C_PBIT
   754  		p2.From.Type = obj.TYPE_REG
   755  		p2.From.Reg = arm.REGTMP
   756  		p2.To.Type = obj.TYPE_MEM
   757  		p2.To.Reg = arm.REG_R2
   758  		p2.To.Offset = sz
   759  		p3 := s.Prog(arm.ACMP)
   760  		p3.From.Type = obj.TYPE_REG
   761  		p3.From.Reg = v.Args[2].Reg()
   762  		p3.Reg = arm.REG_R1
   763  		p4 := s.Prog(arm.ABLE)
   764  		p4.To.Type = obj.TYPE_BRANCH
   765  		gc.Patch(p4, p)
   766  	case ssa.OpARMEqual,
   767  		ssa.OpARMNotEqual,
   768  		ssa.OpARMLessThan,
   769  		ssa.OpARMLessEqual,
   770  		ssa.OpARMGreaterThan,
   771  		ssa.OpARMGreaterEqual,
   772  		ssa.OpARMLessThanU,
   773  		ssa.OpARMLessEqualU,
   774  		ssa.OpARMGreaterThanU,
   775  		ssa.OpARMGreaterEqualU:
    776  		// generate a boolean value:
    777  		// materialize 0, then conditionally move 1 into the register based on the flags
   778  		p := s.Prog(arm.AMOVW)
   779  		p.From.Type = obj.TYPE_CONST
   780  		p.From.Offset = 0
   781  		p.To.Type = obj.TYPE_REG
   782  		p.To.Reg = v.Reg()
   783  		p = s.Prog(arm.AMOVW)
   784  		p.Scond = condBits[v.Op]
   785  		p.From.Type = obj.TYPE_CONST
   786  		p.From.Offset = 1
   787  		p.To.Type = obj.TYPE_REG
   788  		p.To.Reg = v.Reg()
   789  	case ssa.OpARMLoweredGetClosurePtr:
   790  		// Closure pointer is R7 (arm.REGCTXT).
   791  		gc.CheckLoweredGetClosurePtr(v)
   792  	case ssa.OpARMLoweredGetCallerSP:
   793  		// caller's SP is FixedFrameSize below the address of the first arg
   794  		p := s.Prog(arm.AMOVW)
   795  		p.From.Type = obj.TYPE_ADDR
   796  		p.From.Offset = -gc.Ctxt.FixedFrameSize()
   797  		p.From.Name = obj.NAME_PARAM
   798  		p.To.Type = obj.TYPE_REG
   799  		p.To.Reg = v.Reg()
   800  	case ssa.OpARMFlagEQ,
   801  		ssa.OpARMFlagLT_ULT,
   802  		ssa.OpARMFlagLT_UGT,
   803  		ssa.OpARMFlagGT_ULT,
   804  		ssa.OpARMFlagGT_UGT:
   805  		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
   806  	case ssa.OpARMInvertFlags:
   807  		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
   808  	case ssa.OpClobber:
   809  		// TODO: implement for clobberdead experiment. Nop is ok for now.
   810  	default:
   811  		v.Fatalf("genValue not implemented: %s", v.LongString())
   812  	}
   813  }
   814  
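         // condBits maps a boolean-producing SSA op to the ARM condition code under
         // which its result is 1; ssaGenValue uses it to predicate the MOVW $1.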
   815  var condBits = map[ssa.Op]uint8{
   816  	ssa.OpARMEqual:         arm.C_SCOND_EQ,
   817  	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
   818  	ssa.OpARMLessThan:      arm.C_SCOND_LT,
   819  	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
   820  	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
   821  	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
   822  	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
   823  	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
   824  	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
   825  	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
   826  }
   827  
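         // blockJump gives, for each conditional block kind, its branch instruction and
         // the inverted branch, used when the block's first successor is laid out next.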
   828  var blockJump = map[ssa.BlockKind]struct {
   829  	asm, invasm obj.As
   830  }{
   831  	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
   832  	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
   833  	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
   834  	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
   835  	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
   836  	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
   837  	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
   838  	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
   839  	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
   840  	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
   841  }
   842  
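         // ssaGenBlock emits the control-flow instructions that end block b; next is the
         // block laid out immediately after b, so a jump to it can be omitted.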
   843  func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
   844  	switch b.Kind {
   845  	case ssa.BlockPlain:
   846  		if b.Succs[0].Block() != next {
   847  			p := s.Prog(obj.AJMP)
   848  			p.To.Type = obj.TYPE_BRANCH
   849  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   850  		}
   851  
   852  	case ssa.BlockDefer:
   853  		// defer returns in R0:
   854  		// 0 if we should continue executing
    855  		// 1 if we should jump to the deferreturn call
   856  		p := s.Prog(arm.ACMP)
   857  		p.From.Type = obj.TYPE_CONST
   858  		p.From.Offset = 0
   859  		p.Reg = arm.REG_R0
   860  		p = s.Prog(arm.ABNE)
   861  		p.To.Type = obj.TYPE_BRANCH
   862  		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
   863  		if b.Succs[0].Block() != next {
   864  			p := s.Prog(obj.AJMP)
   865  			p.To.Type = obj.TYPE_BRANCH
   866  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   867  		}
   868  
   869  	case ssa.BlockExit:
   870  		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
   871  
   872  	case ssa.BlockRet:
   873  		s.Prog(obj.ARET)
   874  
   875  	case ssa.BlockRetJmp:
   876  		p := s.Prog(obj.ARET)
   877  		p.To.Type = obj.TYPE_MEM
   878  		p.To.Name = obj.NAME_EXTERN
   879  		p.To.Sym = b.Aux.(*obj.LSym)
   880  
   881  	case ssa.BlockARMEQ, ssa.BlockARMNE,
   882  		ssa.BlockARMLT, ssa.BlockARMGE,
   883  		ssa.BlockARMLE, ssa.BlockARMGT,
   884  		ssa.BlockARMULT, ssa.BlockARMUGT,
   885  		ssa.BlockARMULE, ssa.BlockARMUGE:
   886  		jmp := blockJump[b.Kind]
   887  		var p *obj.Prog
   888  		switch next {
   889  		case b.Succs[0].Block():
   890  			p = s.Prog(jmp.invasm)
   891  			p.To.Type = obj.TYPE_BRANCH
   892  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
   893  		case b.Succs[1].Block():
   894  			p = s.Prog(jmp.asm)
   895  			p.To.Type = obj.TYPE_BRANCH
   896  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   897  		default:
   898  			p = s.Prog(jmp.asm)
   899  			p.To.Type = obj.TYPE_BRANCH
   900  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   901  			q := s.Prog(obj.AJMP)
   902  			q.To.Type = obj.TYPE_BRANCH
   903  			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
   904  		}
   905  
   906  	default:
   907  		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
   908  	}
   909  }