// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"fmt"
	"math"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm.AMOVB
			} else {
				return arm.AMOVBU
			}
		case 2:
			if t.IsSigned() {
				return arm.AMOVH
			} else {
				return arm.AMOVHU
			}
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad load type")
}

// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return arm.AMOVB
		case 2:
			return arm.AMOVH
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad store type")
}
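
// A shifted-register operand is packed into the Offset of an obj.TYPE_SHIFT
// operand with the following bit layout (this is what String, makeshift, and
// makeregshift below encode and decode):
//
//	bits 0-3:  base register (R0-R15)
//	bit 4:     0 = shift by constant, 1 = shift by register
//	bits 5-6:  shift type: 0 = LL "<<", 1 = LR ">>", 2 = AR "->", 3 = RR "@>"
//	bits 7-11: shift amount, for a constant shift
//	bits 8-11: shift register, for a register shift
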
// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
type shift int64

// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
func (v shift) String() string {
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// register shift
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	} else {
		// constant shift
		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
	}
}

// makeshift encodes a register shifted by a constant.
func makeshift(reg int16, typ int64, s int64) shift {
	return shift(int64(reg&0xf) | typ | (s&31)<<7)
}

// genshift generates a Prog for r = r0 op (r1 shifted by n).
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeshift(r1, typ, n))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}

// makeregshift encodes a register shifted by a register.
func makeregshift(r1 int16, typ int64, r2 int16) shift {
	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}

// genregshift generates a Prog for r = r0 op (r1 shifted by r2).
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeregshift(r1, typ, r2))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}

func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x == y {
			return
		}
		as := arm.AMOVW
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = arm.AMOVF
			case 8:
				as = arm.AMOVD
			default:
				panic("bad float size")
			}
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpARMMOVWnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpARMADD,
		ssa.OpARMADC,
		ssa.OpARMSUB,
		ssa.OpARMSBC,
		ssa.OpARMRSB,
		ssa.OpARMAND,
		ssa.OpARMOR,
		ssa.OpARMXOR,
		ssa.OpARMBIC,
		ssa.OpARMMUL,
		ssa.OpARMADDF,
		ssa.OpARMADDD,
		ssa.OpARMSUBF,
		ssa.OpARMSUBD,
		ssa.OpARMMULF,
		ssa.OpARMMULD,
		ssa.OpARMDIVF,
		ssa.OpARMDIVD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDS,
		ssa.OpARMSUBS:
		r := v.Reg0()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSLL,
		ssa.OpARMSRL,
		ssa.OpARMSRA:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSRAcond:
		// ARM shift instructions use only the low-order byte of the shift amount;
		// generate conditional instructions to deal with large shifts.
		// The condition flags are already set:
		// SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
		// SRA.LO Rarg1, Rarg0, Rdst
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 31
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p = s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_LO
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDconst,
		ssa.OpARMADCconst,
		ssa.OpARMSUBconst,
		ssa.OpARMSBCconst,
		ssa.OpARMRSBconst,
		ssa.OpARMRSCconst,
		ssa.OpARMANDconst,
		ssa.OpARMORconst,
		ssa.OpARMXORconst,
		ssa.OpARMBICconst,
		ssa.OpARMSLLconst,
		ssa.OpARMSRLconst,
		ssa.OpARMSRAconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMADDSconst,
		ssa.OpARMSUBSconst,
		ssa.OpARMRSBSconst:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpARMSRRconst:
		genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
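	// The *shiftLL/*shiftRL/*shiftRA ops below fold a constant shift of the
	// second operand into the ALU instruction itself, so e.g. ADDshiftLL
	// with AuxInt n computes, in a single instruction, roughly
	//	Rd = arg0 + arg1<<n
	// with the shifted operand encoded by genshift as an obj.TYPE_SHIFT operand.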
	case ssa.OpARMADDshiftLL,
		ssa.OpARMADCshiftLL,
		ssa.OpARMSUBshiftLL,
		ssa.OpARMSBCshiftLL,
		ssa.OpARMRSBshiftLL,
		ssa.OpARMRSCshiftLL,
		ssa.OpARMANDshiftLL,
		ssa.OpARMORshiftLL,
		ssa.OpARMXORshiftLL,
		ssa.OpARMBICshiftLL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMADDSshiftLL,
		ssa.OpARMSUBSshiftLL,
		ssa.OpARMRSBSshiftLL:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRL,
		ssa.OpARMADCshiftRL,
		ssa.OpARMSUBshiftRL,
		ssa.OpARMSBCshiftRL,
		ssa.OpARMRSBshiftRL,
		ssa.OpARMRSCshiftRL,
		ssa.OpARMANDshiftRL,
		ssa.OpARMORshiftRL,
		ssa.OpARMXORshiftRL,
		ssa.OpARMBICshiftRL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMADDSshiftRL,
		ssa.OpARMSUBSshiftRL,
		ssa.OpARMRSBSshiftRL:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRA,
		ssa.OpARMADCshiftRA,
		ssa.OpARMSUBshiftRA,
		ssa.OpARMSBCshiftRA,
		ssa.OpARMRSBshiftRA,
		ssa.OpARMRSCshiftRA,
		ssa.OpARMANDshiftRA,
		ssa.OpARMORshiftRA,
		ssa.OpARMXORshiftRA,
		ssa.OpARMBICshiftRA:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMADDSshiftRA,
		ssa.OpARMSUBSshiftRA,
		ssa.OpARMRSBSshiftRA:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMXORshiftRR:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMMVNshiftLL:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMMVNshiftRL:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMMVNshiftRA:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMMVNshiftLLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMMVNshiftRLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMMVNshiftRAreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
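	// The *shiftLLreg (etc.) variants are the same, except the second
	// operand is shifted by a register rather than a constant, computing
	// roughly
	//	Rd = arg0 + arg1<<arg2
	// via genregshift.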
	case ssa.OpARMADDshiftLLreg,
		ssa.OpARMADCshiftLLreg,
		ssa.OpARMSUBshiftLLreg,
		ssa.OpARMSBCshiftLLreg,
		ssa.OpARMRSBshiftLLreg,
		ssa.OpARMRSCshiftLLreg,
		ssa.OpARMANDshiftLLreg,
		ssa.OpARMORshiftLLreg,
		ssa.OpARMXORshiftLLreg,
		ssa.OpARMBICshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMADDSshiftLLreg,
		ssa.OpARMSUBSshiftLLreg,
		ssa.OpARMRSBSshiftLLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRLreg,
		ssa.OpARMADCshiftRLreg,
		ssa.OpARMSUBshiftRLreg,
		ssa.OpARMSBCshiftRLreg,
		ssa.OpARMRSBshiftRLreg,
		ssa.OpARMRSCshiftRLreg,
		ssa.OpARMANDshiftRLreg,
		ssa.OpARMORshiftRLreg,
		ssa.OpARMXORshiftRLreg,
		ssa.OpARMBICshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMADDSshiftRLreg,
		ssa.OpARMSUBSshiftRLreg,
		ssa.OpARMRSBSshiftRLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRAreg,
		ssa.OpARMADCshiftRAreg,
		ssa.OpARMSUBshiftRAreg,
		ssa.OpARMSBCshiftRAreg,
		ssa.OpARMRSBshiftRAreg,
		ssa.OpARMRSCshiftRAreg,
		ssa.OpARMANDshiftRAreg,
		ssa.OpARMORshiftRAreg,
		ssa.OpARMXORshiftRAreg,
		ssa.OpARMBICshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDSshiftRAreg,
		ssa.OpARMSUBSshiftRAreg,
		ssa.OpARMRSBSshiftRAreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
		p.Scond = arm.C_SBIT
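	// The multiplies below produce a 64-bit product in a register pair;
	// obj.TYPE_REGREG carries that pair in To.Reg and To.Offset. HMUL/HMULU
	// keep only the high 32 bits, discarding the low half into REGTMP.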
	case ssa.OpARMHMUL,
		ssa.OpARMHMULU:
		// 32-bit high multiplication
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg()
		p.To.Offset = arm.REGTMP // throw away low 32 bits into tmp register
	case ssa.OpARMMULLU:
		// 32-bit multiplication with a 64-bit result: high 32 bits in out0, low 32 bits in out1
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg0()           // high 32 bits
		p.To.Offset = int64(v.Reg1()) // low 32 bits
	case ssa.OpARMMULA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG2
		p.To.Reg = v.Reg()                   // result
		p.To.Offset = int64(v.Args[2].Reg()) // addend
	case ssa.OpARMMOVWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVFconst,
		ssa.OpARMMOVDconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMP,
		ssa.OpARMCMN,
		ssa.OpARMTST,
		ssa.OpARMTEQ,
		ssa.OpARMCMPF,
		ssa.OpARMCMPD:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		// Special operand layout in ARM assembly:
		// compared with x86, the operands of ARM's CMP are reversed.
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPconst,
		ssa.OpARMCMNconst,
		ssa.OpARMTSTconst,
		ssa.OpARMTEQconst:
		// Special operand layout in ARM assembly.
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPF0,
		ssa.OpARMCMPD0:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPshiftLL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMCMPshiftRL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMCMPshiftRA:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMCMPshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
	case ssa.OpARMCMPshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
	case ssa.OpARMCMPshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
	case ssa.OpARMMOVWaddr:
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

		var wantreg string
		// MOVW $sym+off(base), R
		// the assembler expands it as follows:
		// - base is SP: add constant offset to SP (R13);
		//   when the constant is large, the tmp register (R11) may be used
		// - base is SB: load external address from constant pool (use relocation)
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *ssa.ExternSymbol:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *ssa.ArgSymbol, *ssa.AutoSymbol:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}

	case ssa.OpARMMOVBload,
		ssa.OpARMMOVBUload,
		ssa.OpARMMOVHload,
		ssa.OpARMMOVHUload,
		ssa.OpARMMOVWload,
		ssa.OpARMMOVFload,
		ssa.OpARMMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVBstore,
		ssa.OpARMMOVHstore,
		ssa.OpARMMOVWstore,
		ssa.OpARMMOVFstore,
		ssa.OpARMMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
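	// The indexed loads and stores below address memory as base plus a
	// (possibly shifted) index register. The shifted index is encoded as an
	// obj.TYPE_SHIFT operand (via genshift/makeshift) with the base register
	// in the operand's Reg field; the plain *idx forms fall through as a
	// shift by 0 bits.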
	case ssa.OpARMMOVWloadidx:
		// this is just a shift by 0 bits
		fallthrough
	case ssa.OpARMMOVWloadshiftLL:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRL:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRA:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWstoreidx:
		// this is just a shift by 0 bits
		fallthrough
	case ssa.OpARMMOVWstoreshiftLL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
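	// MOVBreg and friends are sign/zero extensions. If the argument is a
	// load whose width and signedness already match the extension, the
	// register already holds the extended value, so the extension can be
	// dropped or reduced to a plain register move.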
	case ssa.OpARMMOVBreg,
		ssa.OpARMMOVBUreg,
		ssa.OpARMMOVHreg,
		ssa.OpARMMOVHUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
				// arg is a proper-typed load, already zero/sign-extended, don't extend again
				if v.Reg() == v.Args[0].Reg() {
					return
				}
				p := s.Prog(arm.AMOVW)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = v.Args[0].Reg()
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				return
			default:
			}
		}
		fallthrough
	case ssa.OpARMMVN,
		ssa.OpARMCLZ,
		ssa.OpARMREV,
		ssa.OpARMRBIT,
		ssa.OpARMSQRTD,
		ssa.OpARMNEGF,
		ssa.OpARMNEGD,
		ssa.OpARMMOVWF,
		ssa.OpARMMOVWD,
		ssa.OpARMMOVFW,
		ssa.OpARMMOVDW,
		ssa.OpARMMOVFD,
		ssa.OpARMMOVDF:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVWUF,
		ssa.OpARMMOVWUD,
		ssa.OpARMMOVFWU,
		ssa.OpARMMOVDWU:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_UBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWHSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWLSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_LS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
		s.Call(v)
	case ssa.OpARMCALLudiv:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Udiv
	case ssa.OpARMDUFFZERO:
		p := s.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Duffzero
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := s.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Duffcopy
		p.To.Offset = v.AuxInt
	case ssa.OpARMLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := s.Prog(arm.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpARMLoweredZero:
		// MOVW.P	Rarg2, 4(R1)
		// CMP	Rarg1, R1
		// BLE	-2(PC)
		// arg1 is the address of the last element to zero
		// arg2 is known to be zero
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REG_R1
		p.To.Offset = sz
		p2 := s.Prog(arm.ACMP)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Args[1].Reg()
		p2.Reg = arm.REG_R1
		p3 := s.Prog(arm.ABLE)
		p3.To.Type = obj.TYPE_BRANCH
		gc.Patch(p3, p)
	case ssa.OpARMLoweredMove:
		// MOVW.P	4(R1), Rtmp
		// MOVW.P	Rtmp, 4(R2)
		// CMP	Rarg2, R1
		// BLE	-3(PC)
		// arg2 is the address of the last element of src
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = arm.REG_R1
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		p2 := s.Prog(mov)
		p2.Scond = arm.C_PBIT
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = arm.REG_R2
		p2.To.Offset = sz
		p3 := s.Prog(arm.ACMP)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[2].Reg()
		p3.Reg = arm.REG_R1
		p4 := s.Prog(arm.ABLE)
		p4.To.Type = obj.TYPE_BRANCH
		gc.Patch(p4, p)
	case ssa.OpARMEqual,
		ssa.OpARMNotEqual,
		ssa.OpARMLessThan,
		ssa.OpARMLessEqual,
		ssa.OpARMGreaterThan,
		ssa.OpARMGreaterEqual,
		ssa.OpARMLessThanU,
		ssa.OpARMLessEqualU,
		ssa.OpARMGreaterThanU,
		ssa.OpARMGreaterEqualU:
		// generate boolean values using a pair of conditional moves:
		// MOVW	$0, Rdst
		// MOVW.<cond>	$1, Rdst	(cond taken from condBits)
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p = s.Prog(arm.AMOVW)
		p.Scond = condBits[v.Op]
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMLoweredGetClosurePtr:
		// Closure pointer is R7 (arm.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpARMFlagEQ,
		ssa.OpARMFlagLT_ULT,
		ssa.OpARMFlagLT_UGT,
		ssa.OpARMFlagGT_ULT,
		ssa.OpARMFlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpARMInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

var condBits = map[ssa.Op]uint8{
	ssa.OpARMEqual:         arm.C_SCOND_EQ,
	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
	ssa.OpARMLessThan:      arm.C_SCOND_LT,
	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
}
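
// blockJump gives, for each conditional block kind, the branch taken when
// the condition holds (asm) and its inverse (invasm). ssaGenBlock emits asm
// to jump to Succs[0], or invasm to jump to Succs[1] when Succs[0] is the
// fallthrough block.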
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
}

func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockDefer:
		// defer returns in R0:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(arm.ACMP)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.Reg = arm.REG_R0
		p = s.Prog(arm.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here

	case ssa.BlockRet:
		s.Prog(obj.ARET)

	case ssa.BlockRetJmp:
		p := s.Prog(obj.ARET)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)

	case ssa.BlockARMEQ, ssa.BlockARMNE,
		ssa.BlockARMLT, ssa.BlockARMGE,
		ssa.BlockARMLE, ssa.BlockARMGT,
		ssa.BlockARMULT, ssa.BlockARMUGT,
		ssa.BlockARMULE, ssa.BlockARMUGE:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			p = s.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := s.Prog(obj.AJMP)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}

	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}