github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/cmd/compile/internal/arm/ssa.go (about) 1 // Copyright 2016 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package arm 6 7 import ( 8 "fmt" 9 "math" 10 11 "cmd/compile/internal/gc" 12 "cmd/compile/internal/ssa" 13 "cmd/internal/obj" 14 "cmd/internal/obj/arm" 15 ) 16 17 // loadByType returns the load instruction of the given type. 18 func loadByType(t ssa.Type) obj.As { 19 if t.IsFloat() { 20 switch t.Size() { 21 case 4: 22 return arm.AMOVF 23 case 8: 24 return arm.AMOVD 25 } 26 } else { 27 switch t.Size() { 28 case 1: 29 if t.IsSigned() { 30 return arm.AMOVB 31 } else { 32 return arm.AMOVBU 33 } 34 case 2: 35 if t.IsSigned() { 36 return arm.AMOVH 37 } else { 38 return arm.AMOVHU 39 } 40 case 4: 41 return arm.AMOVW 42 } 43 } 44 panic("bad load type") 45 } 46 47 // storeByType returns the store instruction of the given type. 48 func storeByType(t ssa.Type) obj.As { 49 if t.IsFloat() { 50 switch t.Size() { 51 case 4: 52 return arm.AMOVF 53 case 8: 54 return arm.AMOVD 55 } 56 } else { 57 switch t.Size() { 58 case 1: 59 return arm.AMOVB 60 case 2: 61 return arm.AMOVH 62 case 4: 63 return arm.AMOVW 64 } 65 } 66 panic("bad store type") 67 } 68 69 // shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands 70 type shift int64 71 72 // copied from ../../../internal/obj/util.go:/TYPE_SHIFT 73 func (v shift) String() string { 74 op := "<<>>->@>"[((v>>5)&3)<<1:] 75 if v&(1<<4) != 0 { 76 // register shift 77 return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15) 78 } else { 79 // constant shift 80 return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31) 81 } 82 } 83 84 // makeshift encodes a register shifted by a constant 85 func makeshift(reg int16, typ int64, s int64) shift { 86 return shift(int64(reg&0xf) | typ | (s&31)<<7) 87 } 88 89 // genshift generates a Prog for r = r0 op (r1 shifted by n) 90 func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { 91 p := s.Prog(as) 92 p.From.Type = obj.TYPE_SHIFT 93 p.From.Offset = int64(makeshift(r1, typ, n)) 94 p.Reg = r0 95 if r != 0 { 96 p.To.Type = obj.TYPE_REG 97 p.To.Reg = r 98 } 99 return p 100 } 101 102 // makeregshift encodes a register shifted by a register 103 func makeregshift(r1 int16, typ int64, r2 int16) shift { 104 return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4) 105 } 106 107 // genregshift generates a Prog for r = r0 op (r1 shifted by r2) 108 func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog { 109 p := s.Prog(as) 110 p.From.Type = obj.TYPE_SHIFT 111 p.From.Offset = int64(makeregshift(r1, typ, r2)) 112 p.Reg = r0 113 if r != 0 { 114 p.To.Type = obj.TYPE_REG 115 p.To.Reg = r 116 } 117 return p 118 } 119 120 func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { 121 switch v.Op { 122 case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg: 123 if v.Type.IsMemory() { 124 return 125 } 126 x := v.Args[0].Reg() 127 y := v.Reg() 128 if x == y { 129 return 130 } 131 as := arm.AMOVW 132 if v.Type.IsFloat() { 133 switch v.Type.Size() { 134 case 4: 135 as = arm.AMOVF 136 case 8: 137 as = arm.AMOVD 138 default: 139 panic("bad float size") 140 } 141 } 142 p := s.Prog(as) 143 p.From.Type = obj.TYPE_REG 144 p.From.Reg = x 145 p.To.Type = obj.TYPE_REG 146 p.To.Reg = y 147 case ssa.OpARMMOVWnop: 148 if v.Reg() != v.Args[0].Reg() { 149 v.Fatalf("input[0] and output not in same register %s", v.LongString()) 150 } 151 // nothing to do 152 case ssa.OpLoadReg: 153 if v.Type.IsFlags() { 154 v.Fatalf("load flags not implemented: %v", v.LongString()) 155 return 156 } 157 p := s.Prog(loadByType(v.Type)) 158 gc.AddrAuto(&p.From, v.Args[0]) 159 p.To.Type = obj.TYPE_REG 160 p.To.Reg = v.Reg() 161 case ssa.OpStoreReg: 162 if v.Type.IsFlags() { 163 v.Fatalf("store flags not implemented: %v", v.LongString()) 164 return 165 } 166 p := s.Prog(storeByType(v.Type)) 167 p.From.Type = obj.TYPE_REG 168 p.From.Reg = v.Args[0].Reg() 169 gc.AddrAuto(&p.To, v) 170 case ssa.OpARMADD, 171 ssa.OpARMADC, 172 ssa.OpARMSUB, 173 ssa.OpARMSBC, 174 ssa.OpARMRSB, 175 ssa.OpARMAND, 176 ssa.OpARMOR, 177 ssa.OpARMXOR, 178 ssa.OpARMBIC, 179 ssa.OpARMMUL, 180 ssa.OpARMADDF, 181 ssa.OpARMADDD, 182 ssa.OpARMSUBF, 183 ssa.OpARMSUBD, 184 ssa.OpARMMULF, 185 ssa.OpARMMULD, 186 ssa.OpARMDIVF, 187 ssa.OpARMDIVD: 188 r := v.Reg() 189 r1 := v.Args[0].Reg() 190 r2 := v.Args[1].Reg() 191 p := s.Prog(v.Op.Asm()) 192 p.From.Type = obj.TYPE_REG 193 p.From.Reg = r2 194 p.Reg = r1 195 p.To.Type = obj.TYPE_REG 196 p.To.Reg = r 197 case ssa.OpARMADDS, 198 ssa.OpARMSUBS: 199 r := v.Reg0() 200 r1 := v.Args[0].Reg() 201 r2 := v.Args[1].Reg() 202 p := s.Prog(v.Op.Asm()) 203 p.Scond = arm.C_SBIT 204 p.From.Type = obj.TYPE_REG 205 p.From.Reg = r2 206 p.Reg = r1 207 p.To.Type = obj.TYPE_REG 208 p.To.Reg = r 209 case ssa.OpARMSLL, 210 ssa.OpARMSRL, 211 ssa.OpARMSRA: 212 r := v.Reg() 213 r1 := v.Args[0].Reg() 214 r2 := v.Args[1].Reg() 215 p := s.Prog(v.Op.Asm()) 216 p.From.Type = obj.TYPE_REG 217 p.From.Reg = r2 218 p.Reg = r1 219 p.To.Type = obj.TYPE_REG 220 p.To.Reg = r 221 case ssa.OpARMSRAcond: 222 // ARM shift instructions uses only the low-order byte of the shift amount 223 // generate conditional instructions to deal with large shifts 224 // flag is already set 225 // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit 226 // SRA.LO Rarg1, Rarg0, Rdst 227 r := v.Reg() 228 r1 := v.Args[0].Reg() 229 r2 := v.Args[1].Reg() 230 p := s.Prog(arm.ASRA) 231 p.Scond = arm.C_SCOND_HS 232 p.From.Type = obj.TYPE_CONST 233 p.From.Offset = 31 234 p.Reg = r1 235 p.To.Type = obj.TYPE_REG 236 p.To.Reg = r 237 p = s.Prog(arm.ASRA) 238 p.Scond = arm.C_SCOND_LO 239 p.From.Type = obj.TYPE_REG 240 p.From.Reg = r2 241 p.Reg = r1 242 p.To.Type = obj.TYPE_REG 243 p.To.Reg = r 244 case ssa.OpARMADDconst, 245 ssa.OpARMADCconst, 246 ssa.OpARMSUBconst, 247 ssa.OpARMSBCconst, 248 ssa.OpARMRSBconst, 249 ssa.OpARMRSCconst, 250 ssa.OpARMANDconst, 251 ssa.OpARMORconst, 252 ssa.OpARMXORconst, 253 ssa.OpARMBICconst, 254 ssa.OpARMSLLconst, 255 ssa.OpARMSRLconst, 256 ssa.OpARMSRAconst: 257 p := s.Prog(v.Op.Asm()) 258 p.From.Type = obj.TYPE_CONST 259 p.From.Offset = v.AuxInt 260 p.Reg = v.Args[0].Reg() 261 p.To.Type = obj.TYPE_REG 262 p.To.Reg = v.Reg() 263 case ssa.OpARMADDSconst, 264 ssa.OpARMSUBSconst, 265 ssa.OpARMRSBSconst: 266 p := s.Prog(v.Op.Asm()) 267 p.Scond = arm.C_SBIT 268 p.From.Type = obj.TYPE_CONST 269 p.From.Offset = v.AuxInt 270 p.Reg = v.Args[0].Reg() 271 p.To.Type = obj.TYPE_REG 272 p.To.Reg = v.Reg0() 273 case ssa.OpARMSRRconst: 274 genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) 275 case ssa.OpARMADDshiftLL, 276 ssa.OpARMADCshiftLL, 277 ssa.OpARMSUBshiftLL, 278 ssa.OpARMSBCshiftLL, 279 ssa.OpARMRSBshiftLL, 280 ssa.OpARMRSCshiftLL, 281 ssa.OpARMANDshiftLL, 282 ssa.OpARMORshiftLL, 283 ssa.OpARMXORshiftLL, 284 ssa.OpARMBICshiftLL: 285 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) 286 case ssa.OpARMADDSshiftLL, 287 ssa.OpARMSUBSshiftLL, 288 ssa.OpARMRSBSshiftLL: 289 p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt) 290 p.Scond = arm.C_SBIT 291 case ssa.OpARMADDshiftRL, 292 ssa.OpARMADCshiftRL, 293 ssa.OpARMSUBshiftRL, 294 ssa.OpARMSBCshiftRL, 295 ssa.OpARMRSBshiftRL, 296 ssa.OpARMRSCshiftRL, 297 ssa.OpARMANDshiftRL, 298 ssa.OpARMORshiftRL, 299 ssa.OpARMXORshiftRL, 300 ssa.OpARMBICshiftRL: 301 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) 302 case ssa.OpARMADDSshiftRL, 303 ssa.OpARMSUBSshiftRL, 304 ssa.OpARMRSBSshiftRL: 305 p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt) 306 p.Scond = arm.C_SBIT 307 case ssa.OpARMADDshiftRA, 308 ssa.OpARMADCshiftRA, 309 ssa.OpARMSUBshiftRA, 310 ssa.OpARMSBCshiftRA, 311 ssa.OpARMRSBshiftRA, 312 ssa.OpARMRSCshiftRA, 313 ssa.OpARMANDshiftRA, 314 ssa.OpARMORshiftRA, 315 ssa.OpARMXORshiftRA, 316 ssa.OpARMBICshiftRA: 317 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) 318 case ssa.OpARMADDSshiftRA, 319 ssa.OpARMSUBSshiftRA, 320 ssa.OpARMRSBSshiftRA: 321 p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt) 322 p.Scond = arm.C_SBIT 323 case ssa.OpARMXORshiftRR: 324 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) 325 case ssa.OpARMMVNshiftLL: 326 genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) 327 case ssa.OpARMMVNshiftRL: 328 genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) 329 case ssa.OpARMMVNshiftRA: 330 genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) 331 case ssa.OpARMMVNshiftLLreg: 332 genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL) 333 case ssa.OpARMMVNshiftRLreg: 334 genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR) 335 case ssa.OpARMMVNshiftRAreg: 336 genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR) 337 case ssa.OpARMADDshiftLLreg, 338 ssa.OpARMADCshiftLLreg, 339 ssa.OpARMSUBshiftLLreg, 340 ssa.OpARMSBCshiftLLreg, 341 ssa.OpARMRSBshiftLLreg, 342 ssa.OpARMRSCshiftLLreg, 343 ssa.OpARMANDshiftLLreg, 344 ssa.OpARMORshiftLLreg, 345 ssa.OpARMXORshiftLLreg, 346 ssa.OpARMBICshiftLLreg: 347 genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL) 348 case ssa.OpARMADDSshiftLLreg, 349 ssa.OpARMSUBSshiftLLreg, 350 ssa.OpARMRSBSshiftLLreg: 351 p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL) 352 p.Scond = arm.C_SBIT 353 case ssa.OpARMADDshiftRLreg, 354 ssa.OpARMADCshiftRLreg, 355 ssa.OpARMSUBshiftRLreg, 356 ssa.OpARMSBCshiftRLreg, 357 ssa.OpARMRSBshiftRLreg, 358 ssa.OpARMRSCshiftRLreg, 359 ssa.OpARMANDshiftRLreg, 360 ssa.OpARMORshiftRLreg, 361 ssa.OpARMXORshiftRLreg, 362 ssa.OpARMBICshiftRLreg: 363 genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR) 364 case ssa.OpARMADDSshiftRLreg, 365 ssa.OpARMSUBSshiftRLreg, 366 ssa.OpARMRSBSshiftRLreg: 367 p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR) 368 p.Scond = arm.C_SBIT 369 case ssa.OpARMADDshiftRAreg, 370 ssa.OpARMADCshiftRAreg, 371 ssa.OpARMSUBshiftRAreg, 372 ssa.OpARMSBCshiftRAreg, 373 ssa.OpARMRSBshiftRAreg, 374 ssa.OpARMRSCshiftRAreg, 375 ssa.OpARMANDshiftRAreg, 376 ssa.OpARMORshiftRAreg, 377 ssa.OpARMXORshiftRAreg, 378 ssa.OpARMBICshiftRAreg: 379 genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR) 380 case ssa.OpARMADDSshiftRAreg, 381 ssa.OpARMSUBSshiftRAreg, 382 ssa.OpARMRSBSshiftRAreg: 383 p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR) 384 p.Scond = arm.C_SBIT 385 case ssa.OpARMHMUL, 386 ssa.OpARMHMULU: 387 // 32-bit high multiplication 388 p := s.Prog(v.Op.Asm()) 389 p.From.Type = obj.TYPE_REG 390 p.From.Reg = v.Args[0].Reg() 391 p.Reg = v.Args[1].Reg() 392 p.To.Type = obj.TYPE_REGREG 393 p.To.Reg = v.Reg() 394 p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register 395 case ssa.OpARMMULLU: 396 // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1 397 p := s.Prog(v.Op.Asm()) 398 p.From.Type = obj.TYPE_REG 399 p.From.Reg = v.Args[0].Reg() 400 p.Reg = v.Args[1].Reg() 401 p.To.Type = obj.TYPE_REGREG 402 p.To.Reg = v.Reg0() // high 32-bit 403 p.To.Offset = int64(v.Reg1()) // low 32-bit 404 case ssa.OpARMMULA: 405 p := s.Prog(v.Op.Asm()) 406 p.From.Type = obj.TYPE_REG 407 p.From.Reg = v.Args[0].Reg() 408 p.Reg = v.Args[1].Reg() 409 p.To.Type = obj.TYPE_REGREG2 410 p.To.Reg = v.Reg() // result 411 p.To.Offset = int64(v.Args[2].Reg()) // addend 412 case ssa.OpARMMOVWconst: 413 p := s.Prog(v.Op.Asm()) 414 p.From.Type = obj.TYPE_CONST 415 p.From.Offset = v.AuxInt 416 p.To.Type = obj.TYPE_REG 417 p.To.Reg = v.Reg() 418 case ssa.OpARMMOVFconst, 419 ssa.OpARMMOVDconst: 420 p := s.Prog(v.Op.Asm()) 421 p.From.Type = obj.TYPE_FCONST 422 p.From.Val = math.Float64frombits(uint64(v.AuxInt)) 423 p.To.Type = obj.TYPE_REG 424 p.To.Reg = v.Reg() 425 case ssa.OpARMCMP, 426 ssa.OpARMCMN, 427 ssa.OpARMTST, 428 ssa.OpARMTEQ, 429 ssa.OpARMCMPF, 430 ssa.OpARMCMPD: 431 p := s.Prog(v.Op.Asm()) 432 p.From.Type = obj.TYPE_REG 433 // Special layout in ARM assembly 434 // Comparing to x86, the operands of ARM's CMP are reversed. 435 p.From.Reg = v.Args[1].Reg() 436 p.Reg = v.Args[0].Reg() 437 case ssa.OpARMCMPconst, 438 ssa.OpARMCMNconst, 439 ssa.OpARMTSTconst, 440 ssa.OpARMTEQconst: 441 // Special layout in ARM assembly 442 p := s.Prog(v.Op.Asm()) 443 p.From.Type = obj.TYPE_CONST 444 p.From.Offset = v.AuxInt 445 p.Reg = v.Args[0].Reg() 446 case ssa.OpARMCMPF0, 447 ssa.OpARMCMPD0: 448 p := s.Prog(v.Op.Asm()) 449 p.From.Type = obj.TYPE_REG 450 p.From.Reg = v.Args[0].Reg() 451 case ssa.OpARMCMPshiftLL: 452 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt) 453 case ssa.OpARMCMPshiftRL: 454 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt) 455 case ssa.OpARMCMPshiftRA: 456 genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt) 457 case ssa.OpARMCMPshiftLLreg: 458 genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL) 459 case ssa.OpARMCMPshiftRLreg: 460 genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR) 461 case ssa.OpARMCMPshiftRAreg: 462 genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR) 463 case ssa.OpARMMOVWaddr: 464 p := s.Prog(arm.AMOVW) 465 p.From.Type = obj.TYPE_ADDR 466 p.To.Type = obj.TYPE_REG 467 p.To.Reg = v.Reg() 468 469 var wantreg string 470 // MOVW $sym+off(base), R 471 // the assembler expands it as the following: 472 // - base is SP: add constant offset to SP (R13) 473 // when constant is large, tmp register (R11) may be used 474 // - base is SB: load external address from constant pool (use relocation) 475 switch v.Aux.(type) { 476 default: 477 v.Fatalf("aux is of unknown type %T", v.Aux) 478 case *ssa.ExternSymbol: 479 wantreg = "SB" 480 gc.AddAux(&p.From, v) 481 case *ssa.ArgSymbol, *ssa.AutoSymbol: 482 wantreg = "SP" 483 gc.AddAux(&p.From, v) 484 case nil: 485 // No sym, just MOVW $off(SP), R 486 wantreg = "SP" 487 p.From.Reg = arm.REGSP 488 p.From.Offset = v.AuxInt 489 } 490 if reg := v.Args[0].RegName(); reg != wantreg { 491 v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) 492 } 493 494 case ssa.OpARMMOVBload, 495 ssa.OpARMMOVBUload, 496 ssa.OpARMMOVHload, 497 ssa.OpARMMOVHUload, 498 ssa.OpARMMOVWload, 499 ssa.OpARMMOVFload, 500 ssa.OpARMMOVDload: 501 p := s.Prog(v.Op.Asm()) 502 p.From.Type = obj.TYPE_MEM 503 p.From.Reg = v.Args[0].Reg() 504 gc.AddAux(&p.From, v) 505 p.To.Type = obj.TYPE_REG 506 p.To.Reg = v.Reg() 507 case ssa.OpARMMOVBstore, 508 ssa.OpARMMOVHstore, 509 ssa.OpARMMOVWstore, 510 ssa.OpARMMOVFstore, 511 ssa.OpARMMOVDstore: 512 p := s.Prog(v.Op.Asm()) 513 p.From.Type = obj.TYPE_REG 514 p.From.Reg = v.Args[1].Reg() 515 p.To.Type = obj.TYPE_MEM 516 p.To.Reg = v.Args[0].Reg() 517 gc.AddAux(&p.To, v) 518 case ssa.OpARMMOVWloadidx: 519 // this is just shift 0 bits 520 fallthrough 521 case ssa.OpARMMOVWloadshiftLL: 522 p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) 523 p.From.Reg = v.Args[0].Reg() 524 case ssa.OpARMMOVWloadshiftRL: 525 p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) 526 p.From.Reg = v.Args[0].Reg() 527 case ssa.OpARMMOVWloadshiftRA: 528 p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) 529 p.From.Reg = v.Args[0].Reg() 530 case ssa.OpARMMOVWstoreidx: 531 // this is just shift 0 bits 532 fallthrough 533 case ssa.OpARMMOVWstoreshiftLL: 534 p := s.Prog(v.Op.Asm()) 535 p.From.Type = obj.TYPE_REG 536 p.From.Reg = v.Args[2].Reg() 537 p.To.Type = obj.TYPE_SHIFT 538 p.To.Reg = v.Args[0].Reg() 539 p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt)) 540 case ssa.OpARMMOVWstoreshiftRL: 541 p := s.Prog(v.Op.Asm()) 542 p.From.Type = obj.TYPE_REG 543 p.From.Reg = v.Args[2].Reg() 544 p.To.Type = obj.TYPE_SHIFT 545 p.To.Reg = v.Args[0].Reg() 546 p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt)) 547 case ssa.OpARMMOVWstoreshiftRA: 548 p := s.Prog(v.Op.Asm()) 549 p.From.Type = obj.TYPE_REG 550 p.From.Reg = v.Args[2].Reg() 551 p.To.Type = obj.TYPE_SHIFT 552 p.To.Reg = v.Args[0].Reg() 553 p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt)) 554 case ssa.OpARMMOVBreg, 555 ssa.OpARMMOVBUreg, 556 ssa.OpARMMOVHreg, 557 ssa.OpARMMOVHUreg: 558 a := v.Args[0] 559 for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop { 560 a = a.Args[0] 561 } 562 if a.Op == ssa.OpLoadReg { 563 t := a.Type 564 switch { 565 case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(), 566 v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(), 567 v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(), 568 v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned(): 569 // arg is a proper-typed load, already zero/sign-extended, don't extend again 570 if v.Reg() == v.Args[0].Reg() { 571 return 572 } 573 p := s.Prog(arm.AMOVW) 574 p.From.Type = obj.TYPE_REG 575 p.From.Reg = v.Args[0].Reg() 576 p.To.Type = obj.TYPE_REG 577 p.To.Reg = v.Reg() 578 return 579 default: 580 } 581 } 582 fallthrough 583 case ssa.OpARMMVN, 584 ssa.OpARMCLZ, 585 ssa.OpARMREV, 586 ssa.OpARMRBIT, 587 ssa.OpARMSQRTD, 588 ssa.OpARMNEGF, 589 ssa.OpARMNEGD, 590 ssa.OpARMMOVWF, 591 ssa.OpARMMOVWD, 592 ssa.OpARMMOVFW, 593 ssa.OpARMMOVDW, 594 ssa.OpARMMOVFD, 595 ssa.OpARMMOVDF: 596 p := s.Prog(v.Op.Asm()) 597 p.From.Type = obj.TYPE_REG 598 p.From.Reg = v.Args[0].Reg() 599 p.To.Type = obj.TYPE_REG 600 p.To.Reg = v.Reg() 601 case ssa.OpARMMOVWUF, 602 ssa.OpARMMOVWUD, 603 ssa.OpARMMOVFWU, 604 ssa.OpARMMOVDWU: 605 p := s.Prog(v.Op.Asm()) 606 p.Scond = arm.C_UBIT 607 p.From.Type = obj.TYPE_REG 608 p.From.Reg = v.Args[0].Reg() 609 p.To.Type = obj.TYPE_REG 610 p.To.Reg = v.Reg() 611 case ssa.OpARMCMOVWHSconst: 612 p := s.Prog(arm.AMOVW) 613 p.Scond = arm.C_SCOND_HS 614 p.From.Type = obj.TYPE_CONST 615 p.From.Offset = v.AuxInt 616 p.To.Type = obj.TYPE_REG 617 p.To.Reg = v.Reg() 618 case ssa.OpARMCMOVWLSconst: 619 p := s.Prog(arm.AMOVW) 620 p.Scond = arm.C_SCOND_LS 621 p.From.Type = obj.TYPE_CONST 622 p.From.Offset = v.AuxInt 623 p.To.Type = obj.TYPE_REG 624 p.To.Reg = v.Reg() 625 case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter, ssa.OpARMCALLudiv: 626 s.Call(v) 627 case ssa.OpARMDUFFZERO: 628 p := s.Prog(obj.ADUFFZERO) 629 p.To.Type = obj.TYPE_MEM 630 p.To.Name = obj.NAME_EXTERN 631 p.To.Sym = gc.Duffzero 632 p.To.Offset = v.AuxInt 633 case ssa.OpARMDUFFCOPY: 634 p := s.Prog(obj.ADUFFCOPY) 635 p.To.Type = obj.TYPE_MEM 636 p.To.Name = obj.NAME_EXTERN 637 p.To.Sym = gc.Duffcopy 638 p.To.Offset = v.AuxInt 639 case ssa.OpARMLoweredNilCheck: 640 // Issue a load which will fault if arg is nil. 641 p := s.Prog(arm.AMOVB) 642 p.From.Type = obj.TYPE_MEM 643 p.From.Reg = v.Args[0].Reg() 644 gc.AddAux(&p.From, v) 645 p.To.Type = obj.TYPE_REG 646 p.To.Reg = arm.REGTMP 647 if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers 648 gc.Warnl(v.Pos, "generated nil check") 649 } 650 case ssa.OpARMLoweredZero: 651 // MOVW.P Rarg2, 4(R1) 652 // CMP Rarg1, R1 653 // BLE -2(PC) 654 // arg1 is the address of the last element to zero 655 // arg2 is known to be zero 656 // auxint is alignment 657 var sz int64 658 var mov obj.As 659 switch { 660 case v.AuxInt%4 == 0: 661 sz = 4 662 mov = arm.AMOVW 663 case v.AuxInt%2 == 0: 664 sz = 2 665 mov = arm.AMOVH 666 default: 667 sz = 1 668 mov = arm.AMOVB 669 } 670 p := s.Prog(mov) 671 p.Scond = arm.C_PBIT 672 p.From.Type = obj.TYPE_REG 673 p.From.Reg = v.Args[2].Reg() 674 p.To.Type = obj.TYPE_MEM 675 p.To.Reg = arm.REG_R1 676 p.To.Offset = sz 677 p2 := s.Prog(arm.ACMP) 678 p2.From.Type = obj.TYPE_REG 679 p2.From.Reg = v.Args[1].Reg() 680 p2.Reg = arm.REG_R1 681 p3 := s.Prog(arm.ABLE) 682 p3.To.Type = obj.TYPE_BRANCH 683 gc.Patch(p3, p) 684 case ssa.OpARMLoweredMove: 685 // MOVW.P 4(R1), Rtmp 686 // MOVW.P Rtmp, 4(R2) 687 // CMP Rarg2, R1 688 // BLE -3(PC) 689 // arg2 is the address of the last element of src 690 // auxint is alignment 691 var sz int64 692 var mov obj.As 693 switch { 694 case v.AuxInt%4 == 0: 695 sz = 4 696 mov = arm.AMOVW 697 case v.AuxInt%2 == 0: 698 sz = 2 699 mov = arm.AMOVH 700 default: 701 sz = 1 702 mov = arm.AMOVB 703 } 704 p := s.Prog(mov) 705 p.Scond = arm.C_PBIT 706 p.From.Type = obj.TYPE_MEM 707 p.From.Reg = arm.REG_R1 708 p.From.Offset = sz 709 p.To.Type = obj.TYPE_REG 710 p.To.Reg = arm.REGTMP 711 p2 := s.Prog(mov) 712 p2.Scond = arm.C_PBIT 713 p2.From.Type = obj.TYPE_REG 714 p2.From.Reg = arm.REGTMP 715 p2.To.Type = obj.TYPE_MEM 716 p2.To.Reg = arm.REG_R2 717 p2.To.Offset = sz 718 p3 := s.Prog(arm.ACMP) 719 p3.From.Type = obj.TYPE_REG 720 p3.From.Reg = v.Args[2].Reg() 721 p3.Reg = arm.REG_R1 722 p4 := s.Prog(arm.ABLE) 723 p4.To.Type = obj.TYPE_BRANCH 724 gc.Patch(p4, p) 725 case ssa.OpARMEqual, 726 ssa.OpARMNotEqual, 727 ssa.OpARMLessThan, 728 ssa.OpARMLessEqual, 729 ssa.OpARMGreaterThan, 730 ssa.OpARMGreaterEqual, 731 ssa.OpARMLessThanU, 732 ssa.OpARMLessEqualU, 733 ssa.OpARMGreaterThanU, 734 ssa.OpARMGreaterEqualU: 735 // generate boolean values 736 // use conditional move 737 p := s.Prog(arm.AMOVW) 738 p.From.Type = obj.TYPE_CONST 739 p.From.Offset = 0 740 p.To.Type = obj.TYPE_REG 741 p.To.Reg = v.Reg() 742 p = s.Prog(arm.AMOVW) 743 p.Scond = condBits[v.Op] 744 p.From.Type = obj.TYPE_CONST 745 p.From.Offset = 1 746 p.To.Type = obj.TYPE_REG 747 p.To.Reg = v.Reg() 748 case ssa.OpARMLoweredGetClosurePtr: 749 // Closure pointer is R7 (arm.REGCTXT). 750 gc.CheckLoweredGetClosurePtr(v) 751 case ssa.OpARMFlagEQ, 752 ssa.OpARMFlagLT_ULT, 753 ssa.OpARMFlagLT_UGT, 754 ssa.OpARMFlagGT_ULT, 755 ssa.OpARMFlagGT_UGT: 756 v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) 757 case ssa.OpARMInvertFlags: 758 v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) 759 default: 760 v.Fatalf("genValue not implemented: %s", v.LongString()) 761 } 762 } 763 764 var condBits = map[ssa.Op]uint8{ 765 ssa.OpARMEqual: arm.C_SCOND_EQ, 766 ssa.OpARMNotEqual: arm.C_SCOND_NE, 767 ssa.OpARMLessThan: arm.C_SCOND_LT, 768 ssa.OpARMLessThanU: arm.C_SCOND_LO, 769 ssa.OpARMLessEqual: arm.C_SCOND_LE, 770 ssa.OpARMLessEqualU: arm.C_SCOND_LS, 771 ssa.OpARMGreaterThan: arm.C_SCOND_GT, 772 ssa.OpARMGreaterThanU: arm.C_SCOND_HI, 773 ssa.OpARMGreaterEqual: arm.C_SCOND_GE, 774 ssa.OpARMGreaterEqualU: arm.C_SCOND_HS, 775 } 776 777 var blockJump = map[ssa.BlockKind]struct { 778 asm, invasm obj.As 779 }{ 780 ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE}, 781 ssa.BlockARMNE: {arm.ABNE, arm.ABEQ}, 782 ssa.BlockARMLT: {arm.ABLT, arm.ABGE}, 783 ssa.BlockARMGE: {arm.ABGE, arm.ABLT}, 784 ssa.BlockARMLE: {arm.ABLE, arm.ABGT}, 785 ssa.BlockARMGT: {arm.ABGT, arm.ABLE}, 786 ssa.BlockARMULT: {arm.ABLO, arm.ABHS}, 787 ssa.BlockARMUGE: {arm.ABHS, arm.ABLO}, 788 ssa.BlockARMUGT: {arm.ABHI, arm.ABLS}, 789 ssa.BlockARMULE: {arm.ABLS, arm.ABHI}, 790 } 791 792 func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { 793 switch b.Kind { 794 case ssa.BlockPlain: 795 if b.Succs[0].Block() != next { 796 p := s.Prog(obj.AJMP) 797 p.To.Type = obj.TYPE_BRANCH 798 s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) 799 } 800 801 case ssa.BlockDefer: 802 // defer returns in R0: 803 // 0 if we should continue executing 804 // 1 if we should jump to deferreturn call 805 p := s.Prog(arm.ACMP) 806 p.From.Type = obj.TYPE_CONST 807 p.From.Offset = 0 808 p.Reg = arm.REG_R0 809 p = s.Prog(arm.ABNE) 810 p.To.Type = obj.TYPE_BRANCH 811 s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) 812 if b.Succs[0].Block() != next { 813 p := s.Prog(obj.AJMP) 814 p.To.Type = obj.TYPE_BRANCH 815 s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) 816 } 817 818 case ssa.BlockExit: 819 s.Prog(obj.AUNDEF) // tell plive.go that we never reach here 820 821 case ssa.BlockRet: 822 s.Prog(obj.ARET) 823 824 case ssa.BlockRetJmp: 825 p := s.Prog(obj.ARET) 826 p.To.Type = obj.TYPE_MEM 827 p.To.Name = obj.NAME_EXTERN 828 p.To.Sym = b.Aux.(*obj.LSym) 829 830 case ssa.BlockARMEQ, ssa.BlockARMNE, 831 ssa.BlockARMLT, ssa.BlockARMGE, 832 ssa.BlockARMLE, ssa.BlockARMGT, 833 ssa.BlockARMULT, ssa.BlockARMUGT, 834 ssa.BlockARMULE, ssa.BlockARMUGE: 835 jmp := blockJump[b.Kind] 836 var p *obj.Prog 837 switch next { 838 case b.Succs[0].Block(): 839 p = s.Prog(jmp.invasm) 840 p.To.Type = obj.TYPE_BRANCH 841 s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) 842 case b.Succs[1].Block(): 843 p = s.Prog(jmp.asm) 844 p.To.Type = obj.TYPE_BRANCH 845 s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) 846 default: 847 p = s.Prog(jmp.asm) 848 p.To.Type = obj.TYPE_BRANCH 849 s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) 850 q := s.Prog(obj.AJMP) 851 q.To.Type = obj.TYPE_BRANCH 852 s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) 853 } 854 855 default: 856 b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString()) 857 } 858 }