github.com/slayercat/go@v0.0.0-20170428012452-c51559813f61/src/cmd/compile/internal/arm/ssa.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"fmt"
	"math"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm.AMOVB
			} else {
				return arm.AMOVBU
			}
		case 2:
			if t.IsSigned() {
				return arm.AMOVH
			} else {
				return arm.AMOVHU
			}
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad load type")
}

// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return arm.AMOVB
		case 2:
			return arm.AMOVH
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad store type")
}

// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
type shift int64

// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
func (v shift) String() string {
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// register shift
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	} else {
		// constant shift
		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
	}
}

// makeshift encodes a register shifted by a constant
func makeshift(reg int16, typ int64, s int64) shift {
	return shift(int64(reg&0xf) | typ | (s&31)<<7)
}

// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeshift(r1, typ, n))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}

// makeregshift encodes a register shifted by a register
func makeregshift(r1 int16, typ int64, r2 int16) shift {
	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}

// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
	p := s.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeregshift(r1, typ, r2))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}
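// shiftExample is an illustrative sketch of the encoding above; it is
// not called anywhere in the compiler, and it assumes the SHIFT_*
// constants match the decoding in String (SHIFT_LL == 0<<5,
// SHIFT_LR == 1<<5). makeshift(1, arm.SHIFT_LL, 2) yields
// 1 | 0<<5 | 2<<7 = 0x101, which String renders as "R1<<2";
// makeregshift(1, arm.SHIFT_LR, 3) also sets bit 4 (register shift)
// and renders as "R1>>R3".
func shiftExample() (string, string) {
	// Constant shift: R1 logically shifted left by 2.
	c := makeshift(1, arm.SHIFT_LL, 2).String()
	// Register shift: R1 logically shifted right by R3.
	r := makeregshift(1, arm.SHIFT_LR, 3).String()
	return c, r
}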
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x == y {
			return
		}
		as := arm.AMOVW
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = arm.AMOVF
			case 8:
				as = arm.AMOVD
			default:
				panic("bad float size")
			}
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpARMMOVWnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpARMADD,
		ssa.OpARMADC,
		ssa.OpARMSUB,
		ssa.OpARMSBC,
		ssa.OpARMRSB,
		ssa.OpARMAND,
		ssa.OpARMOR,
		ssa.OpARMXOR,
		ssa.OpARMBIC,
		ssa.OpARMMUL,
		ssa.OpARMADDF,
		ssa.OpARMADDD,
		ssa.OpARMSUBF,
		ssa.OpARMSUBD,
		ssa.OpARMMULF,
		ssa.OpARMMULD,
		ssa.OpARMDIVF,
		ssa.OpARMDIVD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDS,
		ssa.OpARMSUBS:
		r := v.Reg0()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSLL,
		ssa.OpARMSRL,
		ssa.OpARMSRA:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSRAcond:
		// ARM shift instructions use only the low-order byte of the shift amount,
		// so generate conditional instructions to deal with large shifts.
		// The flag is already set:
		// SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
		// SRA.LO Rarg1, Rarg0, Rdst
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 31
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p = s.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_LO
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDconst,
		ssa.OpARMADCconst,
		ssa.OpARMSUBconst,
		ssa.OpARMSBCconst,
		ssa.OpARMRSBconst,
		ssa.OpARMRSCconst,
		ssa.OpARMANDconst,
		ssa.OpARMORconst,
		ssa.OpARMXORconst,
		ssa.OpARMBICconst,
		ssa.OpARMSLLconst,
		ssa.OpARMSRLconst,
		ssa.OpARMSRAconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMADDSconst,
		ssa.OpARMSUBSconst,
		ssa.OpARMRSBSconst:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpARMSRRconst:
		genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMADDshiftLL,
		ssa.OpARMADCshiftLL,
		ssa.OpARMSUBshiftLL,
		ssa.OpARMSBCshiftLL,
		ssa.OpARMRSBshiftLL,
		ssa.OpARMRSCshiftLL,
		ssa.OpARMANDshiftLL,
		ssa.OpARMORshiftLL,
		ssa.OpARMXORshiftLL,
		ssa.OpARMBICshiftLL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
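	// Note: an SSA op suffix of shiftRL maps to arm.SHIFT_LR (logical
	// shift right) and shiftRA to arm.SHIFT_AR (arithmetic shift right);
	// e.g. ADDshiftLL computes Args[0] + Args[1]<<AuxInt, per the
	// genshift helper above.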
	case ssa.OpARMADDSshiftLL,
		ssa.OpARMSUBSshiftLL,
		ssa.OpARMRSBSshiftLL:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRL,
		ssa.OpARMADCshiftRL,
		ssa.OpARMSUBshiftRL,
		ssa.OpARMSBCshiftRL,
		ssa.OpARMRSBshiftRL,
		ssa.OpARMRSCshiftRL,
		ssa.OpARMANDshiftRL,
		ssa.OpARMORshiftRL,
		ssa.OpARMXORshiftRL,
		ssa.OpARMBICshiftRL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMADDSshiftRL,
		ssa.OpARMSUBSshiftRL,
		ssa.OpARMRSBSshiftRL:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRA,
		ssa.OpARMADCshiftRA,
		ssa.OpARMSUBshiftRA,
		ssa.OpARMSBCshiftRA,
		ssa.OpARMRSBshiftRA,
		ssa.OpARMRSCshiftRA,
		ssa.OpARMANDshiftRA,
		ssa.OpARMORshiftRA,
		ssa.OpARMXORshiftRA,
		ssa.OpARMBICshiftRA:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMADDSshiftRA,
		ssa.OpARMSUBSshiftRA,
		ssa.OpARMRSBSshiftRA:
		p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMXORshiftRR:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMMVNshiftLL:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMMVNshiftRL:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMMVNshiftRA:
		genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMMVNshiftLLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMMVNshiftRLreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMMVNshiftRAreg:
		genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDshiftLLreg,
		ssa.OpARMADCshiftLLreg,
		ssa.OpARMSUBshiftLLreg,
		ssa.OpARMSBCshiftLLreg,
		ssa.OpARMRSBshiftLLreg,
		ssa.OpARMRSCshiftLLreg,
		ssa.OpARMANDshiftLLreg,
		ssa.OpARMORshiftLLreg,
		ssa.OpARMXORshiftLLreg,
		ssa.OpARMBICshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMADDSshiftLLreg,
		ssa.OpARMSUBSshiftLLreg,
		ssa.OpARMRSBSshiftLLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRLreg,
		ssa.OpARMADCshiftRLreg,
		ssa.OpARMSUBshiftRLreg,
		ssa.OpARMSBCshiftRLreg,
		ssa.OpARMRSBshiftRLreg,
		ssa.OpARMRSCshiftRLreg,
		ssa.OpARMANDshiftRLreg,
		ssa.OpARMORshiftRLreg,
		ssa.OpARMXORshiftRLreg,
		ssa.OpARMBICshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMADDSshiftRLreg,
		ssa.OpARMSUBSshiftRLreg,
		ssa.OpARMRSBSshiftRLreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
		p.Scond = arm.C_SBIT
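	// In the *S cases above and below, p.Scond = arm.C_SBIT selects the
	// flag-setting form of the instruction (e.g. ADD.S), which is what
	// lets a following ADC/SBC consume the carry.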
	case ssa.OpARMADDshiftRAreg,
		ssa.OpARMADCshiftRAreg,
		ssa.OpARMSUBshiftRAreg,
		ssa.OpARMSBCshiftRAreg,
		ssa.OpARMRSBshiftRAreg,
		ssa.OpARMRSCshiftRAreg,
		ssa.OpARMANDshiftRAreg,
		ssa.OpARMORshiftRAreg,
		ssa.OpARMXORshiftRAreg,
		ssa.OpARMBICshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDSshiftRAreg,
		ssa.OpARMSUBSshiftRAreg,
		ssa.OpARMRSBSshiftRAreg:
		p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMHMUL,
		ssa.OpARMHMULU:
		// 32-bit high multiplication
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg()
		p.To.Offset = arm.REGTMP // throw away the low 32 bits into the tmp register
	case ssa.OpARMMULLU:
		// 32-bit multiplication with a 64-bit result: the high 32 bits go in out0, the low 32 bits in out1
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg0()           // high 32 bits
		p.To.Offset = int64(v.Reg1()) // low 32 bits
	case ssa.OpARMMULA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG2
		p.To.Reg = v.Reg()                   // result
		p.To.Offset = int64(v.Args[2].Reg()) // addend
	case ssa.OpARMMOVWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVFconst,
		ssa.OpARMMOVDconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMP,
		ssa.OpARMCMN,
		ssa.OpARMTST,
		ssa.OpARMTEQ,
		ssa.OpARMCMPF,
		ssa.OpARMCMPD:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		// Special layout in ARM assembly:
		// compared to x86, the operands of ARM's CMP are reversed.
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
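		// For example, with Args[0] in R0 and Args[1] in R1 this emits
		// "CMP R1, R0", which sets the flags from R0-R1, i.e. Args[0]
		// compared against Args[1].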
	case ssa.OpARMCMPconst,
		ssa.OpARMCMNconst,
		ssa.OpARMTSTconst,
		ssa.OpARMTEQconst:
		// Special layout in ARM assembly
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPF0,
		ssa.OpARMCMPD0:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPshiftLL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMCMPshiftRL:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMCMPshiftRA:
		genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMCMPshiftLLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
	case ssa.OpARMCMPshiftRLreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
	case ssa.OpARMCMPshiftRAreg:
		genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
	case ssa.OpARMMOVWaddr:
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

		var wantreg string
		// MOVW $sym+off(base), R
		// the assembler expands it as follows:
		// - base is SP: add the constant offset to SP (R13);
		//   when the constant is large, the tmp register (R11) may be used
		// - base is SB: load the external address from the constant pool (uses a relocation)
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *ssa.ExternSymbol:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *ssa.ArgSymbol, *ssa.AutoSymbol:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Reg = arm.REGSP
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}

	case ssa.OpARMMOVBload,
		ssa.OpARMMOVBUload,
		ssa.OpARMMOVHload,
		ssa.OpARMMOVHUload,
		ssa.OpARMMOVWload,
		ssa.OpARMMOVFload,
		ssa.OpARMMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVBstore,
		ssa.OpARMMOVHstore,
		ssa.OpARMMOVWstore,
		ssa.OpARMMOVFstore,
		ssa.OpARMMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpARMMOVWloadidx:
		// this is just a shift by 0 bits
		fallthrough
	case ssa.OpARMMOVWloadshiftLL:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRL:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRA:
		p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
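	// The indexed loads above access Args[0] + (Args[1] shifted by
	// AuxInt). The indexed stores below mirror this, encoding the
	// shifted index as a TYPE_SHIFT destination operand.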
	case ssa.OpARMMOVWstoreidx:
		// this is just a shift by 0 bits
		fallthrough
	case ssa.OpARMMOVWstoreshiftLL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
	case ssa.OpARMMOVBreg,
		ssa.OpARMMOVBUreg,
		ssa.OpARMMOVHreg,
		ssa.OpARMMOVHUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
				// arg is a properly typed load, already zero/sign-extended; don't extend again
				if v.Reg() == v.Args[0].Reg() {
					return
				}
				p := s.Prog(arm.AMOVW)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = v.Args[0].Reg()
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				return
			default:
			}
		}
		fallthrough
	case ssa.OpARMMVN,
		ssa.OpARMCLZ,
		ssa.OpARMREV,
		ssa.OpARMRBIT,
		ssa.OpARMSQRTD,
		ssa.OpARMNEGF,
		ssa.OpARMNEGD,
		ssa.OpARMMOVWF,
		ssa.OpARMMOVWD,
		ssa.OpARMMOVFW,
		ssa.OpARMMOVDW,
		ssa.OpARMMOVFD,
		ssa.OpARMMOVDF:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVWUF,
		ssa.OpARMMOVWUD,
		ssa.OpARMMOVFWU,
		ssa.OpARMMOVDWU:
		p := s.Prog(v.Op.Asm())
		p.Scond = arm.C_UBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWHSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWLSconst:
		p := s.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_LS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
		s.Call(v)
	case ssa.OpARMCALLudiv:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Udiv
	case ssa.OpARMDUFFZERO:
		p := s.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Duffzero
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := s.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Duffcopy
		p.To.Offset = v.AuxInt
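	// DUFFZERO and DUFFCOPY above jump into the middle of the runtime's
	// duffzero/duffcopy routines; AuxInt is the entry offset, so the
	// amount zeroed or copied is selected by the entry point rather
	// than by a count register.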
	case ssa.OpARMLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := s.Prog(arm.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpARMLoweredZero:
		// MOVW.P	Rarg2, 4(R1)
		// CMP	Rarg1, R1
		// BLE	-2(PC)
		// arg1 is the address of the last element to zero
		// arg2 is known to be zero
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REG_R1
		p.To.Offset = sz
		p2 := s.Prog(arm.ACMP)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Args[1].Reg()
		p2.Reg = arm.REG_R1
		p3 := s.Prog(arm.ABLE)
		p3.To.Type = obj.TYPE_BRANCH
		gc.Patch(p3, p)
	case ssa.OpARMLoweredMove:
		// MOVW.P	4(R1), Rtmp
		// MOVW.P	Rtmp, 4(R2)
		// CMP	Rarg2, R1
		// BLE	-3(PC)
		// arg2 is the address of the last element of src
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := s.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = arm.REG_R1
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		p2 := s.Prog(mov)
		p2.Scond = arm.C_PBIT
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = arm.REG_R2
		p2.To.Offset = sz
		p3 := s.Prog(arm.ACMP)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[2].Reg()
		p3.Reg = arm.REG_R1
		p4 := s.Prog(arm.ABLE)
		p4.To.Type = obj.TYPE_BRANCH
		gc.Patch(p4, p)
	case ssa.OpARMEqual,
		ssa.OpARMNotEqual,
		ssa.OpARMLessThan,
		ssa.OpARMLessEqual,
		ssa.OpARMGreaterThan,
		ssa.OpARMGreaterEqual,
		ssa.OpARMLessThanU,
		ssa.OpARMLessEqualU,
		ssa.OpARMGreaterThanU,
		ssa.OpARMGreaterEqualU:
		// generate boolean values
		// use conditional move
		p := s.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p = s.Prog(arm.AMOVW)
		p.Scond = condBits[v.Op]
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMLoweredGetClosurePtr:
		// Closure pointer is R7 (arm.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpARMFlagEQ,
		ssa.OpARMFlagLT_ULT,
		ssa.OpARMFlagLT_UGT,
		ssa.OpARMFlagGT_ULT,
		ssa.OpARMFlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpARMInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}
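// condBits maps an SSA comparison pseudo-op to the ARM condition code
// that predicates the "MOVW $1" emitted above; unsigned comparisons
// use the LO/LS/HI/HS conditions.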
var condBits = map[ssa.Op]uint8{
	ssa.OpARMEqual:         arm.C_SCOND_EQ,
	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
	ssa.OpARMLessThan:      arm.C_SCOND_LT,
	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
}

var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
}

func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockDefer:
		// defer returns in R0:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(arm.ACMP)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.Reg = arm.REG_R0
		p = s.Prog(arm.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here

	case ssa.BlockRet:
		s.Prog(obj.ARET)

	case ssa.BlockRetJmp:
		p := s.Prog(obj.ARET)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)

	case ssa.BlockARMEQ, ssa.BlockARMNE,
		ssa.BlockARMLT, ssa.BlockARMGE,
		ssa.BlockARMLE, ssa.BlockARMGT,
		ssa.BlockARMULT, ssa.BlockARMUGT,
		ssa.BlockARMULE, ssa.BlockARMUGE:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			p = s.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := s.Prog(obj.AJMP)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}

	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}
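// Branch selection sketch for the conditional blocks above: when the
// fallthrough block is Succs[0], ssaGenBlock emits the inverted branch
// (invasm) to Succs[1]; when it is Succs[1], the direct branch (asm) to
// Succs[0]; otherwise it emits the direct branch to Succs[0] followed
// by an unconditional jump to Succs[1].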