// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm64

import (
	"math"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/obj/arm64"
)

// ssaRegToReg maps ssa register numbers to obj register numbers.
var ssaRegToReg = []int16{
	arm64.REG_R0,
	arm64.REG_R1,
	arm64.REG_R2,
	arm64.REG_R3,
	arm64.REG_R4,
	arm64.REG_R5,
	arm64.REG_R6,
	arm64.REG_R7,
	arm64.REG_R8,
	arm64.REG_R9,
	arm64.REG_R10,
	arm64.REG_R11,
	arm64.REG_R12,
	arm64.REG_R13,
	arm64.REG_R14,
	arm64.REG_R15,
	arm64.REG_R16,
	arm64.REG_R17,
	arm64.REG_R18, // platform register, not used
	arm64.REG_R19,
	arm64.REG_R20,
	arm64.REG_R21,
	arm64.REG_R22,
	arm64.REG_R23,
	arm64.REG_R24,
	arm64.REG_R25,
	arm64.REG_R26,
	// R27 = REGTMP not used in regalloc
	arm64.REGG,    // R28
	arm64.REG_R29, // frame pointer, not used
	// R30 = REGLINK not used in regalloc
	arm64.REGSP, // R31

	arm64.REG_F0,
	arm64.REG_F1,
	arm64.REG_F2,
	arm64.REG_F3,
	arm64.REG_F4,
	arm64.REG_F5,
	arm64.REG_F6,
	arm64.REG_F7,
	arm64.REG_F8,
	arm64.REG_F9,
	arm64.REG_F10,
	arm64.REG_F11,
	arm64.REG_F12,
	arm64.REG_F13,
	arm64.REG_F14,
	arm64.REG_F15,
	arm64.REG_F16,
	arm64.REG_F17,
	arm64.REG_F18,
	arm64.REG_F19,
	arm64.REG_F20,
	arm64.REG_F21,
	arm64.REG_F22,
	arm64.REG_F23,
	arm64.REG_F24,
	arm64.REG_F25,
	arm64.REG_F26,
	arm64.REG_F27,
	arm64.REG_F28,
	arm64.REG_F29,
	arm64.REG_F30,
	arm64.REG_F31,

	arm64.REG_NZCV, // flag
	0,              // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
}

// Smallest possible faulting page at address zero,
// see ../../../../runtime/mheap.go:/minPhysPageSize
const minZeroPage = 4096

// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm64.AFMOVS
		case 8:
			return arm64.AFMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm64.AMOVB
			} else {
				return arm64.AMOVBU
			}
		case 2:
			if t.IsSigned() {
				return arm64.AMOVH
			} else {
				return arm64.AMOVHU
			}
		case 4:
			if t.IsSigned() {
				return arm64.AMOVW
			} else {
				return arm64.AMOVWU
			}
		case 8:
			return arm64.AMOVD
		}
	}
	panic("bad load type")
}
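
// Note: stores, unlike loads, need no sign- or zero-extending variants; the
// store width alone determines which bytes of the source register are written,
// so storeByType below returns AMOVB/AMOVH/AMOVW/AMOVD regardless of signedness.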

// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm64.AFMOVS
		case 8:
			return arm64.AFMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return arm64.AMOVB
		case 2:
			return arm64.AMOVH
		case 4:
			return arm64.AMOVW
		case 8:
			return arm64.AMOVD
		}
	}
	panic("bad store type")
}

// makeshift encodes a register shifted by a constant, used as an Offset in Prog
func makeshift(reg int16, typ int64, s int64) int64 {
	return int64(reg&31)<<16 | typ | (s&63)<<10
}

// genshift generates a Prog for r = r0 op (r1 shifted by s)
func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
	p := gc.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = makeshift(r1, typ, s)
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}

func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	s.SetLineno(v.Line)
	switch v.Op {
	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
		// nothing to do
	case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg:
		if v.Type.IsMemory() {
			return
		}
		x := gc.SSARegNum(v.Args[0])
		y := gc.SSARegNum(v)
		if x == y {
			return
		}
		as := arm64.AMOVD
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = arm64.AFMOVS
			case 8:
				as = arm64.AFMOVD
			default:
				panic("bad float size")
			}
		}
		p := gc.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpARM64MOVDnop:
		if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Unimplementedf("load flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(loadByType(v.Type))
		n, off := gc.AutoVar(v.Args[0])
		p.From.Type = obj.TYPE_MEM
		p.From.Node = n
		p.From.Sym = gc.Linksym(n.Sym)
		p.From.Offset = off
		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
			p.From.Name = obj.NAME_PARAM
			p.From.Offset += n.Xoffset
		} else {
			p.From.Name = obj.NAME_AUTO
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpPhi:
		gc.CheckLoweredPhi(v)
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Unimplementedf("store flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		n, off := gc.AutoVar(v)
		p.To.Type = obj.TYPE_MEM
		p.To.Node = n
		p.To.Sym = gc.Linksym(n.Sym)
		p.To.Offset = off
		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
			p.To.Name = obj.NAME_PARAM
			p.To.Offset += n.Xoffset
		} else {
			p.To.Name = obj.NAME_AUTO
		}
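	// Two-source register ops. The obj.Prog operand convention is: p.From
	// holds the second source (v.Args[1]), p.Reg the first (v.Args[0]), and
	// p.To the destination, so e.g. SUB computes Args[0] - Args[1].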
	case ssa.OpARM64ADD,
		ssa.OpARM64SUB,
		ssa.OpARM64AND,
		ssa.OpARM64OR,
		ssa.OpARM64XOR,
		ssa.OpARM64BIC,
		ssa.OpARM64MUL,
		ssa.OpARM64MULW,
		ssa.OpARM64MULH,
		ssa.OpARM64UMULH,
		ssa.OpARM64MULL,
		ssa.OpARM64UMULL,
		ssa.OpARM64DIV,
		ssa.OpARM64UDIV,
		ssa.OpARM64DIVW,
		ssa.OpARM64UDIVW,
		ssa.OpARM64MOD,
		ssa.OpARM64UMOD,
		ssa.OpARM64MODW,
		ssa.OpARM64UMODW,
		ssa.OpARM64SLL,
		ssa.OpARM64SRL,
		ssa.OpARM64SRA,
		ssa.OpARM64FADDS,
		ssa.OpARM64FADDD,
		ssa.OpARM64FSUBS,
		ssa.OpARM64FSUBD,
		ssa.OpARM64FMULS,
		ssa.OpARM64FMULD,
		ssa.OpARM64FDIVS,
		ssa.OpARM64FDIVD:
		r := gc.SSARegNum(v)
		r1 := gc.SSARegNum(v.Args[0])
		r2 := gc.SSARegNum(v.Args[1])
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARM64ADDconst,
		ssa.OpARM64SUBconst,
		ssa.OpARM64ANDconst,
		ssa.OpARM64ORconst,
		ssa.OpARM64XORconst,
		ssa.OpARM64BICconst,
		ssa.OpARM64SLLconst,
		ssa.OpARM64SRLconst,
		ssa.OpARM64SRAconst,
		ssa.OpARM64RORconst,
		ssa.OpARM64RORWconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
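	// Ops with a shifted-register operand. genshift packs the shifted source
	// into p.From.Offset via makeshift, i.e. (reg&31)<<16 | shift-type | (s&63)<<10.
	// For example (illustrative registers), ADDshiftLL with AuxInt=3 assembles
	// as ADD R1<<3, R0, R2.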
	case ssa.OpARM64ADDshiftLL,
		ssa.OpARM64SUBshiftLL,
		ssa.OpARM64ANDshiftLL,
		ssa.OpARM64ORshiftLL,
		ssa.OpARM64XORshiftLL,
		ssa.OpARM64BICshiftLL:
		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_LL, v.AuxInt)
	case ssa.OpARM64ADDshiftRL,
		ssa.OpARM64SUBshiftRL,
		ssa.OpARM64ANDshiftRL,
		ssa.OpARM64ORshiftRL,
		ssa.OpARM64XORshiftRL,
		ssa.OpARM64BICshiftRL:
		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_LR, v.AuxInt)
	case ssa.OpARM64ADDshiftRA,
		ssa.OpARM64SUBshiftRA,
		ssa.OpARM64ANDshiftRA,
		ssa.OpARM64ORshiftRA,
		ssa.OpARM64XORshiftRA,
		ssa.OpARM64BICshiftRA:
		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_AR, v.AuxInt)
	case ssa.OpARM64MOVDconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpARM64FMOVSconst,
		ssa.OpARM64FMOVDconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpARM64CMP,
		ssa.OpARM64CMPW,
		ssa.OpARM64CMN,
		ssa.OpARM64CMNW,
		ssa.OpARM64FCMPS,
		ssa.OpARM64FCMPD:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.Reg = gc.SSARegNum(v.Args[0])
	case ssa.OpARM64CMPconst,
		ssa.OpARM64CMPWconst,
		ssa.OpARM64CMNconst,
		ssa.OpARM64CMNWconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = gc.SSARegNum(v.Args[0])
	case ssa.OpARM64CMPshiftLL:
		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_LL, v.AuxInt)
	case ssa.OpARM64CMPshiftRL:
		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_LR, v.AuxInt)
	case ssa.OpARM64CMPshiftRA:
		genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_AR, v.AuxInt)
	case ssa.OpARM64MOVDaddr:
		p := gc.Prog(arm64.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		var wantreg string
		// MOVD $sym+off(base), R
		// the assembler expands it as the following:
		// - base is SP: add constant offset to SP
		//   when the constant is large, the tmp register (REGTMP) may be used
		// - base is SB: load external address from constant pool (use relocation)
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *ssa.ExternSymbol:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *ssa.ArgSymbol, *ssa.AutoSymbol:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVD $off(SP), R
			wantreg = "SP"
			p.From.Reg = arm64.REGSP
			p.From.Offset = v.AuxInt
		}
		if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
		}
	case ssa.OpARM64MOVBload,
		ssa.OpARM64MOVBUload,
		ssa.OpARM64MOVHload,
		ssa.OpARM64MOVHUload,
		ssa.OpARM64MOVWload,
		ssa.OpARM64MOVWUload,
		ssa.OpARM64MOVDload,
		ssa.OpARM64FMOVSload,
		ssa.OpARM64FMOVDload:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpARM64MOVBstore,
		ssa.OpARM64MOVHstore,
		ssa.OpARM64MOVWstore,
		ssa.OpARM64MOVDstore,
		ssa.OpARM64FMOVSstore,
		ssa.OpARM64FMOVDstore:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)
	case ssa.OpARM64MOVBstorezero,
		ssa.OpARM64MOVHstorezero,
		ssa.OpARM64MOVWstorezero,
		ssa.OpARM64MOVDstorezero:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = arm64.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)
	case ssa.OpARM64MOVBreg,
		ssa.OpARM64MOVBUreg,
		ssa.OpARM64MOVHreg,
		ssa.OpARM64MOVHUreg,
		ssa.OpARM64MOVWreg,
		ssa.OpARM64MOVWUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
				v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
				v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
				// arg is a proper-typed load, already zero/sign-extended, don't extend again
				if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
					return
				}
				p := gc.Prog(arm64.AMOVD)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = gc.SSARegNum(v.Args[0])
				p.To.Type = obj.TYPE_REG
				p.To.Reg = gc.SSARegNum(v)
				return
			default:
			}
		}
		fallthrough
	case ssa.OpARM64MVN,
		ssa.OpARM64NEG,
		ssa.OpARM64FNEGS,
		ssa.OpARM64FNEGD,
		ssa.OpARM64FSQRTD,
		ssa.OpARM64FCVTZSSW,
		ssa.OpARM64FCVTZSDW,
		ssa.OpARM64FCVTZUSW,
		ssa.OpARM64FCVTZUDW,
		ssa.OpARM64FCVTZSS,
		ssa.OpARM64FCVTZSD,
		ssa.OpARM64FCVTZUS,
		ssa.OpARM64FCVTZUD,
		ssa.OpARM64SCVTFWS,
		ssa.OpARM64SCVTFWD,
		ssa.OpARM64SCVTFS,
		ssa.OpARM64SCVTFD,
		ssa.OpARM64UCVTFWS,
		ssa.OpARM64UCVTFWD,
		ssa.OpARM64UCVTFS,
		ssa.OpARM64UCVTFD,
		ssa.OpARM64FCVTSD,
		ssa.OpARM64FCVTDS,
		ssa.OpARM64REV,
		ssa.OpARM64REVW,
		ssa.OpARM64REV16W:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
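	// Conditional select: CSELULT yields Args[0] if the flags encode
	// unsigned less-than (LO) and Args[1] otherwise; CSELULT0 uses ZR as the
	// "otherwise" value. The assembler takes the condition in From.Reg and
	// the second source in From3.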
	case ssa.OpARM64CSELULT,
		ssa.OpARM64CSELULT0:
		r1 := int16(arm64.REGZERO)
		if v.Op == ssa.OpARM64CSELULT {
			r1 = gc.SSARegNum(v.Args[1])
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
		p.From.Reg = arm64.COND_LO
		p.Reg = gc.SSARegNum(v.Args[0])
		p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpARM64DUFFZERO:
		// runtime.duffzero expects start address - 8 in R16
		p := gc.Prog(arm64.ASUB)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		p.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm64.REG_R16
		p = gc.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
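	// The zero and move loops below use post-indexed addressing (Scond =
	// C_XPOST, the ".P" suffix): each MOVD accesses memory at R16/R17 and
	// then advances that pointer by 8, and the CMP/BLE pair keeps looping
	// until R16 has moved past the last-element address in the final argument.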
	case ssa.OpARM64LoweredZero:
		// MOVD.P	ZR, 8(R16)
		// CMP	Rarg1, R16
		// BLE	-2(PC)
		// arg1 is the address of the last element to zero
		p := gc.Prog(arm64.AMOVD)
		p.Scond = arm64.C_XPOST
		p.From.Type = obj.TYPE_REG
		p.From.Reg = arm64.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm64.REG_R16
		p.To.Offset = 8
		p2 := gc.Prog(arm64.ACMP)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = gc.SSARegNum(v.Args[1])
		p2.Reg = arm64.REG_R16
		p3 := gc.Prog(arm64.ABLE)
		p3.To.Type = obj.TYPE_BRANCH
		gc.Patch(p3, p)
	case ssa.OpARM64LoweredMove:
		// MOVD.P	8(R16), Rtmp
		// MOVD.P	Rtmp, 8(R17)
		// CMP	Rarg2, R16
		// BLE	-3(PC)
		// arg2 is the address of the last element of src
		p := gc.Prog(arm64.AMOVD)
		p.Scond = arm64.C_XPOST
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = arm64.REG_R16
		p.From.Offset = 8
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm64.REGTMP
		p2 := gc.Prog(arm64.AMOVD)
		p2.Scond = arm64.C_XPOST
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm64.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = arm64.REG_R17
		p2.To.Offset = 8
		p3 := gc.Prog(arm64.ACMP)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = gc.SSARegNum(v.Args[2])
		p3.Reg = arm64.REG_R16
		p4 := gc.Prog(arm64.ABLE)
		p4.To.Type = obj.TYPE_BRANCH
		gc.Patch(p4, p)
	case ssa.OpARM64CALLstatic:
		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
			// Deferred calls will appear to be returning to
			// the CALL deferreturn(SB) that we are about to emit.
			// However, the stack trace code will show the line
			// of the instruction byte before the return PC.
			// To avoid that being an unrelated instruction,
			// insert an actual hardware NOP that will have the right line number.
			// This is different from obj.ANOP, which is a virtual no-op
			// that doesn't make it into the instruction stream.
			ginsnop()
		}
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLclosure:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64LoweredNilCheck:
		// Optimization - if the subsequent block has a load or store
		// at the same address, we don't need to issue this instruction.
		mem := v.Args[1]
		for _, w := range v.Block.Succs[0].Block().Values {
			if w.Op == ssa.OpPhi {
				if w.Type.IsMemory() {
					mem = w
				}
				continue
			}
			if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
				// w doesn't use a store - can't be a memory op.
				continue
			}
			if w.Args[len(w.Args)-1] != mem {
				v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
			}
			switch w.Op {
			case ssa.OpARM64MOVBload, ssa.OpARM64MOVBUload, ssa.OpARM64MOVHload, ssa.OpARM64MOVHUload,
				ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload, ssa.OpARM64MOVDload,
				ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload,
				ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore, ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore,
				ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore:
				// arg0 is ptr, auxint is offset
				if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
						gc.Warnl(v.Line, "removed nil check")
					}
					return
				}
			case ssa.OpARM64DUFFZERO, ssa.OpARM64LoweredZero:
				// arg0 is ptr
				if w.Args[0] == v.Args[0] {
					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
						gc.Warnl(v.Line, "removed nil check")
					}
					return
				}
			case ssa.OpARM64LoweredMove:
				// arg0 is dst ptr, arg1 is src ptr
				if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
						gc.Warnl(v.Line, "removed nil check")
					}
					return
				}
			default:
			}
			if w.Type.IsMemory() {
				if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
					// these ops are OK
					mem = w
					continue
				}
				// We can't delay the nil check past the next store.
				break
			}
		}
		// Issue a load which will fault if arg is nil.
		p := gc.Prog(arm64.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm64.REGTMP
		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			gc.Warnl(v.Line, "generated nil check")
		}
	case ssa.OpVarDef:
		gc.Gvardef(v.Aux.(*gc.Node))
	case ssa.OpVarKill:
		gc.Gvarkill(v.Aux.(*gc.Node))
	case ssa.OpVarLive:
		gc.Gvarlive(v.Aux.(*gc.Node))
	case ssa.OpKeepAlive:
		if !v.Args[0].Type.IsPtrShaped() {
			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
		}
		n, off := gc.AutoVar(v.Args[0])
		if n == nil {
			v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0])
		}
		if off != 0 {
			v.Fatalf("KeepAlive with non-zero offset spill location %s:%d", n, off)
		}
		gc.Gvarlive(n)
	case ssa.OpARM64Equal,
		ssa.OpARM64NotEqual,
		ssa.OpARM64LessThan,
		ssa.OpARM64LessEqual,
		ssa.OpARM64GreaterThan,
		ssa.OpARM64GreaterEqual,
		ssa.OpARM64LessThanU,
		ssa.OpARM64LessEqualU,
		ssa.OpARM64GreaterThanU,
		ssa.OpARM64GreaterEqualU:
		// generate boolean values using CSET
		p := gc.Prog(arm64.ACSET)
		p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
		p.From.Reg = condBits[v.Op]
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpSelect0, ssa.OpSelect1:
		// nothing to do
	case ssa.OpARM64LoweredGetClosurePtr:
		// Closure pointer is R26 (arm64.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpARM64FlagEQ,
		ssa.OpARM64FlagLT_ULT,
		ssa.OpARM64FlagLT_UGT,
		ssa.OpARM64FlagGT_ULT,
		ssa.OpARM64FlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpARM64InvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	default:
		v.Unimplementedf("genValue not implemented: %s", v.LongString())
	}
}

var condBits = map[ssa.Op]int16{
	ssa.OpARM64Equal:         arm64.COND_EQ,
	ssa.OpARM64NotEqual:      arm64.COND_NE,
	ssa.OpARM64LessThan:      arm64.COND_LT,
	ssa.OpARM64LessThanU:     arm64.COND_LO,
	ssa.OpARM64LessEqual:     arm64.COND_LE,
	ssa.OpARM64LessEqualU:    arm64.COND_LS,
	ssa.OpARM64GreaterThan:   arm64.COND_GT,
	ssa.OpARM64GreaterThanU:  arm64.COND_HI,
	ssa.OpARM64GreaterEqual:  arm64.COND_GE,
	ssa.OpARM64GreaterEqualU: arm64.COND_HS,
}

var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARM64EQ:  {arm64.ABEQ, arm64.ABNE},
	ssa.BlockARM64NE:  {arm64.ABNE, arm64.ABEQ},
	ssa.BlockARM64LT:  {arm64.ABLT, arm64.ABGE},
	ssa.BlockARM64GE:  {arm64.ABGE, arm64.ABLT},
	ssa.BlockARM64LE:  {arm64.ABLE, arm64.ABGT},
	ssa.BlockARM64GT:  {arm64.ABGT, arm64.ABLE},
	ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
	ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
	ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
	ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
}
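
// ssaGenBlock emits control flow for block b. For the conditional kinds,
// if the fallthrough block (next) is Succs[0], only the inverted branch to
// Succs[1] is needed; if next is Succs[1], only the direct branch to
// Succs[0]; otherwise both a conditional branch and an unconditional jump
// are emitted.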
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	s.SetLineno(b.Line)

	switch b.Kind {
	case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
		if b.Succs[0].Block() != next {
			p := gc.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockDefer:
		// defer returns in R0:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := gc.Prog(arm64.ACMP)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.Reg = arm64.REG_R0
		p = gc.Prog(arm64.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := gc.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockExit:
		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here

	case ssa.BlockRet:
		gc.Prog(obj.ARET)

	case ssa.BlockRetJmp:
		p := gc.Prog(obj.ARET)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))

	case ssa.BlockARM64EQ, ssa.BlockARM64NE,
		ssa.BlockARM64LT, ssa.BlockARM64GE,
		ssa.BlockARM64LE, ssa.BlockARM64GT,
		ssa.BlockARM64ULT, ssa.BlockARM64UGT,
		ssa.BlockARM64ULE, ssa.BlockARM64UGE:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			p = gc.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := gc.Prog(obj.AJMP)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}

	default:
		b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}