github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/riscv64/ssa.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package riscv64

import (
	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/objw"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ssa"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ssagen"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
	"github.com/bir3/gocompiler/src/cmd/internal/obj"
	"github.com/bir3/gocompiler/src/cmd/internal/obj/riscv"
)

// ssaRegToReg maps ssa register numbers to obj register numbers.
var ssaRegToReg = []int16{
	riscv.REG_X0,
	// X1 (LR): unused
	riscv.REG_X2,
	riscv.REG_X3,
	riscv.REG_X4,
	riscv.REG_X5,
	riscv.REG_X6,
	riscv.REG_X7,
	riscv.REG_X8,
	riscv.REG_X9,
	riscv.REG_X10,
	riscv.REG_X11,
	riscv.REG_X12,
	riscv.REG_X13,
	riscv.REG_X14,
	riscv.REG_X15,
	riscv.REG_X16,
	riscv.REG_X17,
	riscv.REG_X18,
	riscv.REG_X19,
	riscv.REG_X20,
	riscv.REG_X21,
	riscv.REG_X22,
	riscv.REG_X23,
	riscv.REG_X24,
	riscv.REG_X25,
	riscv.REG_X26,
	riscv.REG_X27,
	riscv.REG_X28,
	riscv.REG_X29,
	riscv.REG_X30,
	riscv.REG_X31,
	riscv.REG_F0,
	riscv.REG_F1,
	riscv.REG_F2,
	riscv.REG_F3,
	riscv.REG_F4,
	riscv.REG_F5,
	riscv.REG_F6,
	riscv.REG_F7,
	riscv.REG_F8,
	riscv.REG_F9,
	riscv.REG_F10,
	riscv.REG_F11,
	riscv.REG_F12,
	riscv.REG_F13,
	riscv.REG_F14,
	riscv.REG_F15,
	riscv.REG_F16,
	riscv.REG_F17,
	riscv.REG_F18,
	riscv.REG_F19,
	riscv.REG_F20,
	riscv.REG_F21,
	riscv.REG_F22,
	riscv.REG_F23,
	riscv.REG_F24,
	riscv.REG_F25,
	riscv.REG_F26,
	riscv.REG_F27,
	riscv.REG_F28,
	riscv.REG_F29,
	riscv.REG_F30,
	riscv.REG_F31,
	0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
}

// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
	width := t.Size()

	if t.IsFloat() {
		switch width {
		case 4:
			return riscv.AMOVF
		case 8:
			return riscv.AMOVD
		default:
			base.Fatalf("unknown float width for load %d in type %v", width, t)
			return 0
		}
	}

	switch width {
	case 1:
		if t.IsSigned() {
			return riscv.AMOVB
		} else {
			return riscv.AMOVBU
		}
	case 2:
		if t.IsSigned() {
			return riscv.AMOVH
		} else {
			return riscv.AMOVHU
		}
	case 4:
		if t.IsSigned() {
			return riscv.AMOVW
		} else {
			return riscv.AMOVWU
		}
	case 8:
		return riscv.AMOV
	default:
		base.Fatalf("unknown width for load %d in type %v", width, t)
		return 0
	}
}
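
// For example, loading an int16 uses MOVH (sign-extending) while a uint16
// uses MOVHU (zero-extending); stores need no such distinction, so
// storeByType below uses MOVH for both.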

// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type) obj.As {
	width := t.Size()

	if t.IsFloat() {
		switch width {
		case 4:
			return riscv.AMOVF
		case 8:
			return riscv.AMOVD
		default:
			base.Fatalf("unknown float width for store %d in type %v", width, t)
			return 0
		}
	}

	switch width {
	case 1:
		return riscv.AMOVB
	case 2:
		return riscv.AMOVH
	case 4:
		return riscv.AMOVW
	case 8:
		return riscv.AMOV
	default:
		base.Fatalf("unknown width for store %d in type %v", width, t)
		return 0
	}
}

// largestMove returns the largest move instruction possible and its size,
// given the alignment of the total size of the move.
//
// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
//
// Note that the moves may not be on naturally aligned addresses depending on
// the source and destination.
//
// This matches the calculation in ssa.moveSize.
func largestMove(alignment int64) (obj.As, int64) {
	switch {
	case alignment%8 == 0:
		return riscv.AMOV, 8
	case alignment%4 == 0:
		return riscv.AMOVW, 4
	case alignment%2 == 0:
		return riscv.AMOVH, 2
	default:
		return riscv.AMOVB, 1
	}
}

// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
// RISC-V has no flags, so this is a no-op.
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
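
// ssaGenValue emits the machine instructions for a single SSA value. Most
// cases share the same shape: allocate an obj.Prog with s.Prog, then fill
// in its operand fields (From, Reg, RestArgs, To).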
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	s.SetPos(v.Pos)

	switch v.Op {
	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpPhi:
		ssagen.CheckLoweredPhi(v)
	case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
		if v.Type.IsMemory() {
			return
		}
		rs := v.Args[0].Reg()
		rd := v.Reg()
		if rs == rd {
			return
		}
		as := riscv.AMOV
		if v.Type.IsFloat() {
			as = riscv.AMOVD
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = rs
		p.To.Type = obj.TYPE_REG
		p.To.Reg = rd
	case ssa.OpRISCV64MOVDnop:
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		ssagen.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddrAuto(&p.To, v)
	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill.
		// The loop only runs once.
		for _, a := range v.Block.Func.RegArgs {
			// Pass the spill/unspill information along to the assembler, offset by size of
			// the saved LR slot.
			addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
			s.FuncInfo().AddSpill(
				obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
		}
		v.Block.Func.RegArgs = nil

		ssagen.CheckArgReg(v)
	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
		// nothing to do
	case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
		ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
			a = a.Args[0]
		}
		as := v.Op.Asm()
		rs := v.Args[0].Reg()
		rd := v.Reg()
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
				v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
				v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
				// arg is a properly typed load and already sign/zero-extended
				if rs == rd {
					return
				}
				as = riscv.AMOV
			default:
			}
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = rs
		p.To.Type = obj.TYPE_REG
		p.To.Reg = rd
	case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
		ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
		ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
		ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
		ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
		ssa.OpRISCV64REMUW,
		ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
		ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
		ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
		ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED,
		ssa.OpRISCV64FSGNJD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpRISCV64LoweredMuluhilo:
		r0 := v.Args[0].Reg()
		r1 := v.Args[1].Reg()
		p := s.Prog(riscv.AMULHU)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
		p1 := s.Prog(riscv.AMUL)
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = r1
		p1.Reg = r0
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = v.Reg1()
	case ssa.OpRISCV64LoweredMuluover:
		r0 := v.Args[0].Reg()
		r1 := v.Args[1].Reg()
		p := s.Prog(riscv.AMULHU)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg1()
		p1 := s.Prog(riscv.AMUL)
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = r1
		p1.Reg = r0
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = v.Reg0()
		p2 := s.Prog(riscv.ASNEZ)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Reg1()
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = v.Reg1()
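	// Fused multiply-add. An obj.Prog has only two fixed source-operand
	// fields (From and Reg), so the third source operand is carried in
	// RestArgs.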
	case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		r3 := v.Args[2].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_REG, Reg: r3}})
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
		ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
		ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
		ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
		ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
		ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
		ssa.OpRISCV64SLTIU:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpRISCV64MOVDconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpRISCV64MOVaddr:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_ADDR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

		var wantreg string
		// MOVW $sym+off(base), R
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *obj.LSym:
			wantreg = "SB"
			ssagen.AddAux(&p.From, v)
		case *ir.Name:
			wantreg = "SP"
			ssagen.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Reg = riscv.REG_SP
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}
	case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
		ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
		ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
		ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = riscv.REG_ZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
		s.Call(v)
	case ssa.OpRISCV64CALLtail:
		s.TailCall(v)
	case ssa.OpRISCV64LoweredWB:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = v.Aux.(*obj.LSym)
	case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
		s.UseArgs(16) // space used in callee args area by assembly stubs
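
	// Atomic memory operations. The RISC-V A extension provides LR/SC and
	// AMO instructions only for 32- and 64-bit widths, so 8-bit atomic
	// loads and stores are emitted as plain byte moves bracketed by FENCE
	// instructions.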
	case ssa.OpRISCV64LoweredAtomicLoad8:
		s.Prog(riscv.AFENCE)
		p := s.Prog(riscv.AMOVBU)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
		s.Prog(riscv.AFENCE)

	case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
		as := riscv.ALRW
		if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
			as = riscv.ALRD
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

	case ssa.OpRISCV64LoweredAtomicStore8:
		s.Prog(riscv.AFENCE)
		p := s.Prog(riscv.AMOVB)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		s.Prog(riscv.AFENCE)

	case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
		as := riscv.AAMOSWAPW
		if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
			as = riscv.AAMOSWAPD
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.RegTo2 = riscv.REG_ZERO

	case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
		as := riscv.AAMOADDW
		if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
			as = riscv.AAMOADDD
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.RegTo2 = riscv.REG_TMP

		p2 := s.Prog(riscv.AADD)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = riscv.REG_TMP
		p2.Reg = v.Args[1].Reg()
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = v.Reg0()

	case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
		as := riscv.AAMOSWAPW
		if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
			as = riscv.AAMOSWAPD
		}
		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.RegTo2 = v.Reg0()

	case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
		// MOV	ZERO, Rout
		// LR	(Rarg0), Rtmp
		// BNE	Rtmp, Rarg1, 3(PC)
		// SC	Rarg2, (Rarg0), Rtmp
		// BNE	Rtmp, ZERO, -3(PC)
		// MOV	$1, Rout

		lr := riscv.ALRW
		sc := riscv.ASCW
		if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
			lr = riscv.ALRD
			sc = riscv.ASCD
		}

		r0 := v.Args[0].Reg()
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		out := v.Reg0()

		p := s.Prog(riscv.AMOV)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = riscv.REG_ZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = out

		p1 := s.Prog(lr)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = r0
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = riscv.REG_TMP

		p2 := s.Prog(riscv.ABNE)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = r1
		p2.Reg = riscv.REG_TMP
		p2.To.Type = obj.TYPE_BRANCH

		p3 := s.Prog(sc)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = r2
		p3.To.Type = obj.TYPE_MEM
		p3.To.Reg = r0
		p3.RegTo2 = riscv.REG_TMP

		p4 := s.Prog(riscv.ABNE)
		p4.From.Type = obj.TYPE_REG
		p4.From.Reg = riscv.REG_TMP
		p4.Reg = riscv.REG_ZERO
		p4.To.Type = obj.TYPE_BRANCH
		p4.To.SetTarget(p1)

		p5 := s.Prog(riscv.AMOV)
		p5.From.Type = obj.TYPE_CONST
		p5.From.Offset = 1
		p5.To.Type = obj.TYPE_REG
		p5.To.Reg = out

		p6 := s.Prog(obj.ANOP)
		p2.To.SetTarget(p6)
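
	// AMOAND/AMOOR atomically combine the operand with memory. The old
	// value (returned via RegTo2) is not needed here, so it is discarded
	// by writing it to the ZERO register.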
	case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.RegTo2 = riscv.REG_ZERO

	case ssa.OpRISCV64LoweredZero:
		mov, sz := largestMove(v.AuxInt)

		// mov	ZERO, (Rarg0)
		// ADD	$sz, Rarg0
		// BGEU	Rarg1, Rarg0, -2(PC)

		p := s.Prog(mov)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = riscv.REG_ZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()

		p2 := s.Prog(riscv.AADD)
		p2.From.Type = obj.TYPE_CONST
		p2.From.Offset = sz
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = v.Args[0].Reg()

		p3 := s.Prog(riscv.ABGEU)
		p3.To.Type = obj.TYPE_BRANCH
		p3.Reg = v.Args[0].Reg()
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[1].Reg()
		p3.To.SetTarget(p)

	case ssa.OpRISCV64LoweredMove:
		mov, sz := largestMove(v.AuxInt)

		// mov	(Rarg1), T2
		// mov	T2, (Rarg0)
		// ADD	$sz, Rarg0
		// ADD	$sz, Rarg1
		// BGEU	Rarg2, Rarg1, -4(PC)

		p := s.Prog(mov)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = riscv.REG_T2

		p2 := s.Prog(mov)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = riscv.REG_T2
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = v.Args[0].Reg()

		p3 := s.Prog(riscv.AADD)
		p3.From.Type = obj.TYPE_CONST
		p3.From.Offset = sz
		p3.To.Type = obj.TYPE_REG
		p3.To.Reg = v.Args[0].Reg()

		p4 := s.Prog(riscv.AADD)
		p4.From.Type = obj.TYPE_CONST
		p4.From.Offset = sz
		p4.To.Type = obj.TYPE_REG
		p4.To.Reg = v.Args[1].Reg()

		p5 := s.Prog(riscv.ABGEU)
		p5.To.Type = obj.TYPE_BRANCH
		p5.Reg = v.Args[1].Reg()
		p5.From.Type = obj.TYPE_REG
		p5.From.Reg = v.Args[2].Reg()
		p5.To.SetTarget(p)

	case ssa.OpRISCV64LoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		// TODO: optimizations. See arm and amd64 LoweredNilCheck.
		p := s.Prog(riscv.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = riscv.REG_ZERO
		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line() == 1 in generated wrappers
			base.WarnfAt(v.Pos, "generated nil check")
		}

	case ssa.OpRISCV64LoweredGetClosurePtr:
		// Closure pointer is S10 (riscv.REG_CTXT).
		ssagen.CheckLoweredGetClosurePtr(v)

	case ssa.OpRISCV64LoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(riscv.AMOV)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpRISCV64LoweredGetCallerPC:
		p := s.Prog(obj.AGETCALLERPC)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpRISCV64DUFFZERO:
		p := s.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Duffzero
		p.To.Offset = v.AuxInt

	case ssa.OpRISCV64DUFFCOPY:
		p := s.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Duffcopy
		p.To.Offset = v.AuxInt

	case ssa.OpClobber, ssa.OpClobberReg:
		// TODO: implement for clobberdead experiment. Nop is ok for now.

	default:
		v.Fatalf("Unhandled op %v", v.Op)
	}
}
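
// blockBranch maps a conditional block kind to its branch instruction.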
var blockBranch = [...]obj.As{
	ssa.BlockRISCV64BEQ:  riscv.ABEQ,
	ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
	ssa.BlockRISCV64BGE:  riscv.ABGE,
	ssa.BlockRISCV64BGEU: riscv.ABGEU,
	ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
	ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
	ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
	ssa.BlockRISCV64BLT:  riscv.ABLT,
	ssa.BlockRISCV64BLTU: riscv.ABLTU,
	ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
	ssa.BlockRISCV64BNE:  riscv.ABNE,
	ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
}

func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
	s.SetPos(b.Pos)

	switch b.Kind {
	case ssa.BlockDefer:
		// defer returns in A0:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(riscv.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		p.From.Type = obj.TYPE_REG
		p.From.Reg = riscv.REG_ZERO
		p.Reg = riscv.REG_A0
		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit, ssa.BlockRetJmp:
	case ssa.BlockRet:
		s.Prog(obj.ARET)
	case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
		ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
		ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:

		as := blockBranch[b.Kind]
		invAs := riscv.InvertBranch(as)

		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			// Succs[0] is the fallthrough block: branch to Succs[1] on the
			// inverted condition.
			p = s.Br(invAs, b.Succs[1].Block())
		case b.Succs[1].Block():
			// Succs[1] is the fallthrough block: branch to Succs[0] on the
			// original condition.
			p = s.Br(as, b.Succs[0].Block())
		default:
			// Neither successor is next: conditionally branch to the likely
			// successor and unconditionally jump to the other.
			if b.Likely != ssa.BranchUnlikely {
				p = s.Br(as, b.Succs[0].Block())
				s.Br(obj.AJMP, b.Succs[1].Block())
			} else {
				p = s.Br(invAs, b.Succs[1].Block())
				s.Br(obj.AJMP, b.Succs[0].Block())
			}
		}

		p.From.Type = obj.TYPE_REG
		switch b.Kind {
		case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
			if b.NumControls() != 2 {
				b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
			}
			p.From.Reg = b.Controls[0].Reg()
			p.Reg = b.Controls[1].Reg()

		case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
			if b.NumControls() != 1 {
				b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
			}
			p.From.Reg = b.Controls[0].Reg()
		}

	default:
		b.Fatalf("Unhandled block: %s", b.LongString())
	}
}
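
// loadRegResult generates a load of the result value described by n and off
// from its stack slot into register reg.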
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
	p := s.Prog(loadByType(t))
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_AUTO
	p.From.Sym = n.Linksym()
	p.From.Offset = n.FrameOffset() + off
	p.To.Type = obj.TYPE_REG
	p.To.Reg = reg
	return p
}

// spillArgReg generates a store of argument register reg into the parameter
// stack slot described by n and off.
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
	p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
	p.To.Name = obj.NAME_PARAM
	p.To.Sym = n.Linksym()
	p.Pos = p.Pos.WithNotStmt()
	return p
}