github.com/riscv/riscv-go@v0.0.0-20200123204226-124ebd6fcc8e/src/cmd/compile/internal/s390x/ssa.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package s390x

import (
	"math"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/obj/s390x"
)

// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
	flive := b.FlagsLiveAtEnd
	if b.Control != nil && b.Control.Type.IsFlags() {
		flive = true
	}
	for i := len(b.Values) - 1; i >= 0; i-- {
		v := b.Values[i]
		if flive && v.Op == ssa.OpS390XMOVDconst {
			// The "mark" is any non-nil Aux value.
			v.Aux = v
		}
		if v.Type.IsFlags() {
			flive = false
		}
		for _, a := range v.Args {
			if a.Type.IsFlags() {
				flive = true
			}
		}
	}
}

// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return s390x.AFMOVS
		case 8:
			return s390x.AFMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return s390x.AMOVB
			} else {
				return s390x.AMOVBZ
			}
		case 2:
			if t.IsSigned() {
				return s390x.AMOVH
			} else {
				return s390x.AMOVHZ
			}
		case 4:
			if t.IsSigned() {
				return s390x.AMOVW
			} else {
				return s390x.AMOVWZ
			}
		case 8:
			return s390x.AMOVD
		}
	}
	panic("bad load type")
}

// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
	width := t.Size()
	if t.IsFloat() {
		switch width {
		case 4:
			return s390x.AFMOVS
		case 8:
			return s390x.AFMOVD
		}
	} else {
		switch width {
		case 1:
			return s390x.AMOVB
		case 2:
			return s390x.AMOVH
		case 4:
			return s390x.AMOVW
		case 8:
			return s390x.AMOVD
		}
	}
	panic("bad store type")
}

// moveByType returns the reg->reg move instruction of the given type.
func moveByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		return s390x.AFMOVD
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return s390x.AMOVB
			} else {
				return s390x.AMOVBZ
			}
		case 2:
			if t.IsSigned() {
				return s390x.AMOVH
			} else {
				return s390x.AMOVHZ
			}
		case 4:
			if t.IsSigned() {
				return s390x.AMOVW
			} else {
				return s390x.AMOVWZ
			}
		case 8:
			return s390x.AMOVD
		}
	}
	panic("bad move type")
}

// opregreg emits instructions for
//	dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog {
	p := gc.Prog(op)
	p.From.Type = obj.TYPE_REG
	p.To.Type = obj.TYPE_REG
	p.To.Reg = dest
	p.From.Reg = src
	return p
}

// opregregimm emits instructions for
//	dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog {
	p := gc.Prog(op)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = off
	p.Reg = src
	p.To.Reg = dest
	p.To.Type = obj.TYPE_REG
	return p
}

func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	s.SetPos(v.Pos)
	switch v.Op {
	case ssa.OpS390XSLD, ssa.OpS390XSLW,
		ssa.OpS390XSRD, ssa.OpS390XSRW,
		ssa.OpS390XSRAD, ssa.OpS390XSRAW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		if r2 == s390x.REG_R0 {
			v.Fatalf("cannot use R0 as shift value %s", v.LongString())
		}
		p := opregreg(v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	case ssa.OpS390XADD, ssa.OpS390XADDW,
		ssa.OpS390XSUB, ssa.OpS390XSUBW,
		ssa.OpS390XAND, ssa.OpS390XANDW,
		ssa.OpS390XOR, ssa.OpS390XORW,
		ssa.OpS390XXOR, ssa.OpS390XXORW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := opregreg(v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	// 2-address opcode arithmetic
	case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
		ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
		ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
		ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		opregreg(v.Op.Asm(), r, v.Args[1].Reg())
	case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
		ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
		ssa.OpS390XMODD, ssa.OpS390XMODW,
		ssa.OpS390XMODDU, ssa.OpS390XMODWU:

		// TODO(mundaym): use the temp registers every time like x86 does with AX?
		dividend := v.Args[0].Reg()
		divisor := v.Args[1].Reg()

		// CPU faults upon signed overflow, which occurs when the most
		// negative int is divided by -1.
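		//
		// In pseudo-assembly, the signed cases below expand to:
		//
		//	        CMP divisor, $-1
		//	        BEQ fixup
		//	        DIV/MOD
		//	        BR  done
		//	fixup:  NEG dividend          (division: n / -1 == -n)
		//	        or XOR dividend       (modulo:   n % -1 == 0)
		//	done:   ...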
		var j *obj.Prog
		if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
			v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {

			var c *obj.Prog
			c = gc.Prog(s390x.ACMP)
			j = gc.Prog(s390x.ABEQ)

			c.From.Type = obj.TYPE_REG
			c.From.Reg = divisor
			c.To.Type = obj.TYPE_CONST
			c.To.Offset = -1

			j.To.Type = obj.TYPE_BRANCH

		}

		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = divisor
		p.Reg = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = dividend

		// signed division, rest of the check for -1 case
		if j != nil {
			j2 := gc.Prog(s390x.ABR)
			j2.To.Type = obj.TYPE_BRANCH

			var n *obj.Prog
			if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
				// n * -1 = -n
				n = gc.Prog(s390x.ANEG)
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			} else {
				// n % -1 == 0
				n = gc.Prog(s390x.AXOR)
				n.From.Type = obj.TYPE_REG
				n.From.Reg = dividend
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			}

			j.To.Val = n
			j2.To.Val = s.Pc()
		}
	case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
		opregregimm(v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
	case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
		ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
		ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
		ssa.OpS390XORconst, ssa.OpS390XORWconst,
		ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
		ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
		ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
		ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		r := v.Reg()
		r1 := v.Args[0].Reg()
		if r != r1 {
			p.Reg = r1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
		r := v.Reg()
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDaddridx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		p := gc.Prog(s390x.AMOVD)
		p.From.Scale = 1
		if i == s390x.REGSP {
			r, i = i, r
		}
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = r
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVDaddr:
		p := gc.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
		opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
		opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt
	case ssa.OpS390XMOVDconst:
		x := v.Reg()
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
		x := v.Reg()
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XADDWload, ssa.OpS390XADDload,
		ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
		ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
		ssa.OpS390XANDWload, ssa.OpS390XANDload,
		ssa.OpS390XORWload, ssa.OpS390XORload,
		ssa.OpS390XXORWload, ssa.OpS390XXORload:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDload,
		ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
		ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
		ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
		ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, ssa.OpS390XMOVDloadidx,
		ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
		ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			r, i = i, r
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = r
		p.From.Scale = 1
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
		ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
		ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			r, i = i, r
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = r
		p.To.Scale = 1
		p.To.Index = i
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
		ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
		opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
	case ssa.OpS390XCLEAR:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpCopy, ssa.OpS390XMOVDconvert, ssa.OpS390XMOVDreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x != y {
			opregreg(moveByType(v.Type), y, x)
		}
	case ssa.OpS390XMOVDnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpPhi:
		gc.CheckLoweredPhi(v)
	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpS390XLoweredGetClosurePtr:
		// Closure pointer is R12 (already)
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpS390XLoweredGetG:
		r := v.Reg()
		p := gc.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REGG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XCALLstatic:
		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
			// Deferred calls will appear to be returning to
			// the CALL deferreturn(SB) that we are about to emit.
			// However, the stack trace code will show the line
			// of the instruction byte before the return PC.
			// To avoid that being an unrelated instruction,
			// insert an actual hardware NOP that will have the right line number.
			// This is different from obj.ANOP, which is a virtual no-op
			// that doesn't make it into the instruction stream.
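			// (ginsnop, defined elsewhere in this package, emits one
			// such hardware no-op.)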
			ginsnop()
		}
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpS390XCALLclosure:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Args[0].Reg()
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpS390XCALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpS390XCALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpS390XCALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Args[0].Reg()
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XNOT, ssa.OpS390XNOTW:
		v.Fatalf("NOT/NOTW generated %s", v.LongString())
	case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
		ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
		ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
		ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFSQRT:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpSP, ssa.OpSB:
		// nothing to do
	case ssa.OpSelect0, ssa.OpSelect1:
		// nothing to do
	case ssa.OpVarDef:
		gc.Gvardef(v.Aux.(*gc.Node))
	case ssa.OpVarKill:
		gc.Gvarkill(v.Aux.(*gc.Node))
	case ssa.OpVarLive:
		gc.Gvarlive(v.Aux.(*gc.Node))
	case ssa.OpKeepAlive:
		gc.KeepAlive(v)
	case ssa.OpS390XInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
	case ssa.OpS390XLoweredNilCheck:
		// Issue a load which will fault if the input is nil.
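		// The loaded byte is discarded into REGTMP; only the memory
		// access itself (and the fault it may raise) matters.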
		p := gc.Prog(s390x.AMOVBZ)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = s390x.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpS390XMVC:
		vo := v.AuxValAndOff()
		p := gc.Prog(s390x.AMVC)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		p.From.Offset = vo.Off()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = vo.Off()
		p.From3 = new(obj.Addr)
		p.From3.Type = obj.TYPE_CONST
		p.From3.Offset = vo.Val()
	case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
		ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
		for i := 2; i < len(v.Args)-1; i++ {
			if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
				v.Fatalf("invalid store multiple %s", v.LongString())
			}
		}
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[len(v.Args)-2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredMove:
		// Inputs must be valid pointers to memory,
		// so adjust arg0 and arg1 as part of the expansion.
		// arg2 should be src+size.
		//
		// mvc: MVC  $256, 0(R2), 0(R1)
		//      MOVD $256(R1), R1
		//      MOVD $256(R2), R2
		//      CMP  R2, Rarg2
		//      BNE  mvc
		//      MVC  $rem, 0(R2), 0(R1) // if rem > 0
		// arg2 is the last address to move in the loop + 256
		mvc := gc.Prog(s390x.AMVC)
		mvc.From.Type = obj.TYPE_MEM
		mvc.From.Reg = v.Args[1].Reg()
		mvc.To.Type = obj.TYPE_MEM
		mvc.To.Reg = v.Args[0].Reg()
		mvc.From3 = new(obj.Addr)
		mvc.From3.Type = obj.TYPE_CONST
		mvc.From3.Offset = 256

		for i := 0; i < 2; i++ {
			movd := gc.Prog(s390x.AMOVD)
			movd.From.Type = obj.TYPE_ADDR
			movd.From.Reg = v.Args[i].Reg()
			movd.From.Offset = 256
			movd.To.Type = obj.TYPE_REG
			movd.To.Reg = v.Args[i].Reg()
		}

		cmpu := gc.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[1].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[2].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := gc.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, mvc)

		if v.AuxInt > 0 {
			mvc := gc.Prog(s390x.AMVC)
			mvc.From.Type = obj.TYPE_MEM
			mvc.From.Reg = v.Args[1].Reg()
			mvc.To.Type = obj.TYPE_MEM
			mvc.To.Reg = v.Args[0].Reg()
			mvc.From3 = new(obj.Addr)
			mvc.From3.Type = obj.TYPE_CONST
			mvc.From3.Offset = v.AuxInt
		}
	case ssa.OpS390XLoweredZero:
		// The input must be a valid pointer to memory,
		// so adjust arg0 as part of the expansion.
		// arg1 should be src+size.
		//
		// clear: CLEAR $256, 0(R1)
		//        MOVD  $256(R1), R1
		//        CMP   R1, Rarg1
		//        BNE   clear
		//        CLEAR $rem, 0(R1) // if rem > 0
		// arg1 is the last address to zero in the loop + 256
		clear := gc.Prog(s390x.ACLEAR)
		clear.From.Type = obj.TYPE_CONST
		clear.From.Offset = 256
		clear.To.Type = obj.TYPE_MEM
		clear.To.Reg = v.Args[0].Reg()

		movd := gc.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_ADDR
		movd.From.Reg = v.Args[0].Reg()
		movd.From.Offset = 256
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Args[0].Reg()

		cmpu := gc.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[0].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[1].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := gc.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, clear)

		if v.AuxInt > 0 {
			clear := gc.Prog(s390x.ACLEAR)
			clear.From.Type = obj.TYPE_CONST
			clear.From.Offset = v.AuxInt
			clear.To.Type = obj.TYPE_MEM
			clear.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLAA, ssa.OpS390XLAAG:
		p := gc.Prog(v.Op.Asm())
		p.Reg = v.Reg0()
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
		// Convert the flags output of CS{,G} into a bool.
		//	CS{,G} arg1, arg2, arg0
		//	MOVD   $0, ret
		//	BNE    2(PC)
		//	MOVD   $1, ret
		//	NOP (so the BNE has somewhere to land)

		// CS{,G} arg1, arg2, arg0
		cs := gc.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Args[1].Reg() // old
		cs.Reg = v.Args[2].Reg()      // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// MOVD $0, ret
		movd := gc.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 0
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// BNE 2(PC)
		bne := gc.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH

		// MOVD $1, ret
		movd = gc.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 1
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// NOP (so the BNE has somewhere to land)
		nop := gc.Prog(obj.ANOP)
		gc.Patch(bne, nop)
	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
		// Loop until the CS{,G} succeeds.
		//     MOV{WZ,D} arg0, ret
		// cs: CS{,G}    ret, arg1, arg0
		//     BNE       cs

		// MOV{WZ,D} arg0, ret
		load := gc.Prog(loadByType(v.Type.FieldType(0)))
		load.From.Type = obj.TYPE_MEM
		load.From.Reg = v.Args[0].Reg()
		load.To.Type = obj.TYPE_REG
		load.To.Reg = v.Reg0()
		gc.AddAux(&load.From, v)

		// CS{,G} ret, arg1, arg0
		cs := gc.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Reg0()   // old
		cs.Reg = v.Args[1].Reg() // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// BNE cs
		bne := gc.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, cs)
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

var blockJump = [...]struct {
	asm, invasm obj.As
}{
	ssa.BlockS390XEQ:  {s390x.ABEQ, s390x.ABNE},
	ssa.BlockS390XNE:  {s390x.ABNE, s390x.ABEQ},
	ssa.BlockS390XLT:  {s390x.ABLT, s390x.ABGE},
	ssa.BlockS390XGE:  {s390x.ABGE, s390x.ABLT},
	ssa.BlockS390XLE:  {s390x.ABLE, s390x.ABGT},
	ssa.BlockS390XGT:  {s390x.ABGT, s390x.ABLE},
	ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
	ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
}

func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	s.SetPos(b.Pos)

	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := gc.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R3:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := gc.Prog(s390x.ACMPW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REG_R3
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p = gc.Prog(s390x.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := gc.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit:
		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
	case ssa.BlockRet:
		gc.Prog(obj.ARET)
	case ssa.BlockRetJmp:
		p := gc.Prog(s390x.ABR)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
	case ssa.BlockS390XEQ, ssa.BlockS390XNE,
		ssa.BlockS390XLT, ssa.BlockS390XGE,
		ssa.BlockS390XLE, ssa.BlockS390XGT,
		ssa.BlockS390XGEF, ssa.BlockS390XGTF:
		jmp := blockJump[b.Kind]
		likely := b.Likely
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			p = gc.Prog(jmp.invasm)
			likely *= -1
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := gc.Prog(s390x.ABR)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}
	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}