github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/cmd/compile/internal/s390x/ssa.go (about) 1 // Copyright 2016 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package s390x 6 7 import ( 8 "math" 9 10 "cmd/compile/internal/gc" 11 "cmd/compile/internal/ssa" 12 "cmd/compile/internal/types" 13 "cmd/internal/obj" 14 "cmd/internal/obj/s390x" 15 ) 16 17 // markMoves marks any MOVXconst ops that need to avoid clobbering flags. 18 func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { 19 flive := b.FlagsLiveAtEnd 20 if b.Control != nil && b.Control.Type.IsFlags() { 21 flive = true 22 } 23 for i := len(b.Values) - 1; i >= 0; i-- { 24 v := b.Values[i] 25 if flive && v.Op == ssa.OpS390XMOVDconst { 26 // The "mark" is any non-nil Aux value. 27 v.Aux = v 28 } 29 if v.Type.IsFlags() { 30 flive = false 31 } 32 for _, a := range v.Args { 33 if a.Type.IsFlags() { 34 flive = true 35 } 36 } 37 } 38 } 39 40 // loadByType returns the load instruction of the given type. 41 func loadByType(t *types.Type) obj.As { 42 if t.IsFloat() { 43 switch t.Size() { 44 case 4: 45 return s390x.AFMOVS 46 case 8: 47 return s390x.AFMOVD 48 } 49 } else { 50 switch t.Size() { 51 case 1: 52 if t.IsSigned() { 53 return s390x.AMOVB 54 } else { 55 return s390x.AMOVBZ 56 } 57 case 2: 58 if t.IsSigned() { 59 return s390x.AMOVH 60 } else { 61 return s390x.AMOVHZ 62 } 63 case 4: 64 if t.IsSigned() { 65 return s390x.AMOVW 66 } else { 67 return s390x.AMOVWZ 68 } 69 case 8: 70 return s390x.AMOVD 71 } 72 } 73 panic("bad load type") 74 } 75 76 // storeByType returns the store instruction of the given type. 
77 func storeByType(t *types.Type) obj.As { 78 width := t.Size() 79 if t.IsFloat() { 80 switch width { 81 case 4: 82 return s390x.AFMOVS 83 case 8: 84 return s390x.AFMOVD 85 } 86 } else { 87 switch width { 88 case 1: 89 return s390x.AMOVB 90 case 2: 91 return s390x.AMOVH 92 case 4: 93 return s390x.AMOVW 94 case 8: 95 return s390x.AMOVD 96 } 97 } 98 panic("bad store type") 99 } 100 101 // moveByType returns the reg->reg move instruction of the given type. 102 func moveByType(t *types.Type) obj.As { 103 if t.IsFloat() { 104 return s390x.AFMOVD 105 } else { 106 switch t.Size() { 107 case 1: 108 if t.IsSigned() { 109 return s390x.AMOVB 110 } else { 111 return s390x.AMOVBZ 112 } 113 case 2: 114 if t.IsSigned() { 115 return s390x.AMOVH 116 } else { 117 return s390x.AMOVHZ 118 } 119 case 4: 120 if t.IsSigned() { 121 return s390x.AMOVW 122 } else { 123 return s390x.AMOVWZ 124 } 125 case 8: 126 return s390x.AMOVD 127 } 128 } 129 panic("bad load type") 130 } 131 132 // opregreg emits instructions for 133 // dest := dest(To) op src(From) 134 // and also returns the created obj.Prog so it 135 // may be further adjusted (offset, scale, etc). 136 func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { 137 p := s.Prog(op) 138 p.From.Type = obj.TYPE_REG 139 p.To.Type = obj.TYPE_REG 140 p.To.Reg = dest 141 p.From.Reg = src 142 return p 143 } 144 145 // opregregimm emits instructions for 146 // dest := src(From) op off 147 // and also returns the created obj.Prog so it 148 // may be further adjusted (offset, scale, etc). 
// opregregimm emits instructions for
//	dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
	p := s.Prog(op)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = off
	p.Reg = src
	p.To.Reg = dest
	p.To.Type = obj.TYPE_REG
	return p
}

// ssaGenValue emits the machine instruction(s) for a single SSA value.
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpS390XSLD, ssa.OpS390XSLW,
		ssa.OpS390XSRD, ssa.OpS390XSRW,
		ssa.OpS390XSRAD, ssa.OpS390XSRAW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		// R0 is rejected as a shift-amount register here.
		if r2 == s390x.REG_R0 {
			v.Fatalf("cannot use R0 as shift value %s", v.LongString())
		}
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	case ssa.OpS390XADD, ssa.OpS390XADDW,
		ssa.OpS390XSUB, ssa.OpS390XSUBW,
		ssa.OpS390XAND, ssa.OpS390XANDW,
		ssa.OpS390XOR, ssa.OpS390XORW,
		ssa.OpS390XXOR, ssa.OpS390XXORW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			// Three-operand form: result register differs from input[0].
			p.Reg = r1
		}
	// 2-address opcode arithmetic: output must share input[0]'s register.
	case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
		ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
		ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
		ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
	case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
		ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
		// Fused multiply-add/subtract: r = r ± r1*r2, with r aliasing input[0].
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r2
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFIDBR:
		// Load FP integer (round); AuxInt selects the rounding mode mask.
		// Mask 2 is not in the accepted set below.
		switch v.AuxInt {
		case 0, 1, 3, 4, 5, 6, 7:
			opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
		default:
			v.Fatalf("invalid FIDBR mask: %v", v.AuxInt)
		}
	case ssa.OpS390XCPSDR:
		// Copy sign: result takes magnitude from arg1, sign from arg0.
		p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
		p.Reg = v.Args[0].Reg()
	case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
		ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
		ssa.OpS390XMODD, ssa.OpS390XMODW,
		ssa.OpS390XMODDU, ssa.OpS390XMODWU:

		// TODO(mundaym): use the temp registers every time like x86 does with AX?
		dividend := v.Args[0].Reg()
		divisor := v.Args[1].Reg()

		// CPU faults upon signed overflow, which occurs when most
		// negative int is divided by -1. For the signed ops, compare
		// the divisor against -1 and branch around the divide.
		var j *obj.Prog
		if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
			v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {

			var c *obj.Prog
			c = s.Prog(s390x.ACMP)
			j = s.Prog(s390x.ABEQ)

			c.From.Type = obj.TYPE_REG
			c.From.Reg = divisor
			c.To.Type = obj.TYPE_CONST
			c.To.Offset = -1

			j.To.Type = obj.TYPE_BRANCH

		}

		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = divisor
		p.Reg = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = dividend

		// signed division, rest of the check for -1 case
		if j != nil {
			j2 := s.Prog(s390x.ABR)
			j2.To.Type = obj.TYPE_BRANCH

			var n *obj.Prog
			if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
				// n * -1 = -n
				n = s.Prog(s390x.ANEG)
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			} else {
				// n % -1 == 0
				n = s.Prog(s390x.AXOR)
				n.From.Type = obj.TYPE_REG
				n.From.Reg = dividend
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			}

			// The BEQ skips the divide and lands on the fixup; the BR
			// after the divide jumps past the fixup.
			j.To.Val = n
			j2.To.Val = s.Pc()
		}
	case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
		opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
	case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
		ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
		ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
		ssa.OpS390XORconst, ssa.OpS390XORWconst,
		ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
		// 2-address constant arithmetic: output aliases input[0].
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
		ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
		ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
		ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		r := v.Reg()
		r1 := v.Args[0].Reg()
		if r != r1 {
			p.Reg = r1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
		// Materialize a mask from the carry flag: op with the result
		// register as both source and destination.
		r := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDaddridx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Scale = 1
		if i == s390x.REGSP {
			// SP cannot be the index register; swap base and index.
			r, i = i, r
		}
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = r
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVDaddr:
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt
	case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		// Unsigned compare: zero-extend the 32-bit constant.
		p.To.Offset = int64(uint32(v.AuxInt))
	case ssa.OpS390XMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		// AuxInt holds the float64 bit pattern.
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XADDWload, ssa.OpS390XADDload,
		ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
		ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
		ssa.OpS390XANDWload, ssa.OpS390XANDload,
		ssa.OpS390XORWload, ssa.OpS390XORload,
		ssa.OpS390XXORWload, ssa.OpS390XXORload:
		// Read-modify ops with a memory operand: output aliases input[0],
		// the memory address comes from input[1].
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDload,
		ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
		ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
		ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
		ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
		ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx,
		ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
		ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			// SP cannot be the index register; swap base and index.
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = r
		p.From.Scale = 1
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
		ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
		ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			// SP cannot be the index register; swap base and index.
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = r
		p.To.Scale = 1
		p.To.Index = i
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		// AuxValAndOff packs the constant value and the memory offset.
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
		ssa.OpS390XLDGR, ssa.OpS390XLGDR,
		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
		ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
		ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
		// Simple register->register unary ops (extensions, conversions, etc.).
		opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
	case ssa.OpS390XCLEAR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpCopy, ssa.OpS390XMOVDconvert, ssa.OpS390XMOVDreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x != y {
			opregreg(s, moveByType(v.Type), y, x)
		}
	case ssa.OpS390XMOVDnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpS390XLoweredGetClosurePtr:
		// Closure pointer is R12 (already)
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
		// input is already rounded
	case ssa.OpS390XLoweredGetG:
		r := v.Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REGG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XLoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -gc.Ctxt.FixedFrameSize()
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
		s.Call(v)
	case ssa.OpS390XLoweredWB:
		// Call to the write-barrier function; Aux holds its symbol.
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = v.Aux.(*obj.LSym)
	case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XNOT, ssa.OpS390XNOTW:
		v.Fatalf("NOT/NOTW generated %s", v.LongString())
	case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
		ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
		ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
		ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
		// Conditional moves: output aliases input[0], conditionally
		// overwritten with input[1].
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFSQRT:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
	case ssa.OpS390XLoweredNilCheck:
		// Issue a load which will fault if the input is nil.
		p := s.Prog(s390x.AMOVBZ)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = s390x.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpS390XMVC:
		// Memory-to-memory copy: length in AuxValAndOff's value,
		// shared offset applied to both source and destination.
		vo := v.AuxValAndOff()
		p := s.Prog(s390x.AMVC)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = vo.Val()
		p.SetFrom3(obj.Addr{
			Type:   obj.TYPE_MEM,
			Reg:    v.Args[1].Reg(),
			Offset: vo.Off(),
		})
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = vo.Off()
	case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
		ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
		// Store multiple requires consecutively numbered registers.
		for i := 2; i < len(v.Args)-1; i++ {
			if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
				v.Fatalf("invalid store multiple %s", v.LongString())
			}
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[len(v.Args)-2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredMove:
		// Inputs must be valid pointers to memory,
		// so adjust arg0 and arg1 as part of the expansion.
		// arg2 should be src+size,
		//
		// mvc: MVC  $256, 0(R2), 0(R1)
		//      MOVD $256(R1), R1
		//      MOVD $256(R2), R2
		//      CMPU R2, Rarg2
		//      BLT  mvc
		//      MVC  $rem, 0(R2), 0(R1) // if rem > 0
		// arg2 is the last address to move in the loop + 256
		mvc := s.Prog(s390x.AMVC)
		mvc.From.Type = obj.TYPE_CONST
		mvc.From.Offset = 256
		mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
		mvc.To.Type = obj.TYPE_MEM
		mvc.To.Reg = v.Args[0].Reg()

		// Advance both the destination (arg0) and source (arg1) pointers.
		for i := 0; i < 2; i++ {
			movd := s.Prog(s390x.AMOVD)
			movd.From.Type = obj.TYPE_ADDR
			movd.From.Reg = v.Args[i].Reg()
			movd.From.Offset = 256
			movd.To.Type = obj.TYPE_REG
			movd.To.Reg = v.Args[i].Reg()
		}

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[1].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[2].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, mvc)

		// Copy the remaining (<256) bytes, if any.
		if v.AuxInt > 0 {
			mvc := s.Prog(s390x.AMVC)
			mvc.From.Type = obj.TYPE_CONST
			mvc.From.Offset = v.AuxInt
			mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
			mvc.To.Type = obj.TYPE_MEM
			mvc.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XLoweredZero:
		// Input must be valid pointers to memory,
		// so adjust arg0 as part of the expansion.
		// arg1 should be src+size,
		//
		// clear: CLEAR $256, 0(R1)
		//        MOVD  $256(R1), R1
		//        CMPU  R1, Rarg1
		//        BLT   clear
		//        CLEAR $rem, 0(R1) // if rem > 0
		// arg1 is the last address to zero in the loop + 256
		clear := s.Prog(s390x.ACLEAR)
		clear.From.Type = obj.TYPE_CONST
		clear.From.Offset = 256
		clear.To.Type = obj.TYPE_MEM
		clear.To.Reg = v.Args[0].Reg()

		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_ADDR
		movd.From.Reg = v.Args[0].Reg()
		movd.From.Offset = 256
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Args[0].Reg()

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[0].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[1].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, clear)

		// Zero the remaining (<256) bytes, if any.
		if v.AuxInt > 0 {
			clear := s.Prog(s390x.ACLEAR)
			clear.From.Type = obj.TYPE_CONST
			clear.From.Offset = v.AuxInt
			clear.To.Type = obj.TYPE_MEM
			clear.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLAA, ssa.OpS390XLAAG:
		// Load-and-add atomic: Reg0 receives the old value.
		p := s.Prog(v.Op.Asm())
		p.Reg = v.Reg0()
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
		// Convert the flags output of CS{,G} into a bool.
		//    CS{,G} arg1, arg2, arg0
		//    MOVD   $0, ret
		//    BNE    2(PC)
		//    MOVD   $1, ret
		//    NOP (so the BNE has somewhere to land)

		// CS{,G} arg1, arg2, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Args[1].Reg() // old
		cs.Reg = v.Args[2].Reg()      // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// MOVD $0, ret
		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 0
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// BNE 2(PC)
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH

		// MOVD $1, ret
		movd = s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 1
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// NOP (so the BNE has somewhere to land)
		nop := s.Prog(obj.ANOP)
		gc.Patch(bne, nop)
	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
		// Loop until the CS{,G} succeeds.
		//     MOV{WZ,D} arg0, ret
		// cs: CS{,G}    ret, arg1, arg0
		//     BNE       cs

		// MOV{WZ,D} arg0, ret
		load := s.Prog(loadByType(v.Type.FieldType(0)))
		load.From.Type = obj.TYPE_MEM
		load.From.Reg = v.Args[0].Reg()
		load.To.Type = obj.TYPE_REG
		load.To.Reg = v.Reg0()
		gc.AddAux(&load.From, v)

		// CS{,G} ret, arg1, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Reg0()   // old
		cs.Reg = v.Args[1].Reg() // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// BNE cs
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, cs)
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

// blockJump maps a conditional block kind to the branch instruction taken
// when the successor order matches (asm) and the inverted branch used when
// the fallthrough successor comes first (invasm). The float variants use
// unordered-aware inverses (BLEU/BLTU) so NaN comparisons branch correctly.
var blockJump = [...]struct {
	asm, invasm obj.As
}{
	ssa.BlockS390XEQ:  {s390x.ABEQ, s390x.ABNE},
	ssa.BlockS390XNE:  {s390x.ABNE, s390x.ABEQ},
	ssa.BlockS390XLT:  {s390x.ABLT, s390x.ABGE},
	ssa.BlockS390XGE:  {s390x.ABGE, s390x.ABLT},
	ssa.BlockS390XLE:  {s390x.ABLE, s390x.ABGT},
	ssa.BlockS390XGT:  {s390x.ABGT, s390x.ABLE},
	ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
	ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
}

// ssaGenBlock emits the control-flow instructions that end block b,
// given that next is the block laid out immediately after it (branches
// to next are omitted).
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R3:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(s390x.ACMPW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REG_R3
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p = s.Prog(s390x.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
	case ssa.BlockRet:
		s.Prog(obj.ARET)
	case ssa.BlockRetJmp:
		p := s.Prog(s390x.ABR)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)
	case ssa.BlockS390XEQ, ssa.BlockS390XNE,
		ssa.BlockS390XLT, ssa.BlockS390XGE,
		ssa.BlockS390XLE, ssa.BlockS390XGT,
		ssa.BlockS390XGEF, ssa.BlockS390XGTF:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			// Fallthrough to the taken successor: emit the inverted branch.
			p = s.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			// Fallthrough to the not-taken successor: emit the branch as-is.
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			// Neither successor follows: conditional branch plus
			// an unconditional branch.
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := s.Prog(s390x.ABR)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}
	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}