github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/cmd/compile/internal/s390x/ssa.go (about) 1 // Copyright 2016 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package s390x 6 7 import ( 8 "math" 9 10 "cmd/compile/internal/gc" 11 "cmd/compile/internal/ssa" 12 "cmd/compile/internal/types" 13 "cmd/internal/obj" 14 "cmd/internal/obj/s390x" 15 ) 16 17 // markMoves marks any MOVXconst ops that need to avoid clobbering flags. 18 func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { 19 flive := b.FlagsLiveAtEnd 20 if b.Control != nil && b.Control.Type.IsFlags() { 21 flive = true 22 } 23 for i := len(b.Values) - 1; i >= 0; i-- { 24 v := b.Values[i] 25 if flive && v.Op == ssa.OpS390XMOVDconst { 26 // The "mark" is any non-nil Aux value. 27 v.Aux = v 28 } 29 if v.Type.IsFlags() { 30 flive = false 31 } 32 for _, a := range v.Args { 33 if a.Type.IsFlags() { 34 flive = true 35 } 36 } 37 } 38 } 39 40 // loadByType returns the load instruction of the given type. 41 func loadByType(t *types.Type) obj.As { 42 if t.IsFloat() { 43 switch t.Size() { 44 case 4: 45 return s390x.AFMOVS 46 case 8: 47 return s390x.AFMOVD 48 } 49 } else { 50 switch t.Size() { 51 case 1: 52 if t.IsSigned() { 53 return s390x.AMOVB 54 } else { 55 return s390x.AMOVBZ 56 } 57 case 2: 58 if t.IsSigned() { 59 return s390x.AMOVH 60 } else { 61 return s390x.AMOVHZ 62 } 63 case 4: 64 if t.IsSigned() { 65 return s390x.AMOVW 66 } else { 67 return s390x.AMOVWZ 68 } 69 case 8: 70 return s390x.AMOVD 71 } 72 } 73 panic("bad load type") 74 } 75 76 // storeByType returns the store instruction of the given type. 
77 func storeByType(t *types.Type) obj.As { 78 width := t.Size() 79 if t.IsFloat() { 80 switch width { 81 case 4: 82 return s390x.AFMOVS 83 case 8: 84 return s390x.AFMOVD 85 } 86 } else { 87 switch width { 88 case 1: 89 return s390x.AMOVB 90 case 2: 91 return s390x.AMOVH 92 case 4: 93 return s390x.AMOVW 94 case 8: 95 return s390x.AMOVD 96 } 97 } 98 panic("bad store type") 99 } 100 101 // moveByType returns the reg->reg move instruction of the given type. 102 func moveByType(t *types.Type) obj.As { 103 if t.IsFloat() { 104 return s390x.AFMOVD 105 } else { 106 switch t.Size() { 107 case 1: 108 if t.IsSigned() { 109 return s390x.AMOVB 110 } else { 111 return s390x.AMOVBZ 112 } 113 case 2: 114 if t.IsSigned() { 115 return s390x.AMOVH 116 } else { 117 return s390x.AMOVHZ 118 } 119 case 4: 120 if t.IsSigned() { 121 return s390x.AMOVW 122 } else { 123 return s390x.AMOVWZ 124 } 125 case 8: 126 return s390x.AMOVD 127 } 128 } 129 panic("bad load type") 130 } 131 132 // opregreg emits instructions for 133 // dest := dest(To) op src(From) 134 // and also returns the created obj.Prog so it 135 // may be further adjusted (offset, scale, etc). 136 func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { 137 p := s.Prog(op) 138 p.From.Type = obj.TYPE_REG 139 p.To.Type = obj.TYPE_REG 140 p.To.Reg = dest 141 p.From.Reg = src 142 return p 143 } 144 145 // opregregimm emits instructions for 146 // dest := src(From) op off 147 // and also returns the created obj.Prog so it 148 // may be further adjusted (offset, scale, etc). 
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
	p := s.Prog(op)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = off
	p.Reg = src
	p.To.Reg = dest
	p.To.Type = obj.TYPE_REG
	return p
}

// ssaGenValue emits the machine instructions for a single SSA value v,
// appending them to the function being assembled via s. Each case handles
// one family of s390x ops; ops that should have been eliminated before
// codegen (NOT, InvertFlags, Flag*, AddTupleFirst*) fail loudly here.
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpS390XSLD, ssa.OpS390XSLW,
		ssa.OpS390XSRD, ssa.OpS390XSRW,
		ssa.OpS390XSRAD, ssa.OpS390XSRAW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		if r2 == s390x.REG_R0 {
			v.Fatalf("cannot use R0 as shift value %s", v.LongString())
		}
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	case ssa.OpS390XADD, ssa.OpS390XADDW,
		ssa.OpS390XSUB, ssa.OpS390XSUBW,
		ssa.OpS390XAND, ssa.OpS390XANDW,
		ssa.OpS390XOR, ssa.OpS390XORW,
		ssa.OpS390XXOR, ssa.OpS390XXORW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	// 2-address opcode arithmetic: the output must share a register
	// with input 0 (enforced by the regalloc constraints; checked here).
	case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
		ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
		ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
		ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
	case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
		ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r2
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFIDBR:
		// Only the rounding-mode masks listed are valid for FIDBR;
		// mask 2 is deliberately absent.
		switch v.AuxInt {
		case 0, 1, 3, 4, 5, 6, 7:
			opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
		default:
			v.Fatalf("invalid FIDBR mask: %v", v.AuxInt)
		}
	case ssa.OpS390XCPSDR:
		p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
		p.Reg = v.Args[0].Reg()
	case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
		ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
		ssa.OpS390XMODD, ssa.OpS390XMODW,
		ssa.OpS390XMODDU, ssa.OpS390XMODWU:

		// TODO(mundaym): use the temp registers every time like x86 does with AX?
		dividend := v.Args[0].Reg()
		divisor := v.Args[1].Reg()

		// CPU faults upon signed overflow, which occurs when most
		// negative int is divided by -1. For the signed ops, compare
		// the divisor against -1 and branch around the divide to a
		// fixup sequence (emitted below) when it matches.
		var j *obj.Prog
		if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
			v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {

			var c *obj.Prog
			c = s.Prog(s390x.ACMP)
			j = s.Prog(s390x.ABEQ)

			c.From.Type = obj.TYPE_REG
			c.From.Reg = divisor
			c.To.Type = obj.TYPE_CONST
			c.To.Offset = -1

			j.To.Type = obj.TYPE_BRANCH

		}

		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = divisor
		p.Reg = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = dividend

		// signed division, rest of the check for -1 case:
		// after a normal divide, jump over the fixup; the BEQ above
		// lands on the fixup instruction.
		if j != nil {
			j2 := s.Prog(s390x.ABR)
			j2.To.Type = obj.TYPE_BRANCH

			var n *obj.Prog
			if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
				// n * -1 = -n
				n = s.Prog(s390x.ANEG)
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			} else {
				// n % -1 == 0
				n = s.Prog(s390x.AXOR)
				n.From.Type = obj.TYPE_REG
				n.From.Reg = dividend
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			}

			j.To.Val = n
			j2.To.Val = s.Pc()
		}
	case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
		opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
	case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
		ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
		ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
		ssa.OpS390XORconst, ssa.OpS390XORWconst,
		ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
		ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
		ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
		ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		r := v.Reg()
		r1 := v.Args[0].Reg()
		if r != r1 {
			p.Reg = r1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
		// Source and destination are the same register: the op consumes
		// the borrow flag and materializes a mask in place.
		r := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDaddridx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Scale = 1
		if i == s390x.REGSP {
			// Keep REGSP out of the index slot; base and index are
			// interchangeable here because the scale is 1.
			r, i = i, r
		}
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = r
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVDaddr:
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
		// NOTE(review): operands are passed reversed (dest=arg1, src=arg0)
		// relative to the other binary ops — presumably to match the
		// assembler's comparison operand order; confirm against the
		// s390x assembler before changing.
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt
	case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		// Unsigned compare: truncate the constant to its low 32 bits.
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(uint32(v.AuxInt))
	case ssa.OpS390XMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		// AuxInt holds the float's bit pattern.
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XADDWload, ssa.OpS390XADDload,
		ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
		ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
		ssa.OpS390XANDWload, ssa.OpS390XANDload,
		ssa.OpS390XORWload, ssa.OpS390XORload,
		ssa.OpS390XXORWload, ssa.OpS390XXORload:
		// Op-from-memory forms: arg0 is the register operand (and result),
		// arg1 is the memory address.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDload,
		ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
		ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
		ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
		ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
		ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx,
		ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
		ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			// Keep REGSP out of the index slot (scale is 1, so base and
			// index are interchangeable).
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = r
		p.From.Scale = 1
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
		ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
		ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = r
		p.To.Scale = 1
		p.To.Index = i
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
		// AuxInt packs both the value to store and the address offset.
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
		ssa.OpS390XLDGR, ssa.OpS390XLGDR,
		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
		ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
		ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
		// Simple unary reg->reg ops (extensions, conversions, negations).
		opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
	case ssa.OpS390XCLEAR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpCopy, ssa.OpS390XMOVDconvert, ssa.OpS390XMOVDreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x != y {
			opregreg(s, moveByType(v.Type), y, x)
		}
	case ssa.OpS390XMOVDnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpS390XLoweredGetClosurePtr:
		// Closure pointer is R12 (already)
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
		// input is already rounded
	case ssa.OpS390XLoweredGetG:
		r := v.Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REGG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XLoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -gc.Ctxt.FixedFrameSize()
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
		s.Call(v)
	case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XNOT, ssa.OpS390XNOTW:
		// These should be lowered before codegen.
		v.Fatalf("NOT/NOTW generated %s", v.LongString())
	case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
		ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
		ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
		ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
		// Conditional moves: arg0 is both the "else" value and the result.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFSQRT:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
	case ssa.OpS390XLoweredNilCheck:
		// Issue a load which will fault if the input is nil.
		p := s.Prog(s390x.AMOVBZ)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = s390x.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpS390XMVC:
		// Memory-to-memory copy; AuxInt packs the byte count (Val) and
		// the common offset (Off) applied to both operands.
		vo := v.AuxValAndOff()
		p := s.Prog(s390x.AMVC)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = vo.Val()
		p.SetFrom3(obj.Addr{
			Type:   obj.TYPE_MEM,
			Reg:    v.Args[1].Reg(),
			Offset: vo.Off(),
		})
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = vo.Off()
	case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
		ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
		// Store-multiple requires the source registers to be consecutive.
		for i := 2; i < len(v.Args)-1; i++ {
			if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
				v.Fatalf("invalid store multiple %s", v.LongString())
			}
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[len(v.Args)-2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredMove:
		// Inputs must be valid pointers to memory,
		// so adjust arg0 and arg1 as part of the expansion.
		// arg2 should be src+size,
		//
		// mvc: MVC  $256, 0(R2), 0(R1)
		//      MOVD $256(R1), R1
		//      MOVD $256(R2), R2
		//      CMPU R2, Rarg2
		//      BLT  mvc
		//      MVC  $rem, 0(R2), 0(R1) // if rem > 0
		// arg2 is the last address to move in the loop + 256
		mvc := s.Prog(s390x.AMVC)
		mvc.From.Type = obj.TYPE_CONST
		mvc.From.Offset = 256
		mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
		mvc.To.Type = obj.TYPE_MEM
		mvc.To.Reg = v.Args[0].Reg()

		// Advance both the destination (arg0) and source (arg1) pointers.
		for i := 0; i < 2; i++ {
			movd := s.Prog(s390x.AMOVD)
			movd.From.Type = obj.TYPE_ADDR
			movd.From.Reg = v.Args[i].Reg()
			movd.From.Offset = 256
			movd.To.Type = obj.TYPE_REG
			movd.To.Reg = v.Args[i].Reg()
		}

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[1].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[2].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, mvc)

		// Copy the remaining (< 256) bytes, if any.
		if v.AuxInt > 0 {
			mvc := s.Prog(s390x.AMVC)
			mvc.From.Type = obj.TYPE_CONST
			mvc.From.Offset = v.AuxInt
			mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
			mvc.To.Type = obj.TYPE_MEM
			mvc.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XLoweredZero:
		// Input must be valid pointers to memory,
		// so adjust arg0 as part of the expansion.
		// arg1 should be src+size,
		//
		// clear: CLEAR $256, 0(R1)
		//        MOVD  $256(R1), R1
		//        CMPU  R1, Rarg1
		//        BLT   clear
		//        CLEAR $rem, 0(R1) // if rem > 0
		// arg1 is the last address to zero in the loop + 256
		clear := s.Prog(s390x.ACLEAR)
		clear.From.Type = obj.TYPE_CONST
		clear.From.Offset = 256
		clear.To.Type = obj.TYPE_MEM
		clear.To.Reg = v.Args[0].Reg()

		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_ADDR
		movd.From.Reg = v.Args[0].Reg()
		movd.From.Offset = 256
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Args[0].Reg()

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[0].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[1].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, clear)

		// Zero the remaining (< 256) bytes, if any.
		if v.AuxInt > 0 {
			clear := s.Prog(s390x.ACLEAR)
			clear.From.Type = obj.TYPE_CONST
			clear.From.Offset = v.AuxInt
			clear.To.Type = obj.TYPE_MEM
			clear.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLAA, ssa.OpS390XLAAG:
		p := s.Prog(v.Op.Asm())
		p.Reg = v.Reg0()
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
		// Convert the flags output of CS{,G} into a bool.
		//    CS{,G} arg1, arg2, arg0
		//    MOVD   $0, ret
		//    BNE    2(PC)
		//    MOVD   $1, ret
		//    NOP (so the BNE has somewhere to land)

		// CS{,G} arg1, arg2, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Args[1].Reg() // old
		cs.Reg = v.Args[2].Reg()      // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// MOVD $0, ret
		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 0
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// BNE 2(PC)
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH

		// MOVD $1, ret
		movd = s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 1
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// NOP (so the BNE has somewhere to land)
		nop := s.Prog(obj.ANOP)
		gc.Patch(bne, nop)
	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
		// Loop until the CS{,G} succeeds.
		//     MOV{WZ,D} arg0, ret
		// cs: CS{,G}    ret, arg1, arg0
		//     BNE       cs

		// MOV{WZ,D} arg0, ret
		load := s.Prog(loadByType(v.Type.FieldType(0)))
		load.From.Type = obj.TYPE_MEM
		load.From.Reg = v.Args[0].Reg()
		load.To.Type = obj.TYPE_REG
		load.To.Reg = v.Reg0()
		gc.AddAux(&load.From, v)

		// CS{,G} ret, arg1, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Reg0()   // old
		cs.Reg = v.Args[1].Reg() // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// BNE cs
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, cs)
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

// blockJump maps each conditional block kind to its branch instructions:
// asm branches when the condition holds (to the first successor), invasm
// is the inverted condition used when the first successor falls through.
var blockJump = [...]struct {
	asm, invasm obj.As
}{
	ssa.BlockS390XEQ:  {s390x.ABEQ, s390x.ABNE},
	ssa.BlockS390XNE:  {s390x.ABNE, s390x.ABEQ},
	ssa.BlockS390XLT:  {s390x.ABLT, s390x.ABGE},
	ssa.BlockS390XGE:  {s390x.ABGE, s390x.ABLT},
	ssa.BlockS390XLE:  {s390x.ABLE, s390x.ABGT},
	ssa.BlockS390XGT:  {s390x.ABGT, s390x.ABLE},
	ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
	ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
}

// ssaGenBlock emits the control-flow instructions at the end of block b.
// next is the block that will be laid out immediately after b, so branches
// to it can be omitted. Branches whose targets are not yet resolvable are
// recorded in s.Branches for later patching.
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R3:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(s390x.ACMPW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REG_R3
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p = s.Prog(s390x.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
	case ssa.BlockRet:
		s.Prog(obj.ARET)
	case ssa.BlockRetJmp:
		p := s.Prog(s390x.ABR)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)
	case ssa.BlockS390XEQ, ssa.BlockS390XNE,
		ssa.BlockS390XLT, ssa.BlockS390XGE,
		ssa.BlockS390XLE, ssa.BlockS390XGT,
		ssa.BlockS390XGEF, ssa.BlockS390XGTF:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			// First successor falls through: branch on the inverted
			// condition to the second successor.
			p = s.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			// Second successor falls through: branch on the condition
			// to the first successor.
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			// Neither successor falls through: conditional branch plus
			// an unconditional branch.
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := s.Prog(s390x.ABR)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}
	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}