github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/cmd/compile/internal/s390x/ssa.go (about) 1 // Copyright 2016 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package s390x 6 7 import ( 8 "math" 9 10 "cmd/compile/internal/gc" 11 "cmd/compile/internal/ssa" 12 "cmd/compile/internal/types" 13 "cmd/internal/obj" 14 "cmd/internal/obj/s390x" 15 ) 16 17 // markMoves marks any MOVXconst ops that need to avoid clobbering flags. 18 func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { 19 flive := b.FlagsLiveAtEnd 20 if b.Control != nil && b.Control.Type.IsFlags() { 21 flive = true 22 } 23 for i := len(b.Values) - 1; i >= 0; i-- { 24 v := b.Values[i] 25 if flive && v.Op == ssa.OpS390XMOVDconst { 26 // The "mark" is any non-nil Aux value. 27 v.Aux = v 28 } 29 if v.Type.IsFlags() { 30 flive = false 31 } 32 for _, a := range v.Args { 33 if a.Type.IsFlags() { 34 flive = true 35 } 36 } 37 } 38 } 39 40 // loadByType returns the load instruction of the given type. 41 func loadByType(t *types.Type) obj.As { 42 if t.IsFloat() { 43 switch t.Size() { 44 case 4: 45 return s390x.AFMOVS 46 case 8: 47 return s390x.AFMOVD 48 } 49 } else { 50 switch t.Size() { 51 case 1: 52 if t.IsSigned() { 53 return s390x.AMOVB 54 } else { 55 return s390x.AMOVBZ 56 } 57 case 2: 58 if t.IsSigned() { 59 return s390x.AMOVH 60 } else { 61 return s390x.AMOVHZ 62 } 63 case 4: 64 if t.IsSigned() { 65 return s390x.AMOVW 66 } else { 67 return s390x.AMOVWZ 68 } 69 case 8: 70 return s390x.AMOVD 71 } 72 } 73 panic("bad load type") 74 } 75 76 // storeByType returns the store instruction of the given type. 
77 func storeByType(t *types.Type) obj.As { 78 width := t.Size() 79 if t.IsFloat() { 80 switch width { 81 case 4: 82 return s390x.AFMOVS 83 case 8: 84 return s390x.AFMOVD 85 } 86 } else { 87 switch width { 88 case 1: 89 return s390x.AMOVB 90 case 2: 91 return s390x.AMOVH 92 case 4: 93 return s390x.AMOVW 94 case 8: 95 return s390x.AMOVD 96 } 97 } 98 panic("bad store type") 99 } 100 101 // moveByType returns the reg->reg move instruction of the given type. 102 func moveByType(t *types.Type) obj.As { 103 if t.IsFloat() { 104 return s390x.AFMOVD 105 } else { 106 switch t.Size() { 107 case 1: 108 if t.IsSigned() { 109 return s390x.AMOVB 110 } else { 111 return s390x.AMOVBZ 112 } 113 case 2: 114 if t.IsSigned() { 115 return s390x.AMOVH 116 } else { 117 return s390x.AMOVHZ 118 } 119 case 4: 120 if t.IsSigned() { 121 return s390x.AMOVW 122 } else { 123 return s390x.AMOVWZ 124 } 125 case 8: 126 return s390x.AMOVD 127 } 128 } 129 panic("bad load type") 130 } 131 132 // opregreg emits instructions for 133 // dest := dest(To) op src(From) 134 // and also returns the created obj.Prog so it 135 // may be further adjusted (offset, scale, etc). 136 func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { 137 p := s.Prog(op) 138 p.From.Type = obj.TYPE_REG 139 p.To.Type = obj.TYPE_REG 140 p.To.Reg = dest 141 p.From.Reg = src 142 return p 143 } 144 145 // opregregimm emits instructions for 146 // dest := src(From) op off 147 // and also returns the created obj.Prog so it 148 // may be further adjusted (offset, scale, etc). 
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
	p := s.Prog(op)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = off
	p.Reg = src
	p.To.Reg = dest
	p.To.Type = obj.TYPE_REG
	return p
}

// ssaGenValue emits the machine instruction(s) implementing a single SSA value.
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpS390XSLD, ssa.OpS390XSLW,
		ssa.OpS390XSRD, ssa.OpS390XSRW,
		ssa.OpS390XSRAD, ssa.OpS390XSRAW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		// R0 means "no shift register" in the shift instruction encoding,
		// so it must never carry the shift amount.
		if r2 == s390x.REG_R0 {
			v.Fatalf("cannot use R0 as shift value %s", v.LongString())
		}
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	case ssa.OpS390XADD, ssa.OpS390XADDW,
		ssa.OpS390XSUB, ssa.OpS390XSUBW,
		ssa.OpS390XAND, ssa.OpS390XANDW,
		ssa.OpS390XOR, ssa.OpS390XORW,
		ssa.OpS390XXOR, ssa.OpS390XXORW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := opregreg(s, v.Op.Asm(), r, r2)
		// Use the 3-operand form only when the destination differs from
		// the first input.
		if r != r1 {
			p.Reg = r1
		}
	// 2-address opcode arithmetic: output must reuse the first input's register.
	case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
		ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
		ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
		ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
	case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
		ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
		// Fused multiply-add/subtract: dst = dst +/- arg1*arg2 (dst aliases arg0).
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r2
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
		ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
		ssa.OpS390XMODD, ssa.OpS390XMODW,
		ssa.OpS390XMODDU, ssa.OpS390XMODWU:

		// TODO(mundaym): use the temp registers every time like x86 does with AX?
		dividend := v.Args[0].Reg()
		divisor := v.Args[1].Reg()

		// CPU faults upon signed overflow, which occurs when most
		// negative int is divided by -1. Guard the signed ops with an
		// explicit divisor == -1 check and branch to a fixup sequence.
		var j *obj.Prog
		if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
			v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {

			var c *obj.Prog
			c = s.Prog(s390x.ACMP)
			j = s.Prog(s390x.ABEQ)

			c.From.Type = obj.TYPE_REG
			c.From.Reg = divisor
			c.To.Type = obj.TYPE_CONST
			c.To.Offset = -1

			j.To.Type = obj.TYPE_BRANCH

		}

		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = divisor
		p.Reg = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = dividend

		// signed division, rest of the check for -1 case:
		// skip the divide and compute the result directly.
		if j != nil {
			j2 := s.Prog(s390x.ABR)
			j2.To.Type = obj.TYPE_BRANCH

			var n *obj.Prog
			if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
				// n * -1 = -n
				n = s.Prog(s390x.ANEG)
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			} else {
				// n % -1 == 0
				n = s.Prog(s390x.AXOR)
				n.From.Type = obj.TYPE_REG
				n.From.Reg = dividend
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			}

			// BEQ lands on the fixup; BR after the divide skips it.
			j.To.Val = n
			j2.To.Val = s.Pc()
		}
	case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
		opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
	case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
		ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
		ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
		ssa.OpS390XORconst, ssa.OpS390XORWconst,
		ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
		// 2-address immediate forms: output register must alias input[0].
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
		ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
		ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
		ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		r := v.Reg()
		r1 := v.Args[0].Reg()
		if r != r1 {
			p.Reg = r1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
		// Materialize a mask from the carry flag; source and destination
		// are the same register here.
		r := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDaddridx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Scale = 1
		// Keep REGSP out of the index slot by swapping base and index.
		if i == s390x.REGSP {
			r, i = i, r
		}
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = r
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVDaddr:
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt
	case ssa.OpS390XMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
		// AuxInt holds the float64 bit pattern of the constant.
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XADDWload, ssa.OpS390XADDload,
		ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
		ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
		ssa.OpS390XANDWload, ssa.OpS390XANDload,
		ssa.OpS390XORWload, ssa.OpS390XORload,
		ssa.OpS390XXORWload, ssa.OpS390XXORload:
		// Read-modify ops: arg0 is the accumulator register, arg1 the address.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDload,
		ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
		ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
		ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
		ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, ssa.OpS390XMOVDloadidx,
		ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
		ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		// Keep REGSP out of the index slot by swapping base and index.
		if i == s390x.REGSP {
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = r
		p.From.Scale = 1
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
		ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
		ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		// Keep REGSP out of the index slot by swapping base and index.
		if i == s390x.REGSP {
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = r
		p.To.Scale = 1
		p.To.Index = i
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
		// AuxValAndOff packs the stored constant (Val) and the address
		// offset (Off) into a single aux field.
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
		ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
		// Simple reg -> reg unary ops (extensions, conversions, negation).
		opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
	case ssa.OpS390XCLEAR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpCopy, ssa.OpS390XMOVDconvert, ssa.OpS390XMOVDreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x != y {
			opregreg(s, moveByType(v.Type), y, x)
		}
	case ssa.OpS390XMOVDnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpS390XLoweredGetClosurePtr:
		// Closure pointer is R12 (already)
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
		// input is already rounded
	case ssa.OpS390XLoweredGetG:
		r := v.Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REGG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
		s.Call(v)
	case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XNOT, ssa.OpS390XNOTW:
		// NOT is lowered to XOR with -1 by rewrite rules; it should never
		// reach code generation.
		v.Fatalf("NOT/NOTW generated %s", v.LongString())
	case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
		ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
		ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
		ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
		// Conditional moves: dst (aliasing arg0) is overwritten with arg1
		// when the condition holds.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFSQRT:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
	case ssa.OpS390XLoweredNilCheck:
		// Issue a load which will fault if the input is nil.
		p := s.Prog(s390x.AMOVBZ)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = s390x.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpS390XMVC:
		// Fixed-length memory-to-memory copy:
		// MVC $len, off(src), off(dst).
		vo := v.AuxValAndOff()
		p := s.Prog(s390x.AMVC)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = vo.Val()
		p.From3 = new(obj.Addr)
		p.From3.Type = obj.TYPE_MEM
		p.From3.Reg = v.Args[1].Reg()
		p.From3.Offset = vo.Off()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = vo.Off()
	case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
		ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
		// Store-multiple requires consecutively numbered registers.
		for i := 2; i < len(v.Args)-1; i++ {
			if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
				v.Fatalf("invalid store multiple %s", v.LongString())
			}
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[len(v.Args)-2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredMove:
		// Inputs must be valid pointers to memory,
		// so adjust arg0 and arg1 as part of the expansion.
		// arg2 should be src+size,
		//
		// mvc: MVC  $256, 0(R2), 0(R1)
		//      MOVD $256(R1), R1
		//      MOVD $256(R2), R2
		//      CMP  R2, Rarg2
		//      BNE  mvc
		//      MVC  $rem, 0(R2), 0(R1) // if rem > 0
		// arg2 is the last address to move in the loop + 256
		mvc := s.Prog(s390x.AMVC)
		mvc.From.Type = obj.TYPE_CONST
		mvc.From.Offset = 256
		mvc.From3 = new(obj.Addr)
		mvc.From3.Type = obj.TYPE_MEM
		mvc.From3.Reg = v.Args[1].Reg()
		mvc.To.Type = obj.TYPE_MEM
		mvc.To.Reg = v.Args[0].Reg()

		// Advance both the destination (arg0) and source (arg1) pointers
		// by one 256-byte chunk.
		for i := 0; i < 2; i++ {
			movd := s.Prog(s390x.AMOVD)
			movd.From.Type = obj.TYPE_ADDR
			movd.From.Reg = v.Args[i].Reg()
			movd.From.Offset = 256
			movd.To.Type = obj.TYPE_REG
			movd.To.Reg = v.Args[i].Reg()
		}

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[1].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[2].Reg()
		cmpu.To.Type = obj.TYPE_REG

		// NOTE(review): named bne per the sketch above, but emitted as BLT
		// (branch on unsigned less-than): loop while src pointer < end.
		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, mvc)

		// Copy the sub-256-byte remainder, if any.
		if v.AuxInt > 0 {
			mvc := s.Prog(s390x.AMVC)
			mvc.From.Type = obj.TYPE_CONST
			mvc.From.Offset = v.AuxInt
			mvc.From3 = new(obj.Addr)
			mvc.From3.Type = obj.TYPE_MEM
			mvc.From3.Reg = v.Args[1].Reg()
			mvc.To.Type = obj.TYPE_MEM
			mvc.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XLoweredZero:
		// Input must be valid pointers to memory,
		// so adjust arg0 as part of the expansion.
		// arg1 should be src+size,
		//
		// clear: CLEAR $256, 0(R1)
		//        MOVD  $256(R1), R1
		//        CMP   R1, Rarg1
		//        BNE   clear
		//        CLEAR $rem, 0(R1) // if rem > 0
		// arg1 is the last address to zero in the loop + 256
		clear := s.Prog(s390x.ACLEAR)
		clear.From.Type = obj.TYPE_CONST
		clear.From.Offset = 256
		clear.To.Type = obj.TYPE_MEM
		clear.To.Reg = v.Args[0].Reg()

		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_ADDR
		movd.From.Reg = v.Args[0].Reg()
		movd.From.Offset = 256
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Args[0].Reg()

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[0].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[1].Reg()
		cmpu.To.Type = obj.TYPE_REG

		// NOTE(review): named bne per the sketch above, but emitted as BLT
		// (branch on unsigned less-than): loop while pointer < end.
		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, clear)

		// Zero the sub-256-byte remainder, if any.
		if v.AuxInt > 0 {
			clear := s.Prog(s390x.ACLEAR)
			clear.From.Type = obj.TYPE_CONST
			clear.From.Offset = v.AuxInt
			clear.To.Type = obj.TYPE_MEM
			clear.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLAA, ssa.OpS390XLAAG:
		// Atomic add: old value lands in Reg0, addend comes from arg1.
		p := s.Prog(v.Op.Asm())
		p.Reg = v.Reg0()
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
		// Convert the flags output of CS{,G} into a bool.
		// CS{,G} arg1, arg2, arg0
		// MOVD   $0, ret
		// BNE    2(PC)
		// MOVD   $1, ret
		// NOP (so the BNE has somewhere to land)

		// CS{,G} arg1, arg2, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Args[1].Reg() // old
		cs.Reg = v.Args[2].Reg()      // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// MOVD $0, ret
		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 0
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// BNE 2(PC)
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH

		// MOVD $1, ret
		movd = s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 1
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// NOP (so the BNE has somewhere to land)
		nop := s.Prog(obj.ANOP)
		gc.Patch(bne, nop)
	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
		// Loop until the CS{,G} succeeds.
		//     MOV{WZ,D} arg0, ret
		// cs: CS{,G}    ret, arg1, arg0
		//     BNE       cs

		// MOV{WZ,D} arg0, ret
		load := s.Prog(loadByType(v.Type.FieldType(0)))
		load.From.Type = obj.TYPE_MEM
		load.From.Reg = v.Args[0].Reg()
		load.To.Type = obj.TYPE_REG
		load.To.Reg = v.Reg0()
		gc.AddAux(&load.From, v)

		// CS{,G} ret, arg1, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Reg0()   // old
		cs.Reg = v.Args[1].Reg() // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// BNE cs
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, cs)
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

// blockJump maps each conditional block kind to the branch instruction
// taken when the condition holds (asm) and its inverse (invasm).
var blockJump = [...]struct {
	asm, invasm obj.As
}{
	ssa.BlockS390XEQ:  {s390x.ABEQ, s390x.ABNE},
	ssa.BlockS390XNE:  {s390x.ABNE, s390x.ABEQ},
	ssa.BlockS390XLT:  {s390x.ABLT, s390x.ABGE},
	ssa.BlockS390XGE:  {s390x.ABGE, s390x.ABLT},
	ssa.BlockS390XLE:  {s390x.ABLE, s390x.ABGT},
	ssa.BlockS390XGT:  {s390x.ABGT, s390x.ABLE},
	ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
	ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
}

// ssaGenBlock emits the control-flow instructions ending block b;
// next is the block that will be laid out immediately after b, so a
// branch to it can be elided.
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R3:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(s390x.ACMPW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REG_R3
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p = s.Prog(s390x.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
	case ssa.BlockRet:
		s.Prog(obj.ARET)
	case ssa.BlockRetJmp:
		p := s.Prog(s390x.ABR)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)
	case ssa.BlockS390XEQ, ssa.BlockS390XNE,
		ssa.BlockS390XLT, ssa.BlockS390XGE,
		ssa.BlockS390XLE, ssa.BlockS390XGT,
		ssa.BlockS390XGEF, ssa.BlockS390XGTF:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			// Fallthrough to the taken successor: emit only the
			// inverted branch to the other one.
			p = s.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			// Neither successor follows: conditional branch plus an
			// unconditional one.
			p = s.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := s.Prog(s390x.ABR)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}
	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}