github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/cmd/compile/internal/s390x/ssa.go (about) 1 // Copyright 2016 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package s390x 6 7 import ( 8 "math" 9 10 "cmd/compile/internal/gc" 11 "cmd/compile/internal/ssa" 12 "cmd/compile/internal/types" 13 "cmd/internal/obj" 14 "cmd/internal/obj/s390x" 15 ) 16 17 // markMoves marks any MOVXconst ops that need to avoid clobbering flags. 18 func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { 19 flive := b.FlagsLiveAtEnd 20 if b.Control != nil && b.Control.Type.IsFlags() { 21 flive = true 22 } 23 for i := len(b.Values) - 1; i >= 0; i-- { 24 v := b.Values[i] 25 if flive && v.Op == ssa.OpS390XMOVDconst { 26 // The "mark" is any non-nil Aux value. 27 v.Aux = v 28 } 29 if v.Type.IsFlags() { 30 flive = false 31 } 32 for _, a := range v.Args { 33 if a.Type.IsFlags() { 34 flive = true 35 } 36 } 37 } 38 } 39 40 // loadByType returns the load instruction of the given type. 41 func loadByType(t *types.Type) obj.As { 42 if t.IsFloat() { 43 switch t.Size() { 44 case 4: 45 return s390x.AFMOVS 46 case 8: 47 return s390x.AFMOVD 48 } 49 } else { 50 switch t.Size() { 51 case 1: 52 if t.IsSigned() { 53 return s390x.AMOVB 54 } else { 55 return s390x.AMOVBZ 56 } 57 case 2: 58 if t.IsSigned() { 59 return s390x.AMOVH 60 } else { 61 return s390x.AMOVHZ 62 } 63 case 4: 64 if t.IsSigned() { 65 return s390x.AMOVW 66 } else { 67 return s390x.AMOVWZ 68 } 69 case 8: 70 return s390x.AMOVD 71 } 72 } 73 panic("bad load type") 74 } 75 76 // storeByType returns the store instruction of the given type. 
77 func storeByType(t *types.Type) obj.As { 78 width := t.Size() 79 if t.IsFloat() { 80 switch width { 81 case 4: 82 return s390x.AFMOVS 83 case 8: 84 return s390x.AFMOVD 85 } 86 } else { 87 switch width { 88 case 1: 89 return s390x.AMOVB 90 case 2: 91 return s390x.AMOVH 92 case 4: 93 return s390x.AMOVW 94 case 8: 95 return s390x.AMOVD 96 } 97 } 98 panic("bad store type") 99 } 100 101 // moveByType returns the reg->reg move instruction of the given type. 102 func moveByType(t *types.Type) obj.As { 103 if t.IsFloat() { 104 return s390x.AFMOVD 105 } else { 106 switch t.Size() { 107 case 1: 108 if t.IsSigned() { 109 return s390x.AMOVB 110 } else { 111 return s390x.AMOVBZ 112 } 113 case 2: 114 if t.IsSigned() { 115 return s390x.AMOVH 116 } else { 117 return s390x.AMOVHZ 118 } 119 case 4: 120 if t.IsSigned() { 121 return s390x.AMOVW 122 } else { 123 return s390x.AMOVWZ 124 } 125 case 8: 126 return s390x.AMOVD 127 } 128 } 129 panic("bad load type") 130 } 131 132 // opregreg emits instructions for 133 // dest := dest(To) op src(From) 134 // and also returns the created obj.Prog so it 135 // may be further adjusted (offset, scale, etc). 136 func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { 137 p := s.Prog(op) 138 p.From.Type = obj.TYPE_REG 139 p.To.Type = obj.TYPE_REG 140 p.To.Reg = dest 141 p.From.Reg = src 142 return p 143 } 144 145 // opregregimm emits instructions for 146 // dest := src(From) op off 147 // and also returns the created obj.Prog so it 148 // may be further adjusted (offset, scale, etc). 
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
	p := s.Prog(op)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = off
	p.Reg = src
	p.To.Reg = dest
	p.To.Type = obj.TYPE_REG
	return p
}

// ssaGenValue emits the machine instruction(s) for a single SSA value v.
// Unhandled or should-never-reach-codegen ops call v.Fatalf.
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	switch v.Op {
	case ssa.OpS390XSLD, ssa.OpS390XSLW,
		ssa.OpS390XSRD, ssa.OpS390XSRW,
		ssa.OpS390XSRAD, ssa.OpS390XSRAW,
		ssa.OpS390XRLLG, ssa.OpS390XRLL:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		if r2 == s390x.REG_R0 {
			v.Fatalf("cannot use R0 as shift value %s", v.LongString())
		}
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	case ssa.OpS390XADD, ssa.OpS390XADDW,
		ssa.OpS390XSUB, ssa.OpS390XSUBW,
		ssa.OpS390XAND, ssa.OpS390XANDW,
		ssa.OpS390XOR, ssa.OpS390XORW,
		ssa.OpS390XXOR, ssa.OpS390XXORW:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := opregreg(s, v.Op.Asm(), r, r2)
		if r != r1 {
			p.Reg = r1
		}
	// 2-address opcode arithmetic
	case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
		ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
		ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
		ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
	case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
		ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
		// Fused multiply-add/subtract: output register is also the
		// accumulator input (arg0).
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		r1 := v.Args[1].Reg()
		r2 := v.Args[2].Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r2
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFIDBR:
		// AuxInt is the rounding mode mask; 2 is not a valid value.
		switch v.AuxInt {
		case 0, 1, 3, 4, 5, 6, 7:
			opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
		default:
			v.Fatalf("invalid FIDBR mask: %v", v.AuxInt)
		}
	case ssa.OpS390XCPSDR:
		p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
		p.Reg = v.Args[0].Reg()
	case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
		ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
		ssa.OpS390XMODD, ssa.OpS390XMODW,
		ssa.OpS390XMODDU, ssa.OpS390XMODWU:

		// TODO(mundaym): use the temp registers every time like x86 does with AX?
		dividend := v.Args[0].Reg()
		divisor := v.Args[1].Reg()

		// CPU faults upon signed overflow, which occurs when most
		// negative int is divided by -1. Guard the signed forms with
		// a compare against -1 that skips the divide instruction.
		var j *obj.Prog
		if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
			v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {

			var c *obj.Prog
			c = s.Prog(s390x.ACMP)
			j = s.Prog(s390x.ABEQ)

			c.From.Type = obj.TYPE_REG
			c.From.Reg = divisor
			c.To.Type = obj.TYPE_CONST
			c.To.Offset = -1

			j.To.Type = obj.TYPE_BRANCH

		}

		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = divisor
		p.Reg = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = dividend

		// signed division, rest of the check for -1 case
		if j != nil {
			j2 := s.Prog(s390x.ABR)
			j2.To.Type = obj.TYPE_BRANCH

			var n *obj.Prog
			if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
				// n * -1 = -n
				n = s.Prog(s390x.ANEG)
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			} else {
				// n % -1 == 0
				n = s.Prog(s390x.AXOR)
				n.From.Type = obj.TYPE_REG
				n.From.Reg = dividend
				n.To.Type = obj.TYPE_REG
				n.To.Reg = dividend
			}

			// Patch the two branches: the BEQ jumps to the fixup
			// instruction, the BR jumps past it.
			j.To.Val = n
			j2.To.Val = s.Pc()
		}
	case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
		opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
	case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
		ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
		ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
		ssa.OpS390XORconst, ssa.OpS390XORWconst,
		ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
		// 2-address constant forms: output must share arg0's register.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
		ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
		ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
		ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		r := v.Reg()
		r1 := v.Args[0].Reg()
		if r != r1 {
			p.Reg = r1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDaddridx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Scale = 1
		if i == s390x.REGSP {
			// Keep SP in the base slot; swap base and index.
			r, i = i, r
		}
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = r
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVDaddr:
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt
	case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		// Unsigned comparison: truncate the constant to 32 bits
		// before widening so it is not sign-extended.
		p.To.Offset = int64(uint32(v.AuxInt))
	case ssa.OpS390XMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		// AuxInt holds the float's bit pattern.
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpS390XADDWload, ssa.OpS390XADDload,
		ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
		ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
		ssa.OpS390XANDWload, ssa.OpS390XANDload,
		ssa.OpS390XORWload, ssa.OpS390XORload,
		ssa.OpS390XXORWload, ssa.OpS390XXORload:
		// Combined op-with-memory-operand: reg op= mem.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XMOVDload,
		ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
		ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
		ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
		ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
		ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx,
		ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
		ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			// Keep SP in the base slot; swap base and index.
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = r
		p.From.Scale = 1
		p.From.Index = i
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
		ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
		ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
		r := v.Args[0].Reg()
		i := v.Args[1].Reg()
		if i == s390x.REGSP {
			// Keep SP in the base slot; swap base and index.
			r, i = i, r
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = r
		p.To.Scale = 1
		p.To.Index = i
		gc.AddAux(&p.To, v)
	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
		// AuxValAndOff packs both the stored constant and the
		// address offset into AuxInt.
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
		ssa.OpS390XLDGR, ssa.OpS390XLGDR,
		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
		ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
		ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
		// Simple one-input, one-output register-to-register ops
		// (sign/zero extensions, conversions, negations, ...).
		opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
	case ssa.OpS390XCLEAR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpCopy, ssa.OpS390XMOVDreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x != y {
			opregreg(s, moveByType(v.Type), y, x)
		}
	case ssa.OpS390XMOVDnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		// Load from a spill slot into a register.
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpStoreReg:
		// Store a register to a spill slot.
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpS390XLoweredGetClosurePtr:
		// Closure pointer is R12 (already)
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
		// input is already rounded
	case ssa.OpS390XLoweredGetG:
		r := v.Reg()
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REGG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XLoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(s390x.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -gc.Ctxt.FixedFrameSize()
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XLoweredGetCallerPC:
		p := s.Prog(obj.AGETCALLERPC)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
		s.Call(v)
	case ssa.OpS390XLoweredWB:
		// Call the write barrier function stored in Aux.
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = v.Aux.(*obj.LSym)
	case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
		ssa.OpS390XNEG, ssa.OpS390XNEGW,
		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XNOT, ssa.OpS390XNOTW:
		// These should have been rewritten away before codegen.
		v.Fatalf("NOT/NOTW generated %s", v.LongString())
	case ssa.OpS390XSumBytes2, ssa.OpS390XSumBytes4, ssa.OpS390XSumBytes8:
		v.Fatalf("SumBytes generated %s", v.LongString())
	case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
		ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
		ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
		ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
		// Conditional moves: output shares arg0's register.
		r := v.Reg()
		if r != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpS390XFSQRT:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpS390XInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
	case ssa.OpS390XLoweredNilCheck:
		// Issue a load which will fault if the input is nil.
		p := s.Prog(s390x.AMOVBZ)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = s390x.REGTMP
		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			gc.Warnl(v.Pos, "generated nil check")
		}
	case ssa.OpS390XMVC:
		// Memory-to-memory copy; AuxValAndOff packs length (Val)
		// and common offset (Off).
		vo := v.AuxValAndOff()
		p := s.Prog(s390x.AMVC)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = vo.Val()
		p.SetFrom3(obj.Addr{
			Type:   obj.TYPE_MEM,
			Reg:    v.Args[1].Reg(),
			Offset: vo.Off(),
		})
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = vo.Off()
	case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
		ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
		// Store-multiple requires the value args to occupy
		// consecutive registers.
		for i := 2; i < len(v.Args)-1; i++ {
			if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
				v.Fatalf("invalid store multiple %s", v.LongString())
			}
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[len(v.Args)-2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredMove:
		// Inputs must be valid pointers to memory,
		// so adjust arg0 and arg1 as part of the expansion.
		// arg2 should be src+size,
		//
		// mvc: MVC  $256, 0(R2), 0(R1)
		//      MOVD $256(R1), R1
		//      MOVD $256(R2), R2
		//      CMP  R2, Rarg2
		//      BNE  mvc
		//      MVC  $rem, 0(R2), 0(R1) // if rem > 0
		// arg2 is the last address to move in the loop + 256
		mvc := s.Prog(s390x.AMVC)
		mvc.From.Type = obj.TYPE_CONST
		mvc.From.Offset = 256
		mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
		mvc.To.Type = obj.TYPE_MEM
		mvc.To.Reg = v.Args[0].Reg()

		// Advance both source and destination pointers by 256.
		for i := 0; i < 2; i++ {
			movd := s.Prog(s390x.AMOVD)
			movd.From.Type = obj.TYPE_ADDR
			movd.From.Reg = v.Args[i].Reg()
			movd.From.Offset = 256
			movd.To.Type = obj.TYPE_REG
			movd.To.Reg = v.Args[i].Reg()
		}

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[1].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[2].Reg()
		cmpu.To.Type = obj.TYPE_REG

		// Loop while the source pointer is still below the end
		// address (branch-on-less-than, despite the name).
		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, mvc)

		// Copy the remainder (< 256 bytes), if any.
		if v.AuxInt > 0 {
			mvc := s.Prog(s390x.AMVC)
			mvc.From.Type = obj.TYPE_CONST
			mvc.From.Offset = v.AuxInt
			mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
			mvc.To.Type = obj.TYPE_MEM
			mvc.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XLoweredZero:
		// Input must be valid pointers to memory,
		// so adjust arg0 as part of the expansion.
		// arg1 should be src+size,
		//
		// clear: CLEAR $256, 0(R1)
		//        MOVD  $256(R1), R1
		//        CMP   R1, Rarg1
		//        BNE   clear
		//        CLEAR $rem, 0(R1) // if rem > 0
		// arg1 is the last address to zero in the loop + 256
		clear := s.Prog(s390x.ACLEAR)
		clear.From.Type = obj.TYPE_CONST
		clear.From.Offset = 256
		clear.To.Type = obj.TYPE_MEM
		clear.To.Reg = v.Args[0].Reg()

		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_ADDR
		movd.From.Reg = v.Args[0].Reg()
		movd.From.Offset = 256
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Args[0].Reg()

		cmpu := s.Prog(s390x.ACMPU)
		cmpu.From.Reg = v.Args[0].Reg()
		cmpu.From.Type = obj.TYPE_REG
		cmpu.To.Reg = v.Args[1].Reg()
		cmpu.To.Type = obj.TYPE_REG

		bne := s.Prog(s390x.ABLT)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, clear)

		// Zero the remainder (< 256 bytes), if any.
		if v.AuxInt > 0 {
			clear := s.Prog(s390x.ACLEAR)
			clear.From.Type = obj.TYPE_CONST
			clear.From.Offset = v.AuxInt
			clear.To.Type = obj.TYPE_MEM
			clear.To.Reg = v.Args[0].Reg()
		}
	case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLAA, ssa.OpS390XLAAG:
		// Atomic add returning the old value in Reg0.
		p := s.Prog(v.Op.Asm())
		p.Reg = v.Reg0()
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
		// Convert the flags output of CS{,G} into a bool.
		//    CS{,G} arg1, arg2, arg0
		//    MOVD   $0, ret
		//    BNE    2(PC)
		//    MOVD   $1, ret
		//    NOP (so the BNE has somewhere to land)

		// CS{,G} arg1, arg2, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Args[1].Reg() // old
		cs.Reg = v.Args[2].Reg()      // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// MOVD $0, ret
		movd := s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 0
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// BNE 2(PC)
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH

		// MOVD $1, ret
		movd = s.Prog(s390x.AMOVD)
		movd.From.Type = obj.TYPE_CONST
		movd.From.Offset = 1
		movd.To.Type = obj.TYPE_REG
		movd.To.Reg = v.Reg0()

		// NOP (so the BNE has somewhere to land)
		nop := s.Prog(obj.ANOP)
		gc.Patch(bne, nop)
	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
		// Loop until the CS{,G} succeeds.
		//     MOV{WZ,D} arg0, ret
		// cs: CS{,G}    ret, arg1, arg0
		//     BNE       cs

		// MOV{WZ,D} arg0, ret
		load := s.Prog(loadByType(v.Type.FieldType(0)))
		load.From.Type = obj.TYPE_MEM
		load.From.Reg = v.Args[0].Reg()
		load.To.Type = obj.TYPE_REG
		load.To.Reg = v.Reg0()
		gc.AddAux(&load.From, v)

		// CS{,G} ret, arg1, arg0
		cs := s.Prog(v.Op.Asm())
		cs.From.Type = obj.TYPE_REG
		cs.From.Reg = v.Reg0()   // old
		cs.Reg = v.Args[1].Reg() // new
		cs.To.Type = obj.TYPE_MEM
		cs.To.Reg = v.Args[0].Reg()
		gc.AddAux(&cs.To, v)

		// BNE cs
		bne := s.Prog(s390x.ABNE)
		bne.To.Type = obj.TYPE_BRANCH
		gc.Patch(bne, cs)
	case ssa.OpClobber:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

// blockJump maps each conditional block kind to the branch instruction
// used when falling through to the second successor (asm) and the
// inverted branch used when falling through to the first (invasm).
var blockJump = [...]struct {
	asm, invasm obj.As
}{
	ssa.BlockS390XEQ:  {s390x.ABEQ, s390x.ABNE},
	ssa.BlockS390XNE:  {s390x.ABNE, s390x.ABEQ},
	ssa.BlockS390XLT:  {s390x.ABLT, s390x.ABGE},
	ssa.BlockS390XGE:  {s390x.ABGE, s390x.ABLT},
	ssa.BlockS390XLE:  {s390x.ABLE, s390x.ABGT},
	ssa.BlockS390XGT:  {s390x.ABGT, s390x.ABLE},
	ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
	ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
}

// ssaGenBlock emits the control-flow instructions that end block b.
// next is the block that will be laid out immediately after b, so a
// branch to next can be omitted.
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R3:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(s390x.ACMPW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = s390x.REG_R3
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p = s.Prog(s390x.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(s390x.ABR)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit:
		s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
	case ssa.BlockRet:
		s.Prog(obj.ARET)
	case ssa.BlockRetJmp:
		p := s.Prog(s390x.ABR)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = b.Aux.(*obj.LSym)
	case ssa.BlockS390XEQ, ssa.BlockS390XNE,
		ssa.BlockS390XLT, ssa.BlockS390XGE,
		ssa.BlockS390XLE, ssa.BlockS390XGT,
		ssa.BlockS390XGEF, ssa.BlockS390XGTF:
		jmp := blockJump[b.Kind]
		switch next {
		case b.Succs[0].Block():
			// Fallthrough to the likely successor; branch on the
			// inverted condition to the other one.
			s.Br(jmp.invasm, b.Succs[1].Block())
		case b.Succs[1].Block():
			s.Br(jmp.asm, b.Succs[0].Block())
		default:
			// Neither successor follows; emit a conditional branch
			// plus an unconditional one, ordering by likelihood.
			if b.Likely != ssa.BranchUnlikely {
				s.Br(jmp.asm, b.Succs[0].Block())
				s.Br(s390x.ABR, b.Succs[1].Block())
			} else {
				s.Br(jmp.invasm, b.Succs[1].Block())
				s.Br(s390x.ABR, b.Succs[0].Block())
			}
		}
	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}