rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/cmd/9g/gsubr.go (about) 1 // Derived from Inferno utils/6c/txt.c 2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package main

import (
	"cmd/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/ppc64"
	"fmt"
)

// resvd lists the ppc64 registers that are reserved and must never be
// handed out by the register allocator.
var resvd = []int{
	ppc64.REGZERO,
	ppc64.REGSP, // reserved for SP
	// We need to preserve the C ABI TLS pointer because sigtramp
	// may happen during C code and needs to access the g. C
	// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
	// won't know which convention to use. By preserving REGTLS,
	// we can just retrieve g from TLS when we aren't sure.
	ppc64.REGTLS,

	// TODO(austin): Consolidate REGTLS and REGG?
	ppc64.REGG,
	ppc64.REGTMP, // REGTMP
	ppc64.FREGCVI,
	ppc64.FREGZERO,
	ppc64.FREGHALF,
	ppc64.FREGONE,
	ppc64.FREGTWO,
}

/*
 * generate
 *	as $c, n2
 *
 * If c does not fit the 16-bit immediate range (ppc64.BIG), or the
 * destination is not a register, or the opcode cannot take an
 * immediate at all (AMULLD), the constant is first materialized into
 * a temporary register with AMOVD.
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		rawgins(ppc64.AMOVD, &n1, &ntmp)
		rawgins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}

/*
 * generate
 *	as n2, $c (CMP/CMPU)
 *
 * Emits the compare directly when c fits the immediate range of the
 * chosen instruction (signed range for ACMP, unsigned for ACMPU);
 * otherwise loads c into a temporary register first.
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatal("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(ppc64.AMOVD, &n1, &ntmp)
	rawgins(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}

/*
 * set up nodes representing 2^63
 */
var bigi gc.Node // 2^63 as a TUINT64 integer constant

var bigf gc.Node // 2^63 as a TFLOAT64 constant

var bignodes_did int // non-zero once bigi/bigf have been initialized

// bignodes lazily initializes bigi and bigf (both represent 2^63),
// used by gmove for unsigned 64-bit <-> float conversions.
func bignodes() {
	if bignodes_did != 0 {
		return
	}
	bignodes_did = 1

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
	gc.Mpshiftfix(bigi.Val.U.Xval, 63)

	bigf = bigi
	bigf.Type = gc.Types[gc.TFLOAT64]
	bigf.Val.Ctype = gc.CTFLT
	bigf.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
}
/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r2 gc.Node
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			gc.Convconst(&con, t.Type, &f.Val)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			// Small signed targets: widen the constant to int64 in a
			// register, then recurse to perform the truncating move.
			var con gc.Node
			gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			// Small unsigned targets: same as above, via uint64.
			var con gc.Node
			gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// float constants come from memory.
	//if(isfloat[tt])
	//	goto hard;

	// 64-bit immediates are also from memory.
	//if(isint[tt])
	//	goto hard;
	//// 64-bit immediates are really 32-bit sign-extended
	//// unless moving into a register.
	//if(isint[tt]) {
	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
	//		goto hard;
	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
	//		goto hard;
	//}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8, // truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16, // truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = ppc64.AMOVWZ

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = ppc64.AMOVD

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVBZ

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVHZ

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVWZ

		goto rdst

	/*
	 * float to integer
	 */
	//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32,
		gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			// If the value is >= 2^63, subtract 2^63 before
			// converting; the missing bit is added back below.
			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins(ppc64.AFCMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
			gins(ppc64.AFSUB, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			gc.Regfree(&r2)
		}

		// Convert in a float register, then round-trip the bits
		// through a stack slot at SP-8 to reach an integer register.
		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
		gins(ppc64.AFCTIDZ, &r1, &r2)
		p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AMOVD, nil, &r3)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		if tt == gc.TUINT64 {
			// Add 2^63 back when it was subtracted above.
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
			gins(ppc64.AMOVD, &bigi, &r1)
			gins(ppc64.AADD, &r1, &r3)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r3, t)
		gc.Regfree(&r3)
		return

	/*
	 * integer to float
	 */
	//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
		gmove(f, &r1)
		if ft == gc.TUINT64 {
			// If the value is >= 2^63, halve it first so it fits the
			// signed conversion; it is doubled again below.
			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
			gmove(&bigi, &r2)
			gins(ppc64.ACMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
			p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
			p2.From.Type = obj.TYPE_CONST
			p2.From.Offset = 1
			gc.Patch(p1, gc.Pc)
		}

		// Round-trip the bits through a stack slot at SP-8 to reach a
		// float register, then convert with FCFID.
		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
		p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AFMOVD, nil, &r2)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gins(ppc64.AFCFID, &r2, &r2)
		gc.Regfree(&r1)
		if ft == gc.TUINT64 {
			// Double the result when the input was halved above.
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
			gins(ppc64.AFMUL, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		gc.Regfree(&r2)
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = ppc64.AFRSP
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

// intLiteral reports whether n is an integer (or rune/bool) literal,
// and if so returns its value as an int64.
func intLiteral(n *gc.Node) (x int64, ok bool) {
	if n == nil || n.Op != gc.OLITERAL {
		return
	}
	switch n.Val.Ctype {
	case gc.CTINT, gc.CTRUNE:
		return gc.Mpgetfix(n.Val.U.Xval), true
	case gc.CTBOOL:
		return int64(bool2int(n.Val.U.Bval)), true
	}
	return
}

// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
func gins(as int, f, t *gc.Node) *obj.Prog {
	if as >= obj.A_ARCHSPECIFIC {
		if x, ok := intLiteral(f); ok {
			ginscon(as, x, t)
			return nil // caller must not use
		}
	}
	if as == ppc64.ACMP || as == ppc64.ACMPU {
		if x, ok := intLiteral(t); ok {
			ginscon2(as, f, x)
			return nil // caller must not use
		}
	}
	return rawgins(as, f, t)
}

/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case obj.ACALL:
		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
			pp := gc.Prog(as)
			pp.From = p.From
			pp.To.Type = obj.TYPE_REG
			pp.To.Reg = ppc64.REG_CTR

			p.As = ppc64.AMOVD
			p.From = p.To
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REG_CTR

			if gc.Debug['g'] != 0 {
				fmt.Printf("%v\n", p)
				fmt.Printf("%v\n", pp)
			}

			return pp
		}

	// Bad things the front end has done to us. Crash to find call stack.
	case ppc64.AAND, ppc64.AMULLD:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	case ppc64.ACMP, ppc64.ACMPU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	// Sanity-check operand widths against the move size implied by the
	// opcode; mismatches indicate a front-end bug.
	w := int32(0)
	switch as {
	case ppc64.AMOVB,
		ppc64.AMOVBU,
		ppc64.AMOVBZ,
		ppc64.AMOVBZU:
		w = 1

	case ppc64.AMOVH,
		ppc64.AMOVHU,
		ppc64.AMOVHZ,
		ppc64.AMOVHZU:
		w = 2

	case ppc64.AMOVW,
		ppc64.AMOVWU,
		ppc64.AMOVWZ,
		ppc64.AMOVWZU:
		w = 4

	case ppc64.AMOVD,
		ppc64.AMOVDU:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}

// fixlargeoffset rewrites an OINDREG node whose offset does not fit in
// 32 bits. Currently it only diagnoses the case (see the Fatal below).
func fixlargeoffset(n *gc.Node) {
	if n == nil {
		return
	}
	if n.Op != gc.OINDREG {
		return
	}
	if n.Reg == ppc64.REGSP { // stack offset cannot be large
		return
	}
	if n.Xoffset != int64(int32(n.Xoffset)) {
		// TODO(minux): offset too large, move into R31 and add to R31 instead.
		// this is used only in test/fixedbugs/issue6036.go.
		// NOTE(review): gc.Fatal presumably does not return — TODO
		// confirm. If so, the statements below are unreachable; they
		// appear to be the intended rewrite kept for when the TODO
		// above is implemented.
		gc.Fatal("offset too large: %v", gc.Nconv(n, 0))

		a := gc.Node(*n)
		a.Op = gc.OREGISTER
		a.Type = gc.Types[gc.Tptr]
		a.Xoffset = 0
		gc.Cgen_checknil(&a)
		ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
		n.Xoffset = 0
	}
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatal("optoas: t is nil")
	}

	a := int(obj.AXXX)
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))

	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = ppc64.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = ppc64.ABNE

	case gc.OLT<<16 | gc.TINT8, // ACMP
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64,
		gc.OLT<<16 | gc.TUINT8, // ACMPU
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64,
		gc.OLT<<16 | gc.TFLOAT32, // AFCMPU
		gc.OLT<<16 | gc.TFLOAT64:
		a = ppc64.ABLT

	case gc.OLE<<16 | gc.TINT8, // ACMP
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64,
		gc.OLE<<16 | gc.TUINT8, // ACMPU
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64:
		// No OLE for floats, because it mishandles NaN.
		// Front end must reverse comparison or use OLT and OEQ together.
		a = ppc64.ABLE

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = ppc64.ABGT

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64:
		// No OGE for floats, because it mishandles NaN.
		// Front end must reverse comparison or use OLT and OEQ together.
		a = ppc64.ABGE

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TPTR32,
		gc.OCMP<<16 | gc.TINT64:
		a = ppc64.ACMP

	case gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TUINT64,
		gc.OCMP<<16 | gc.TPTR64:
		a = ppc64.ACMPU

	case gc.OCMP<<16 | gc.TFLOAT32,
		gc.OCMP<<16 | gc.TFLOAT64:
		a = ppc64.AFCMPU

	case gc.OAS<<16 | gc.TBOOL,
		gc.OAS<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.OAS<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.OAS<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.OAS<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.OAS<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = ppc64.AMOVWZ

	case gc.OAS<<16 | gc.TINT64,
		gc.OAS<<16 | gc.TUINT64,
		gc.OAS<<16 | gc.TPTR64:
		a = ppc64.AMOVD

	case gc.OAS<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.OAS<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32,
		gc.OADD<<16 | gc.TINT64,
		gc.OADD<<16 | gc.TUINT64,
		gc.OADD<<16 | gc.TPTR64:
		a = ppc64.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = ppc64.AFADDS

	case gc.OADD<<16 | gc.TFLOAT64:
		a = ppc64.AFADD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32,
		gc.OSUB<<16 | gc.TINT64,
		gc.OSUB<<16 | gc.TUINT64,
		gc.OSUB<<16 | gc.TPTR64:
		a = ppc64.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = ppc64.AFSUBS

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = ppc64.AFSUB

	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32,
		gc.OMINUS<<16 | gc.TINT64,
		gc.OMINUS<<16 | gc.TUINT64,
		gc.OMINUS<<16 | gc.TPTR64:
		a = ppc64.ANEG

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32,
		gc.OAND<<16 | gc.TINT64,
		gc.OAND<<16 | gc.TUINT64,
		gc.OAND<<16 | gc.TPTR64:
		a = ppc64.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32,
		gc.OOR<<16 | gc.TINT64,
		gc.OOR<<16 | gc.TUINT64,
		gc.OOR<<16 | gc.TPTR64:
		a = ppc64.AOR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32,
		gc.OXOR<<16 | gc.TINT64,
		gc.OXOR<<16 | gc.TUINT64,
		gc.OXOR<<16 | gc.TPTR64:
		a = ppc64.AXOR

	// TODO(minux): handle rotates
	//case CASE(OLROT, TINT8):
	//case CASE(OLROT, TUINT8):
	//case CASE(OLROT, TINT16):
	//case CASE(OLROT, TUINT16):
	//case CASE(OLROT, TINT32):
	//case CASE(OLROT, TUINT32):
	//case CASE(OLROT, TPTR32):
	//case CASE(OLROT, TINT64):
	//case CASE(OLROT, TUINT64):
	//case CASE(OLROT, TPTR64):
	//	a = 0//???; RLDC?
	//	break;

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32,
		gc.OLSH<<16 | gc.TINT64,
		gc.OLSH<<16 | gc.TUINT64,
		gc.OLSH<<16 | gc.TPTR64:
		a = ppc64.ASLD

	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32,
		gc.ORSH<<16 | gc.TUINT64,
		gc.ORSH<<16 | gc.TPTR64:
		a = ppc64.ASRD

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32,
		gc.ORSH<<16 | gc.TINT64:
		a = ppc64.ASRAD

	// TODO(minux): handle rotates
	//case CASE(ORROTC, TINT8):
	//case CASE(ORROTC, TUINT8):
	//case CASE(ORROTC, TINT16):
	//case CASE(ORROTC, TUINT16):
	//case CASE(ORROTC, TINT32):
	//case CASE(ORROTC, TUINT32):
	//case CASE(ORROTC, TINT64):
	//case CASE(ORROTC, TUINT64):
	//	a = 0//??? RLDC??
	//	break;

	case gc.OHMUL<<16 | gc.TINT64:
		a = ppc64.AMULHD

	case gc.OHMUL<<16 | gc.TUINT64,
		gc.OHMUL<<16 | gc.TPTR64:
		a = ppc64.AMULHDU

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32,
		gc.OMUL<<16 | gc.TINT64:
		a = ppc64.AMULLD

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32,
		// don't use word multiply, the high 32-bit are undefined.
		// fallthrough
		gc.OMUL<<16 | gc.TUINT64,
		gc.OMUL<<16 | gc.TPTR64:
		a = ppc64.AMULLD
		// for 64-bit multiplies, signedness doesn't matter.

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = ppc64.AFMULS

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = ppc64.AFMUL

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32,
		gc.ODIV<<16 | gc.TINT64:
		a = ppc64.ADIVD

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32,
		gc.ODIV<<16 | gc.TUINT64,
		gc.ODIV<<16 | gc.TPTR64:
		a = ppc64.ADIVDU

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = ppc64.AFDIVS

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = ppc64.AFDIV
	}

	return a
}

// Flags reported by xgen/sudoaddable-style addressability analysis.
const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

// xgen is a stub: not yet implemented on ppc64.
// NOTE(review): `-1 != 0` is a constant true — the function always
// reports success regardless of its arguments.
func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return -1 != 0 /*TypeKind(100016)*/
}

// sudoclean releases resources held by a successful sudoaddable;
// a no-op on ppc64 since sudoaddable never succeeds.
func sudoclean() {
	return
}
1025 */ 1026 func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { 1027 // TODO(minux) 1028 1029 *a = obj.Addr{} 1030 return false 1031 }