github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/cmd/compile/internal/sparc64/gsubr.go (about) 1 // Derived from Inferno utils/6c/txt.c 2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package sparc64

import (
	"cmd/compile/internal/big"
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/sparc64"
	"fmt"
)

// resvd lists registers the register allocator must never hand out:
// fixed-function registers (zero register, stack pointer, g, TLS,
// runtime/assembler temporaries) plus, for now, the window input
// registers and other reserved SPARC64 state.
var resvd = []int{
	sparc64.REG_ZR,
	sparc64.REG_RT1,
	sparc64.REG_CTXT,
	sparc64.REG_G,
	sparc64.REG_RT2,
	sparc64.REG_TMP,
	sparc64.REG_G6,
	sparc64.REG_TLS,
	sparc64.REG_RSP,
	sparc64.REG_OLR,
	sparc64.REG_TMP2,
	sparc64.REG_L7,
	sparc64.REG_I0, // TODO(aram): revisit this.
	sparc64.REG_I1, // TODO(aram): revisit this.
	sparc64.REG_I2, // TODO(aram): revisit this.
	sparc64.REG_I3, // TODO(aram): revisit this.
	sparc64.REG_I4, // TODO(aram): revisit this.
	sparc64.REG_I5, // TODO(aram): revisit this.
	sparc64.REG_RFP,
	sparc64.REG_ILR,
	sparc64.REG_YTMP,
	sparc64.REG_YTWO,
}

/*
 * generate
 *	as $c, n
 */
// ginscon emits "as $c, n2" with constant c.  When the instruction
// cannot encode c as an immediate (outside the 13-bit signed range
// sparc64.BIG, or a MULD, or a non-register destination), the constant
// is first materialized into a temporary register with MOVD.
func ginscon(as obj.As, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != sparc64.AMOVD && (c < -sparc64.BIG || c > sparc64.BIG) || as == sparc64.AMULD || n2 != nil && n2.Op != gc.OREGISTER {
		// cannot have more than 13-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(sparc64.AMOVD, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}

/*
 * generate
 *	as n, $c (CMP)
 */
// ginscon2 emits a comparison of n2 against the constant c.  Only ACMP
// is accepted; anything else is a front-end bug.  Constants outside the
// 13-bit immediate range are moved into a temporary register first.
func ginscon2(as obj.As, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatalf("ginscon2")

	case sparc64.ACMP:
		// c fits the immediate field: compare directly.
		if -sparc64.BIG <= c && c <= sparc64.BIG {
			gcmp(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(sparc64.AMOVD, &n1, &ntmp)
	gcmp(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}

// ginscmp compares n1 and n2 (with operator op, operand type t) and
// returns the conditional branch Prog for the caller to patch.
// An integer constant operand is canonicalized to the right-hand side
// (reversing op) so it can use the immediate form of CMP.
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		// Right operand is a small-enough integer constant: use CMP-immediate.
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64())
	} else {
		// NOTE(review): g2 is allocated with n1.Type rather than n2.Type;
		// this mirrors the structure above, and n1/n2 are comparable types —
		// confirm against other backends before changing.
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gcmp(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// set up nodes representing 2^63
var (
	bigi         gc.Node // 2^63 as a TUINT64 constant node
	bigf         gc.Node // 2^63 as a TFLOAT64 constant node
	bignodes_did bool    // one-time init guard for bigi/bigf
)

// bignodes lazily initializes bigi and bigf (both representing 2^63),
// used by gmove's unsigned<->float conversion sequences.
func bignodes() {
	if bignodes_did {
		return
	}
	bignodes_did = true

	var i big.Int
	i.SetInt64(1)
	i.Lsh(&i, 63)

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	bigi.SetBigInt(&i)

	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
}

/*
 * generate move:
 *	t = f
 * hard part is
conversions.
 */
// gmove generates code for t = f, including any widening, narrowing,
// or int<->float conversion implied by the two types.  It dispatches on
// the (from<<16 | to) simple-type pair; see the labeled exits below.
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := t.Type

	// Complex values are handled entirely by the portable front end.
	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r1 gc.Node
	var r2 gc.Node
	var a obj.As
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			// Narrow signed destination: load the constant as 64-bit,
			// then recurse to perform the truncating store/move.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(sparc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			// Narrow unsigned destination: same, via TUINT64.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(sparc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto fsrccpy for "float operation, and source is an integer register".
	// goto fdstcpy for "float operation, and destination is an integer register".
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = sparc64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		// truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = sparc64.AMOVUB

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = sparc64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		// truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = sparc64.AMOVUH

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		// truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = sparc64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = sparc64.AMOVUW

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = sparc64.AMOVD

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = sparc64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = sparc64.AMOVUB

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = sparc64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = sparc64.AMOVUH

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = sparc64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = sparc64.AMOVUW

		goto rdst

	//return;
	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT32<<16 | gc.TUINT64:
		// Widen float32 to float64 first, then retry.
		cvt = gc.Types[gc.TFLOAT64]

		goto hard

	case gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			// Unsigned 64-bit target: if the value is >= 2^63,
			// subtract 2^63 before converting (added back below).
			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins(sparc64.AFCMPD, &r2, &r1)
			p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TFLOAT64]), nil, +1)
			gins(sparc64.AFSUBD, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			gc.Regfree(&r2)
		}

		// FDTOX leaves the result in a float register; bounce it through
		// the stack (below the biased SP) to reach an integer register.
		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
		gins(sparc64.AFDTOX, &r1, &r2)
		p1 := gins(sparc64.AFMOVD, &r2, nil)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = sparc64.REG_RSP
		p1.To.Offset = -8 + sparc64.StackBias
		p1 = gins(sparc64.AMOVD, nil, &r3)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = sparc64.REG_RSP
		p1.From.Offset = -8 + sparc64.StackBias
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		if tt == gc.TUINT64 {
			// Add 2^63 back when the subtraction above was taken.
			// NOTE(review): this re-tests the FCMPD condition codes set
			// before FDTOX; the intervening instructions must not clobber
			// fcc — confirm against the SPARC V9 manual.
			p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TFLOAT64]), nil, +1)
			gc.Nodreg(&r1, gc.Types[gc.TINT64], sparc64.REG_RT1)
			gins(sparc64.AMOVD, &bigi, &r1)
			gins(sparc64.AADD, &r1, &r3)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r3, t)
		gc.Regfree(&r3)
		return

	//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native int64 -> uint64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64:
		// Sign-extend small ints to 64 bits first, then retry.
		cvt = gc.Types[gc.TINT64]

		goto hard

	case gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		// Zero-extend small uints to 64 bits first, then retry.
		cvt = gc.Types[gc.TUINT64]

		goto hard

	case gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		bignodes()

		// The algorithm is:
		// if small enough, use native int64 -> float64 conversion,
		// otherwise halve (x -> (x>>1)|(x&1)), convert, and double.
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
		gmove(f, &r1)
		if ft == gc.TUINT64 {
			gc.Nodreg(&r2, gc.Types[gc.TUINT64], sparc64.REG_RT1)
			gmove(&bigi, &r2)
			gins(sparc64.ACMP, &r1, &r2)
			p1 := gc.Gbranch(sparc64.ABLEUD, nil, +1)
			// Halve, rounding to odd: r1 = (r1>>1) | (r1&1).
			var r3 gc.Node
			gc.Regalloc(&r3, gc.Types[gc.TUINT64], nil)
			p2 := gins(sparc64.AAND, nil, &r3) // andi.
			p2.Reg = r1.Reg
			p2.From.Type = obj.TYPE_CONST
			p2.From.Offset = 1
			p3 := gins(sparc64.ASRLD, nil, &r1)
			p3.From.Type = obj.TYPE_CONST
			p3.From.Offset = 1
			gins(sparc64.AOR, &r3, &r1)
			gc.Regfree(&r3)
			gc.Patch(p1, gc.Pc)
		}

		// Bounce the integer through the stack into a float register,
		// then convert in place with FXTOD.
		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
		p1 := gins(sparc64.AMOVD, &r1, nil)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = sparc64.REG_RSP
		p1.To.Offset = -8 + sparc64.StackBias
		p1 = gins(sparc64.AFMOVD, nil, &r2)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = sparc64.REG_RSP
		p1.From.Offset = -8 + sparc64.StackBias
		gins(sparc64.AFXTOD, &r2, &r2)
		gc.Regfree(&r1)
		if ft == gc.TUINT64 {
			// Double the halved value (multiply by the constant 2.0
			// kept in REG_YTWO) when the halving path was taken.
			p1 := gc.Gbranch(sparc64.ABLEUD, nil, +1)
			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], sparc64.REG_YTWO)
			gins(sparc64.AFMULD, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		gc.Regfree(&r2)
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = sparc64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = sparc64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = sparc64.AFSTOD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = sparc64.AFDTOS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	gc.Regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
564 func gins(as obj.As, f, t *gc.Node) *obj.Prog { 565 if as >= obj.A_ARCHSPECIFIC { 566 if x, ok := f.IntLiteral(); ok { 567 ginscon(as, x, t) 568 return nil // caller must not use 569 } 570 } 571 if as == sparc64.ACMP { 572 if x, ok := t.IntLiteral(); ok { 573 ginscon2(as, f, x) 574 return nil // caller must not use 575 } 576 } 577 return rawgins(as, f, t) 578 } 579 580 /* 581 * generate one instruction: 582 * as f, t 583 */ 584 func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog { 585 // TODO(austin): Add self-move test like in 6g (but be careful 586 // of truncation moves) 587 588 p := gc.Prog(as) 589 gc.Naddr(&p.From, f) 590 gc.Naddr(&p.To, t) 591 592 switch as { 593 case sparc64.ACMP, sparc64.AFCMPS, sparc64.AFCMPD: 594 if t != nil { 595 if f.Op != gc.OREGISTER { 596 gc.Fatalf("bad operands to gcmp") 597 } 598 p.From = p.To 599 p.To = obj.Addr{} 600 raddr(f, p) 601 } 602 } 603 604 // Bad things the front end has done to us. Crash to find call stack. 605 switch as { 606 case sparc64.AAND, sparc64.AMULD: 607 if p.From.Type == obj.TYPE_CONST { 608 gc.Debug['h'] = 1 609 gc.Fatalf("bad inst: %v", p) 610 } 611 case sparc64.ACMP: 612 if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM { 613 gc.Debug['h'] = 1 614 gc.Fatalf("bad inst: %v", p) 615 } 616 } 617 618 if gc.Debug['g'] != 0 { 619 fmt.Printf("%v\n", p) 620 } 621 622 w := int32(0) 623 switch as { 624 case sparc64.AMOVB, 625 sparc64.AMOVUB: 626 w = 1 627 628 case sparc64.AMOVH, 629 sparc64.AMOVUH: 630 w = 2 631 632 case sparc64.AMOVW, 633 sparc64.AMOVUW: 634 w = 4 635 636 case sparc64.AMOVD: 637 if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR { 638 break 639 } 640 w = 8 641 } 642 643 if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) { 644 gc.Dump("f", f) 645 gc.Dump("t", t) 646 gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width) 647 } 648 649 return p 650 } 651 652 /* 653 * insert n into reg 
 slot of p
 */
// raddr places register operand n into p.Reg, failing loudly if n does
// not resolve to a register.
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr

	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", n.Op)
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		// NOTE(review): gc.Fatalf presumably does not return, which
		// would make this assignment dead code — confirm.
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

// gcmp emits a compare "as rhs, lhs-in-Reg"; lhs must already be in a
// register.  Returns the Prog so callers can inspect/patch it.
func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatalf("bad operands to gcmp: %v %v", lhs.Op, rhs.Op)
	}

	p := rawgins(as, rhs, nil)
	raddr(lhs, p)
	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
// optoas maps a portable operation op applied at type t to the concrete
// sparc64 opcode (branch, move, or ALU instruction).  Unknown pairs are
// front-end bugs and abort compilation.
func optoas(op gc.Op, t *gc.Type) obj.As {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_ = uint32(gc.OMINUS) << 16
		OLSH_   = uint32(gc.OLSH) << 16
		ORSH_   = uint32(gc.ORSH) << 16
		OADD_   = uint32(gc.OADD) << 16
		OSUB_   = uint32(gc.OSUB) << 16
		OMUL_   = uint32(gc.OMUL) << 16
		ODIV_   = uint32(gc.ODIV) << 16
		OOR_    = uint32(gc.OOR) << 16
		OAND_   = uint32(gc.OAND) << 16
		OXOR_   = uint32(gc.OXOR) << 16
		OEQ_    = uint32(gc.OEQ) << 16
		ONE_    = uint32(gc.ONE) << 16
		OLT_    = uint32(gc.OLT) << 16
		OLE_    = uint32(gc.OLE) << 16
		OGE_    = uint32(gc.OGE) << 16
		OGT_    = uint32(gc.OGT) << 16
		OCMP_   = uint32(gc.OCMP) << 16
		OAS_    = uint32(gc.OAS) << 16
		OHMUL_  = uint32(gc.OHMUL) << 16
		OSQRT_  = uint32(gc.OSQRT) << 16
	)

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry for op=%v type=%v", op, t)

	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TPTR32:
		a = sparc64.ABEW

	case OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR64:
		a = sparc64.ABED

	case OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = sparc64.AFBE

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TPTR32:
		a = sparc64.ABNEW

	case ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR64:
		a = sparc64.ABNED

	case ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = sparc64.AFBNE

	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32:
		a = sparc64.ABLW

	case OLT_ | gc.TINT64:
		a = sparc64.ABLD

	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32:
		a = sparc64.ABCSW

	case OLT_ | gc.TUINT64:
		a = sparc64.ABCSD

	case OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = sparc64.AFBL

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32:
		a = sparc64.ABLEW

	case OLE_ | gc.TINT64:
		a = sparc64.ABLED

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32:
		a = sparc64.ABLEUW

	case OLE_ | gc.TUINT64:
		a = sparc64.ABLEUD

	case OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = sparc64.AFBLE

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32:
		a = sparc64.ABGW

	case OGT_ | gc.TINT64:
		a = sparc64.ABGD

	case OGT_ | gc.TFLOAT32,
		OGT_ | gc.TFLOAT64:
		a = sparc64.AFBG

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32:
		a = sparc64.ABGUW

	case OGT_ | gc.TUINT64:
		a = sparc64.ABGUD

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32:
		a = sparc64.ABGEW

	case OGE_ | gc.TINT64:
		a = sparc64.ABGED

	case OGE_ | gc.TFLOAT32,
		OGE_ | gc.TFLOAT64:
		a = sparc64.AFBGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32:
		a = sparc64.ABCCW

	case OGE_ | gc.TUINT64:
		a = sparc64.ABCCD

	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TINT16,
		OCMP_ | gc.TINT32,
		OCMP_ | gc.TPTR32,
		OCMP_ | gc.TINT64,
		OCMP_ | gc.TUINT8,
		OCMP_ | gc.TUINT16,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TUINT64,
		OCMP_ | gc.TPTR64:
		a = sparc64.ACMP

	case OCMP_ | gc.TFLOAT32:
		a = sparc64.AFCMPS

	case OCMP_ | gc.TFLOAT64:
		a = sparc64.AFCMPD

	case OAS_ | gc.TBOOL,
		OAS_ | gc.TINT8:
		a = sparc64.AMOVB

	case OAS_ | gc.TUINT8:
		a = sparc64.AMOVUB

	case OAS_ | gc.TINT16:
		a = sparc64.AMOVH

	case OAS_ | gc.TUINT16:
		a = sparc64.AMOVUH

	case OAS_ | gc.TINT32:
		a = sparc64.AMOVW

	case OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = sparc64.AMOVUW

	case OAS_ | gc.TINT64,
		OAS_ | gc.TUINT64,
		OAS_ | gc.TPTR64:
		a = sparc64.AMOVD

	case OAS_ | gc.TFLOAT32:
		a = sparc64.AFMOVS

	case OAS_ | gc.TFLOAT64:
		a = sparc64.AFMOVD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8,
		OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16,
		OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32,
		OADD_ | gc.TINT64,
		OADD_ | gc.TUINT64,
		OADD_ | gc.TPTR64:
		a = sparc64.AADD

	case OADD_ | gc.TFLOAT32:
		a = sparc64.AFADDS

	case OADD_ | gc.TFLOAT64:
		a = sparc64.AFADDD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8,
		OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16,
		OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32,
		OSUB_ | gc.TINT64,
		OSUB_ | gc.TUINT64,
		OSUB_ | gc.TPTR64:
		a = sparc64.ASUB

	case OSUB_ | gc.TFLOAT32:
		a = sparc64.AFSUBS

	case OSUB_ | gc.TFLOAT64:
		a = sparc64.AFSUBD

	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8,
		OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16,
		OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32,
		OMINUS_ | gc.TINT64,
		OMINUS_ | gc.TUINT64,
		OMINUS_ | gc.TPTR64:
		a = sparc64.ANEG

	case OMINUS_ | gc.TFLOAT32:
		a = sparc64.AFNEGS

	case OMINUS_ | gc.TFLOAT64:
		a = sparc64.AFNEGD

	case OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8,
		OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16,
		OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32,
		OAND_ | gc.TINT64,
		OAND_ | gc.TUINT64,
		OAND_ | gc.TPTR64:
		a = sparc64.AAND

	case OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8,
		OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16,
		OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32,
		OOR_ | gc.TINT64,
		OOR_ | gc.TUINT64,
		OOR_ | gc.TPTR64:
		a = sparc64.AOR

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8,
		OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16,
		OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32,
		OXOR_ | gc.TINT64,
		OXOR_ | gc.TUINT64,
		OXOR_ | gc.TPTR64:
		a = sparc64.AXOR

	// TODO(minux): handle rotates
	//case CASE(OLROT, TINT8):
	//case CASE(OLROT, TUINT8):
	//case CASE(OLROT, TINT16):
	//case CASE(OLROT, TUINT16):
	//case CASE(OLROT, TINT32):
	//case CASE(OLROT, TUINT32):
	//case CASE(OLROT, TPTR32):
	//case CASE(OLROT, TINT64):
	//case CASE(OLROT, TUINT64):
	//case CASE(OLROT, TPTR64):
	//	a = 0//???; RLDC?
	//	break;

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8,
		OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16,
		OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32:
		a = sparc64.ASLLW

	case OLSH_ | gc.TINT64,
		OLSH_ | gc.TUINT64,
		OLSH_ | gc.TPTR64:
		a = sparc64.ASLLD

	case ORSH_ | gc.TUINT8,
		ORSH_ | gc.TUINT16,
		ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32:
		a = sparc64.ASRLW

	case ORSH_ | gc.TUINT64,
		ORSH_ | gc.TPTR64:
		a = sparc64.ASRLD

	case ORSH_ | gc.TINT8,
		ORSH_ | gc.TINT16,
		ORSH_ | gc.TINT32:
		a = sparc64.ASRAW

	case ORSH_ | gc.TINT64:
		a = sparc64.ASRAD

	// TODO(shawn): handle rotates, likely via addcc/addxc and PSR icc
	// overflow bit
	//case CASE(ORROTC, TINT8):
	//case CASE(ORROTC, TUINT8):
	//case CASE(ORROTC, TINT16):
	//case CASE(ORROTC, TUINT16):
	//case CASE(ORROTC, TINT32):
	//case CASE(ORROTC, TUINT32):
	//case CASE(ORROTC, TINT64):
	//case CASE(ORROTC, TUINT64):
	//	a = 0//??? RLDC??
	//	break;

	// TODO(aram): handle high-multiply via mulx?
	//case OHMUL_ | gc.TINT64:
	//	a = sparc64.ASMULH
	//
	//case OHMUL_ | gc.TUINT64,
	//	OHMUL_ | gc.TPTR64:
	//	a = sparc64.AUMULH

	case OMUL_ | gc.TINT8,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TINT32,
		OMUL_ | gc.TINT64,
		OMUL_ | gc.TUINT8,
		OMUL_ | gc.TUINT16,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32,
		OMUL_ | gc.TUINT64,
		OMUL_ | gc.TPTR64:
		a = sparc64.AMULD

	case OMUL_ | gc.TFLOAT32:
		a = sparc64.AFMULS

	case OMUL_ | gc.TFLOAT64:
		a = sparc64.AFMULD

	case ODIV_ | gc.TINT8,
		ODIV_ | gc.TINT16,
		ODIV_ | gc.TINT32,
		ODIV_ | gc.TINT64:
		a = sparc64.ASDIVD

	case ODIV_ | gc.TUINT8,
		ODIV_ | gc.TUINT16,
		ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32,
		ODIV_ | gc.TUINT64,
		ODIV_ | gc.TPTR64:
		a = sparc64.AUDIVD

	case ODIV_ | gc.TFLOAT32:
		a = sparc64.AFDIVS

	case ODIV_ | gc.TFLOAT64:
		a = sparc64.AFDIVD

	case OSQRT_ | gc.TFLOAT64:
		a = sparc64.AFSQRTD
	}

	return a
}

// Addressing-capability flags used by the (stubbed) sudoaddable machinery.
const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

// xgen is a stub.  Note: "-1 != 0" is constantly true, so this always
// returns true.
func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return -1 != 0 /*TypeKind(100016)*/
}

// sudoclean is a stub paired with sudoaddable; nothing to release yet.
func sudoclean() {
	return
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
// Currently a stub: always reports failure so callers fall back to
// generic address generation.
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
	// TODO(minux)

	*a = obj.Addr{}
	return false
}