github.com/peggyl/go@v0.0.0-20151008231540-ae315999c2d5/src/cmd/compile/internal/arm64/gsubr.go

// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package arm64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm64"
	"fmt"
)

var resvd = []int{
	arm64.REGTMP,
	arm64.REGG,
	arm64.REGRT1,
	arm64.REGRT2,
	arm64.REG_R31, // REGZERO and REGSP
	arm64.FREGZERO,
	arm64.FREGHALF,
	arm64.FREGONE,
	arm64.FREGTWO,
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) || as == arm64.AMUL || n2 != nil && n2.Op != gc.OREGISTER {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(arm64.AMOVD, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}

/*
 * generate
 *	as n, $c (CMP)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatalf("ginscon2")

	case arm64.ACMP:
		if -arm64.BIG <= c && c <= arm64.BIG {
			gcmp(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(arm64.AMOVD, &n1, &ntmp)
	gcmp(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}

func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
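		// With the constant on the right, the ginscon2 path below can
		// emit CMP with an immediate operand when the value fits in
		// range, instead of first loading it into a register.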
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gcmp(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
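	//
	// The switch key packs the source and destination simple types into
	// one value: the "from" type in the high 16 bits and the "to" type in
	// the low 16 bits, so each (from, to) pair is a single case. For
	// example, gc.TINT64<<16 | gc.TINT8 selects AMOVB, the truncating
	// move handled by the one-instruction gins path.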

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		// truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		// truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		// truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = arm64.AMOVWU

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = arm64.AMOVD

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = arm64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = arm64.AMOVBU

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = arm64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = arm64.AMOVHU

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = arm64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = arm64.AMOVWU

		goto rdst

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = arm64.AFCVTZSSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = arm64.AFCVTZSDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = arm64.AFCVTZSS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = arm64.AFCVTZSD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT32:
		a = arm64.AFCVTZUSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT32:
		a = arm64.AFCVTZUDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT64:
		a = arm64.AFCVTZUS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT64:
		a = arm64.AFCVTZUD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TUINT32]

		goto hard

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFWS

		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFWD

		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFD
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFWS

		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFWD

		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFS
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFD
		goto rdst

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = arm64.AFCVTSD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = arm64.AFCVTDS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	gc.Regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
func gins(as int, f, t *gc.Node) *obj.Prog {
	if as >= obj.A_ARCHSPECIFIC {
		if x, ok := f.IntLiteral(); ok {
			ginscon(as, x, t)
			return nil // caller must not use
		}
	}
	if as == arm64.ACMP {
		if x, ok := t.IntLiteral(); ok {
			ginscon2(as, f, x)
			return nil // caller must not use
		}
	}
	return rawgins(as, f, t)
}

/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				gc.Fatalf("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}
	}

	// Bad things the front end has done to us. Crash to find call stack.
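	// AND and MUL must not carry a constant operand at this point, and
	// CMP must not have a memory operand on either side.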
	switch as {
	case arm64.AAND, arm64.AMUL:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	case arm64.ACMP:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case arm64.AMOVB,
		arm64.AMOVBU:
		w = 1

	case arm64.AMOVH,
		arm64.AMOVHU:
		w = 2

	case arm64.AMOVW,
		arm64.AMOVWU:
		w = 4

	case arm64.AMOVD:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}

/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr

	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
	}

	p := rawgins(as, rhs, nil)
	raddr(lhs, p)
	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	a := int(obj.AXXX)
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)

	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = arm64.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = arm64.ABNE

	case gc.OLT<<16 | gc.TINT8,
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64:
		a = arm64.ABLT

	case gc.OLT<<16 | gc.TUINT8,
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64,
		gc.OLT<<16 | gc.TFLOAT32,
		gc.OLT<<16 | gc.TFLOAT64:
		a = arm64.ABLO

	case gc.OLE<<16 | gc.TINT8,
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64:
		a = arm64.ABLE

	case gc.OLE<<16 | gc.TUINT8,
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64,
		gc.OLE<<16 | gc.TFLOAT32,
		gc.OLE<<16 | gc.TFLOAT64:
		a = arm64.ABLS

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = arm64.ABGT

	case gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64:
		a = arm64.ABHI

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TFLOAT32,
		gc.OGE<<16 | gc.TFLOAT64:
		a = arm64.ABGE

	case gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64:
		a = arm64.ABHS

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TPTR32,
		gc.OCMP<<16 | gc.TINT64,
		gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TUINT64,
		gc.OCMP<<16 | gc.TPTR64:
		a = arm64.ACMP

	case gc.OCMP<<16 | gc.TFLOAT32:
		a = arm64.AFCMPS

	case gc.OCMP<<16 | gc.TFLOAT64:
		a = arm64.AFCMPD

	case gc.OAS<<16 | gc.TBOOL,
		gc.OAS<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.OAS<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.OAS<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.OAS<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.OAS<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = arm64.AMOVWU

	case gc.OAS<<16 | gc.TINT64,
		gc.OAS<<16 | gc.TUINT64,
		gc.OAS<<16 | gc.TPTR64:
		a = arm64.AMOVD

	case gc.OAS<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.OAS<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32,
		gc.OADD<<16 | gc.TINT64,
		gc.OADD<<16 | gc.TUINT64,
		gc.OADD<<16 | gc.TPTR64:
		a = arm64.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = arm64.AFADDS

	case gc.OADD<<16 | gc.TFLOAT64:
		a = arm64.AFADDD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32,
		gc.OSUB<<16 | gc.TINT64,
		gc.OSUB<<16 | gc.TUINT64,
		gc.OSUB<<16 | gc.TPTR64:
		a = arm64.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = arm64.AFSUBS

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = arm64.AFSUBD

	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32,
		gc.OMINUS<<16 | gc.TINT64,
		gc.OMINUS<<16 | gc.TUINT64,
		gc.OMINUS<<16 | gc.TPTR64:
		a = arm64.ANEG

	case gc.OMINUS<<16 | gc.TFLOAT32:
		a = arm64.AFNEGS

	case gc.OMINUS<<16 | gc.TFLOAT64:
		a = arm64.AFNEGD

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32,
		gc.OAND<<16 | gc.TINT64,
		gc.OAND<<16 | gc.TUINT64,
		gc.OAND<<16 | gc.TPTR64:
		a = arm64.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32,
		gc.OOR<<16 | gc.TINT64,
		gc.OOR<<16 | gc.TUINT64,
		gc.OOR<<16 | gc.TPTR64:
		a = arm64.AORR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32,
		gc.OXOR<<16 | gc.TINT64,
		gc.OXOR<<16 | gc.TUINT64,
		gc.OXOR<<16 | gc.TPTR64:
		a = arm64.AEOR

		// TODO(minux): handle rotates
		//case CASE(OLROT, TINT8):
		//case CASE(OLROT, TUINT8):
		//case CASE(OLROT, TINT16):
		//case CASE(OLROT, TUINT16):
		//case CASE(OLROT, TINT32):
		//case CASE(OLROT, TUINT32):
		//case CASE(OLROT, TPTR32):
		//case CASE(OLROT, TINT64):
		//case CASE(OLROT, TUINT64):
		//case CASE(OLROT, TPTR64):
		//	a = 0//???; RLDC?
		//	break;

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32,
		gc.OLSH<<16 | gc.TINT64,
		gc.OLSH<<16 | gc.TUINT64,
		gc.OLSH<<16 | gc.TPTR64:
		a = arm64.ALSL

	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32,
		gc.ORSH<<16 | gc.TUINT64,
		gc.ORSH<<16 | gc.TPTR64:
		a = arm64.ALSR

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32,
		gc.ORSH<<16 | gc.TINT64:
		a = arm64.AASR

		// TODO(minux): handle rotates
		//case CASE(ORROTC, TINT8):
		//case CASE(ORROTC, TUINT8):
		//case CASE(ORROTC, TINT16):
		//case CASE(ORROTC, TUINT16):
		//case CASE(ORROTC, TINT32):
		//case CASE(ORROTC, TUINT32):
		//case CASE(ORROTC, TINT64):
		//case CASE(ORROTC, TUINT64):
		//	a = 0//??? RLDC??
		//	break;

	case gc.OHMUL<<16 | gc.TINT64:
		a = arm64.ASMULH

	case gc.OHMUL<<16 | gc.TUINT64,
		gc.OHMUL<<16 | gc.TPTR64:
		a = arm64.AUMULH

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32:
		a = arm64.ASMULL

	case gc.OMUL<<16 | gc.TINT64:
		a = arm64.AMUL

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32:
		// don't use word multiply, the high 32-bit are undefined.
		a = arm64.AUMULL

	case gc.OMUL<<16 | gc.TUINT64,
		gc.OMUL<<16 | gc.TPTR64:
		a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = arm64.AFMULS

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = arm64.AFMULD

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32,
		gc.ODIV<<16 | gc.TINT64:
		a = arm64.ASDIV

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32,
		gc.ODIV<<16 | gc.TUINT64,
		gc.ODIV<<16 | gc.TPTR64:
		a = arm64.AUDIV

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = arm64.AFDIVS

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = arm64.AFDIVD

	case gc.OSQRT<<16 | gc.TFLOAT64:
		a = arm64.AFSQRTD
	}

	return a
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return -1 != 0 /*TypeKind(100016)*/
}

func sudoclean() {
	return
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	// TODO(minux)

	*a = obj.Addr{}
	return false
}