// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package arm64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm64"
	"fmt"
)

// resvd lists machine registers with fixed roles (temporaries, the g
// register, runtime temps, the shared zero/stack-pointer encoding, and
// the preloaded floating-point constants). Presumably consumed by the
// shared gc register allocator to keep these out of general use —
// confirm against the gc package.
var resvd = []int{
	arm64.REGTMP,
	arm64.REGG,
	arm64.REGRT1,
	arm64.REGRT2,
	arm64.REG_R31, // REGZERO and REGSP
	arm64.FREGZERO,
	arm64.FREGHALF,
	arm64.FREGONE,
	arm64.FREGTWO,
}

/*
 * generate
 *	as $c, n
 */
// ginscon emits "as $c, n2" with constant operand c. If the constant
// does not fit the immediate field (outside ±arm64.BIG for non-MOVD ops),
// the op is MUL, or n2 is a non-register operand, the constant is first
// materialized into a temporary register via MOVD.
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) || as == arm64.AMUL || n2 != nil && n2.Op != gc.OREGISTER {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(arm64.AMOVD, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}

/*
 * generate
 *	as n, $c (CMP)
 */
// ginscon2 emits a comparison of n2 against constant c. Only ACMP is
// supported (anything else is a fatal front-end error); an out-of-range
// constant is first loaded into a temporary register.
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatalf("ginscon2")

	case arm64.ACMP:
		if -arm64.BIG <= c && c <= arm64.BIG {
			// Constant fits the CMP immediate field.
			gcmp(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(arm64.AMOVD, &n1, &ntmp)
	gcmp(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}

// ginscmp generates code to compare n1 op n2 (both of type t) and
// returns the conditional-branch Prog for op. likely is the branch
// prediction hint passed through to Gbranch.
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		// Integer constant operand: compare against immediate.
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gcmp(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			// Widen signed sub-word constants to int64, load, then
			// recurse so the truncating move below handles the store.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			// Same as above for unsigned sub-word constants.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		// truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		// truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		// truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = arm64.AMOVWU

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = arm64.AMOVD

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = arm64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = arm64.AMOVBU

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = arm64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = arm64.AMOVHU

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = arm64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = arm64.AMOVWU

		goto rdst

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = arm64.AFCVTZSSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = arm64.AFCVTZSDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = arm64.AFCVTZSS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = arm64.AFCVTZSD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT32:
		a = arm64.AFCVTZUSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT32:
		a = arm64.AFCVTZUDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT64:
		a = arm64.AFCVTZUS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT64:
		a = arm64.AFCVTZUD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8:
		// No direct float -> sub-word conversion: go through int32.
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		// No direct float -> sub-word conversion: go through uint32.
		cvt = gc.Types[gc.TUINT32]

		goto hard

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFWS

		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFWD

		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFD
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFWS

		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFWD

		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFS
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFD
		goto rdst

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = arm64.AFCVTSD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = arm64.AFCVTDS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	gc.Regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
func gins(as int, f, t *gc.Node) *obj.Prog {
	if as >= obj.A_ARCHSPECIFIC {
		if x, ok := f.IntLiteral(); ok {
			// Source is an integer literal: route through ginscon,
			// which materializes oversized constants into a register.
			ginscon(as, x, t)
			return nil // caller must not use
		}
	}
	if as == arm64.ACMP {
		if x, ok := t.IntLiteral(); ok {
			ginscon2(as, f, x)
			return nil // caller must not use
		}
	}
	return rawgins(as, f, t)
}

/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
		// Comparisons put the second operand in From and the first
		// in the register (Reg) field; rewrite the addressing here.
		if t != nil {
			if f.Op != gc.OREGISTER {
				gc.Fatalf("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}
	}

	// Bad things the front end has done to us. Crash to find call stack.
	switch as {
	case arm64.AAND, arm64.AMUL:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	case arm64.ACMP:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	// Sanity-check operand widths against the move size; constants
	// and addresses loaded by MOVD are exempt.
	w := int32(0)
	switch as {
	case arm64.AMOVB,
		arm64.AMOVBU:
		w = 1

	case arm64.AMOVH,
		arm64.AMOVHU:
		w = 2

	case arm64.AMOVW,
		arm64.AMOVWU:
		w = 4

	case arm64.AMOVD:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}

/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr

	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

// gcmp emits the comparison "as lhs, rhs". lhs must already be in a
// register; it is placed into the Prog's Reg field via raddr.
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
	}

	p := rawgins(as, rhs, nil)
	raddr(lhs, p)
	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op gc.Op, t *gc.Type) int {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_ = uint32(gc.OMINUS) << 16
		OLSH_   = uint32(gc.OLSH) << 16
		ORSH_   = uint32(gc.ORSH) << 16
		OADD_   = uint32(gc.OADD) << 16
		OSUB_   = uint32(gc.OSUB) << 16
		OMUL_   = uint32(gc.OMUL) << 16
		ODIV_   = uint32(gc.ODIV) << 16
		OOR_    = uint32(gc.OOR) << 16
		OAND_   = uint32(gc.OAND) << 16
		OXOR_   = uint32(gc.OXOR) << 16
		OEQ_    = uint32(gc.OEQ) << 16
		ONE_    = uint32(gc.ONE) << 16
		OLT_    = uint32(gc.OLT) << 16
		OLE_    = uint32(gc.OLE) << 16
		OGE_    = uint32(gc.OGE) << 16
		OGT_    = uint32(gc.OGT) << 16
		OCMP_   = uint32(gc.OCMP) << 16
		OAS_    = uint32(gc.OAS) << 16
		OHMUL_  = uint32(gc.OHMUL) << 16
		OSQRT_  = uint32(gc.OSQRT) << 16
	)

	a := int(obj.AXXX)
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)

	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR32,
		OEQ_ | gc.TPTR64,
		OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = arm64.ABEQ

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR32,
		ONE_ | gc.TPTR64,
		ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = arm64.ABNE

	// Signed < uses the signed branch; unsigned and floats use the
	// unsigned ("lower") branch.
	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32,
		OLT_ | gc.TINT64:
		a = arm64.ABLT

	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32,
		OLT_ | gc.TUINT64,
		OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = arm64.ABLO

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32,
		OLE_ | gc.TINT64:
		a = arm64.ABLE

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32,
		OLE_ | gc.TUINT64,
		OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = arm64.ABLS

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32,
		OGT_ | gc.TINT64,
		OGT_ | gc.TFLOAT32,
		OGT_ | gc.TFLOAT64:
		a = arm64.ABGT

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32,
		OGT_ | gc.TUINT64:
		a = arm64.ABHI

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32,
		OGE_ | gc.TINT64,
		OGE_ | gc.TFLOAT32,
		OGE_ | gc.TFLOAT64:
		a = arm64.ABGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32,
		OGE_ | gc.TUINT64:
		a = arm64.ABHS

	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TINT16,
		OCMP_ | gc.TINT32,
		OCMP_ | gc.TPTR32,
		OCMP_ | gc.TINT64,
		OCMP_ | gc.TUINT8,
		OCMP_ | gc.TUINT16,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TUINT64,
		OCMP_ | gc.TPTR64:
		a = arm64.ACMP

	case OCMP_ | gc.TFLOAT32:
		a = arm64.AFCMPS

	case OCMP_ | gc.TFLOAT64:
		a = arm64.AFCMPD

	case OAS_ | gc.TBOOL,
		OAS_ | gc.TINT8:
		a = arm64.AMOVB

	case OAS_ | gc.TUINT8:
		a = arm64.AMOVBU

	case OAS_ | gc.TINT16:
		a = arm64.AMOVH

	case OAS_ | gc.TUINT16:
		a = arm64.AMOVHU

	case OAS_ | gc.TINT32:
		a = arm64.AMOVW

	case OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = arm64.AMOVWU

	case OAS_ | gc.TINT64,
		OAS_ | gc.TUINT64,
		OAS_ | gc.TPTR64:
		a = arm64.AMOVD

	case OAS_ | gc.TFLOAT32:
		a = arm64.AFMOVS

	case OAS_ | gc.TFLOAT64:
		a = arm64.AFMOVD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8,
		OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16,
		OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32,
		OADD_ | gc.TINT64,
		OADD_ | gc.TUINT64,
		OADD_ | gc.TPTR64:
		a = arm64.AADD

	case OADD_ | gc.TFLOAT32:
		a = arm64.AFADDS

	case OADD_ | gc.TFLOAT64:
		a = arm64.AFADDD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8,
		OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16,
		OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32,
		OSUB_ | gc.TINT64,
		OSUB_ | gc.TUINT64,
		OSUB_ | gc.TPTR64:
		a = arm64.ASUB

	case OSUB_ | gc.TFLOAT32:
		a = arm64.AFSUBS

	case OSUB_ | gc.TFLOAT64:
		a = arm64.AFSUBD

	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8,
		OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16,
		OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32,
		OMINUS_ | gc.TINT64,
		OMINUS_ | gc.TUINT64,
		OMINUS_ | gc.TPTR64:
		a = arm64.ANEG

	case OMINUS_ | gc.TFLOAT32:
		a = arm64.AFNEGS

	case OMINUS_ | gc.TFLOAT64:
		a = arm64.AFNEGD

	case OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8,
		OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16,
		OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32,
		OAND_ | gc.TINT64,
		OAND_ | gc.TUINT64,
		OAND_ | gc.TPTR64:
		a = arm64.AAND

	case OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8,
		OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16,
		OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32,
		OOR_ | gc.TINT64,
		OOR_ | gc.TUINT64,
		OOR_ | gc.TPTR64:
		a = arm64.AORR

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8,
		OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16,
		OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32,
		OXOR_ | gc.TINT64,
		OXOR_ | gc.TUINT64,
		OXOR_ | gc.TPTR64:
		a = arm64.AEOR

		// TODO(minux): handle rotates
	//case CASE(OLROT, TINT8):
	//case CASE(OLROT, TUINT8):
	//case CASE(OLROT, TINT16):
	//case CASE(OLROT, TUINT16):
	//case CASE(OLROT, TINT32):
	//case CASE(OLROT, TUINT32):
	//case CASE(OLROT, TPTR32):
	//case CASE(OLROT, TINT64):
	//case CASE(OLROT, TUINT64):
	//case CASE(OLROT, TPTR64):
	//	a = 0//???; RLDC?
	//	break;

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8,
		OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16,
		OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32,
		OLSH_ | gc.TINT64,
		OLSH_ | gc.TUINT64,
		OLSH_ | gc.TPTR64:
		a = arm64.ALSL

	// Right shift: logical for unsigned, arithmetic for signed.
	case ORSH_ | gc.TUINT8,
		ORSH_ | gc.TUINT16,
		ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32,
		ORSH_ | gc.TUINT64,
		ORSH_ | gc.TPTR64:
		a = arm64.ALSR

	case ORSH_ | gc.TINT8,
		ORSH_ | gc.TINT16,
		ORSH_ | gc.TINT32,
		ORSH_ | gc.TINT64:
		a = arm64.AASR

		// TODO(minux): handle rotates
	//case CASE(ORROTC, TINT8):
	//case CASE(ORROTC, TUINT8):
	//case CASE(ORROTC, TINT16):
	//case CASE(ORROTC, TUINT16):
	//case CASE(ORROTC, TINT32):
	//case CASE(ORROTC, TUINT32):
	//case CASE(ORROTC, TINT64):
	//case CASE(ORROTC, TUINT64):
	//	a = 0//??? RLDC??
	//	break;

	case OHMUL_ | gc.TINT64:
		a = arm64.ASMULH

	case OHMUL_ | gc.TUINT64,
		OHMUL_ | gc.TPTR64:
		a = arm64.AUMULH

	case OMUL_ | gc.TINT8,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TINT32:
		a = arm64.ASMULL

	case OMUL_ | gc.TINT64:
		a = arm64.AMUL

	case OMUL_ | gc.TUINT8,
		OMUL_ | gc.TUINT16,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32:
		// don't use word multiply, the high 32-bit are undefined.
		a = arm64.AUMULL

	case OMUL_ | gc.TUINT64,
		OMUL_ | gc.TPTR64:
		a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.

	case OMUL_ | gc.TFLOAT32:
		a = arm64.AFMULS

	case OMUL_ | gc.TFLOAT64:
		a = arm64.AFMULD

	case ODIV_ | gc.TINT8,
		ODIV_ | gc.TINT16,
		ODIV_ | gc.TINT32,
		ODIV_ | gc.TINT64:
		a = arm64.ASDIV

	case ODIV_ | gc.TUINT8,
		ODIV_ | gc.TUINT16,
		ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32,
		ODIV_ | gc.TUINT64,
		ODIV_ | gc.TPTR64:
		a = arm64.AUDIV

	case ODIV_ | gc.TFLOAT32:
		a = arm64.AFDIVS

	case ODIV_ | gc.TFLOAT64:
		a = arm64.AFDIVD

	case OSQRT_ | gc.TFLOAT64:
		a = arm64.AFSQRTD
	}

	return a
}

// Addressability flags; presumably mirrored from the other back ends —
// not referenced within this file.
const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

// xgen is an unimplemented stub; it always returns true.
func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return -1 != 0 /*TypeKind(100016)*/
}

// sudoclean releases resources taken by sudoaddable; a no-op here since
// sudoaddable never succeeds.
func sudoclean() {
	return
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	// TODO(minux)

	*a = obj.Addr{}
	return false
}