// Derived from Inferno utils/5c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
	"fmt"
)

// resvd lists registers the allocator must never hand out.
var resvd = []int{
	arm.REG_R9,  // formerly reserved for m; might be okay to reuse now; not sure about NaCl
	arm.REG_R10, // reserved for g
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */

var ncon_n gc.Node

// ncon returns a shared uint32 constant node holding i.
// The node is reused on every call, so the result is only valid
// until the next ncon call — suitable for immediate use in gins.
func ncon(i uint32) *gc.Node {
	if ncon_n.Type == nil {
		// Lazily initialize the shared node on first use.
		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
	}
	ncon_n.SetInt(int64(i))
	return &ncon_n
}

// sclean/nsclean form a small stack of nodes whose registers must be
// released by splitclean after a matching split64.
var sclean [10]gc.Node

var nsclean int

/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 * Every successful call must be paired with a later splitclean to release
 * any register allocated here.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatalf("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatalf("split64 clean")
	}
	// Push a placeholder; OEMPTY means "nothing to free" unless Igen
	// below replaces it with a register-backed node.
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				// Materialize an addressable copy; remember it so
				// splitclean can free its register.
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME, gc.OINDREG:
			// nothing
		}

		// Both halves alias n's memory; hi is the word 4 bytes up
		// (little-endian layout: low word first).
		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			// Signed 64-bit: the high word carries the sign.
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		// Constant: split the 64-bit value into two constant nodes.
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int64()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}

// splitclean undoes the most recent split64, freeing the register
// it allocated (if any).
func splitclean() {
	if nsclean <= 0 {
		gc.Fatalf("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}

// gmove generates code to move (and convert, if the types differ) the
// value of node f into node t. It handles integer widening/truncation,
// 64-bit halves via split64, and int<->float conversions through the FPU.
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", f, t)
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands;
	// except 64-bit, which always copies via registers anyway.
	var a obj.As
	var r1 gc.Node
	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT16,
			gc.TINT8:
			// Small signed targets: load as int32 and let the
			// recursive gmove perform the final narrowing store.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT16,
			gc.TUINT8:
			// Small unsigned targets: same via uint32.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = gc.Simsimtype(con.Type)

		// constants can't move directly to memory
		if gc.Ismem(t) && !gc.Is64(t.Type) {
			goto hard
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatalf("gmove %v -> %v", f, t)
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.TUINT8<<16 | gc.TUINT8:
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8:
		a = arm.AMOVBS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm.AMOVBU
		goto trunc64

	case gc.TINT16<<16 | gc.TINT16: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.TUINT16<<16 | gc.TUINT16:
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16:
		a = arm.AMOVHS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm.AMOVHU
		goto trunc64

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = arm.AMOVW

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		// 64->32 truncation: just copy the low word.
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &r1, t)
		gc.Regfree(&r1)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		// 64-bit copy: move both halves through registers.
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		var r1 gc.Node
		gc.Regalloc(&r1, flo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, fhi.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &fhi, &r2)
		gins(arm.AMOVW, &r1, &tlo)
		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		splitclean()
		return

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = arm.AMOVBS

		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = arm.AMOVBU

		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = arm.AMOVHS

		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = arm.AMOVHU

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		var r1 gc.Node
		gc.Regalloc(&r1, tlo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, thi.Type, nil)
		gmove(f, &r1)
		// High word = low word arithmetic-shifted right by 31,
		// i.e. a register copy with a shift operand (sign fill).
		p1 := gins(arm.AMOVW, &r1, &r2)
		p1.From.Type = obj.TYPE_SHIFT
		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
		p1.From.Reg = 0

		//print("gmove: %v\n", p1);
		gins(arm.AMOVW, &r1, &tlo)

		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		var r1 gc.Node
		gc.Regalloc(&r1, thi.Type, nil)
		gins(arm.AMOVW, ncon(0), &r1)
		gins(arm.AMOVW, &r1, &thi)
		gc.Regfree(&r1)
		splitclean()
		return

		// case CASE(TFLOAT64, TUINT64):
		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TUINT32,

		// case CASE(TFLOAT32, TUINT64):

		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		// fa: FP load; a: FP->word convert; ta: final integer store.
		fa := arm.AMOVF

		a := arm.AMOVFW
		if ft == gc.TFLOAT64 {
			fa = arm.AMOVD
			a = arm.AMOVDW
		}

		ta := arm.AMOVW
		switch tt {
		case gc.TINT8:
			ta = arm.AMOVBS

		case gc.TUINT8:
			ta = arm.AMOVBU

		case gc.TINT16:
			ta = arm.AMOVHS

		case gc.TUINT16:
			ta = arm.AMOVHU
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)        // load to fpu
		p1 := gins(a, &r1, &r1) // convert to w
		switch tt {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			// Unsigned destination: mark the conversion unsigned.
			p1.Scond |= arm.C_UBIT
		}

		gins(arm.AMOVW, &r1, &r2) // copy to cpu
		gins(ta, &r2, t)          // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		// fa: integer load (with extension); a: word->FP convert;
		// ta: final FP store.
		fa := arm.AMOVW

		switch ft {
		case gc.TINT8:
			fa = arm.AMOVBS

		case gc.TUINT8:
			fa = arm.AMOVBU

		case gc.TINT16:
			fa = arm.AMOVHS

		case gc.TUINT16:
			fa = arm.AMOVHU
		}

		a := arm.AMOVWF
		ta := arm.AMOVF
		if tt == gc.TFLOAT64 {
			a = arm.AMOVWD
			ta = arm.AMOVD
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)          // load to cpu
		gins(arm.AMOVW, &r1, &r2) // copy to fpu
		p1 := gins(a, &r2, &r2)   // convert
		switch ft {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			// Unsigned source: mark the conversion unsigned.
			p1.Scond |= arm.C_UBIT
		}

		gins(ta, &r2, t) // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		gc.Fatalf("gmove UINT64, TFLOAT not implemented")
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVF, f, &r1)
		gins(arm.AMOVFD, &r1, &r1)
		gins(arm.AMOVD, &r1, t)
		gc.Regfree(&r1)
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVD, f, &r1)
		gins(arm.AMOVDF, &r1, &r1)
		gins(arm.AMOVF, &r1, t)
		gc.Regfree(&r1)
		return
	}

	// Single-instruction case selected by the switch above.
	gins(a, f, t)
	return

	// TODO(kaib): we almost always require a register dest anyway, this can probably be
	// removed.
	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// truncate 64 bit integer
trunc64:
	var fhi gc.Node
	var flo gc.Node
	split64(f, &flo, &fhi)

	gc.Regalloc(&r1, t.Type, nil)
	gins(a, &flo, &r1)
	gins(a, &r1, t)
	gc.Regfree(&r1)
	splitclean()
	return
}

// samaddr reports whether f and t refer to the same location.
// Only the register/register case is recognized; everything else
// conservatively returns false.
func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 * Returns the emitted Prog so callers can patch operands
 * (e.g. shift encodings). Performs per-opcode sanity checks.
 */
func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;
	//	int32 v;

	if f != nil && f.Op == gc.OINDEX {
		gc.Fatalf("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	if t != nil && t.Op == gc.OINDEX {
		gc.Fatalf("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm.ABL:
		// Indirect call: the target register is used as a memory operand.
		if p.To.Type == obj.TYPE_REG {
			p.To.Type = obj.TYPE_MEM
		}

	case arm.ACMP, arm.ACMPF, arm.ACMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				/* generate a comparison
				TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
				*/
				gc.Fatalf("bad operands to gcmp")
			}
			// CMP takes its second operand in the Reg field, not To.
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}

	case arm.AMULU:
		if f != nil && f.Op != gc.OREGISTER {
			gc.Fatalf("bad operands to mul")
		}

	case arm.AMOVW:
		// A move cannot have two memory-like operands.
		if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
			gc.Fatalf("gins double memory")
		}

	case arm.AADD:
		if p.To.Type == obj.TYPE_MEM {
			gc.Fatalf("gins arith to mem")
		}

	case arm.ARSB:
		if p.From.Type == obj.TYPE_NONE {
			gc.Fatalf("rsb with no from")
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}
	return p
}

/*
 * insert n into reg slot of p
 * n must resolve to a register operand; otherwise this is a fatal error.
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr
	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", n.Op)
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}
/* generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 * stype selects the shift kind; lhs is the register being shifted and
 * rhs the destination operand of the emitted instruction.
 */
func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
	if sval <= 0 || sval > 32 {
		gc.Fatalf("bad shift value: %d", sval)
	}

	// Map 32 to 0 per the ARM immediate-shift encoding (see comment above).
	sval = sval & 0x1f

	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	// Pack kind | amount<<7 | source register into the shift operand.
	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
	return p
}

/* generate a register shift
 * Like gshift, but the shift amount comes from register reg.
 */
func gregshift(as obj.As, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	// 1<<4 selects the register-specified-shift form of the operand.
	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
	return p
}

/*
 * return Axxx for Oxxx on type t.
 * Maps a (compiler op, simple type) pair to the ARM assembler opcode;
 * unknown combinations are fatal.
 */
func optoas(op gc.Op, t *gc.Type) obj.As {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_ = uint32(gc.OMINUS) << 16
		OLSH_   = uint32(gc.OLSH) << 16
		ORSH_   = uint32(gc.ORSH) << 16
		OADD_   = uint32(gc.OADD) << 16
		OSUB_   = uint32(gc.OSUB) << 16
		OMUL_   = uint32(gc.OMUL) << 16
		ODIV_   = uint32(gc.ODIV) << 16
		OMOD_   = uint32(gc.OMOD) << 16
		OOR_    = uint32(gc.OOR) << 16
		OAND_   = uint32(gc.OAND) << 16
		OXOR_   = uint32(gc.OXOR) << 16
		OEQ_    = uint32(gc.OEQ) << 16
		ONE_    = uint32(gc.ONE) << 16
		OLT_    = uint32(gc.OLT) << 16
		OLE_    = uint32(gc.OLE) << 16
		OGE_    = uint32(gc.OGE) << 16
		OGT_    = uint32(gc.OGT) << 16
		OCMP_   = uint32(gc.OCMP) << 16
		OPS_    = uint32(gc.OPS) << 16
		OAS_    = uint32(gc.OAS) << 16
		OSQRT_  = uint32(gc.OSQRT) << 16
	)

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", op, t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])

		/*	case CASE(OADDR, TPTR32):
				a = ALEAL;
				break;

			case CASE(OADDR, TPTR64):
				a = ALEAQ;
				break;
		*/
		// TODO(kaib): make sure the conditional branches work on all edge cases
	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR32,
		OEQ_ | gc.TPTR64,
		OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = arm.ABEQ

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR32,
		ONE_ | gc.TPTR64,
		ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = arm.ABNE

	// Signed/float comparisons use the signed condition codes...
	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32,
		OLT_ | gc.TINT64,
		OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = arm.ABLT

	// ...unsigned comparisons use the unsigned ones (LO/LS/HI/HS).
	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32,
		OLT_ | gc.TUINT64:
		a = arm.ABLO

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32,
		OLE_ | gc.TINT64,
		OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = arm.ABLE

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32,
		OLE_ | gc.TUINT64:
		a = arm.ABLS

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32,
		OGT_ | gc.TINT64,
		OGT_ | gc.TFLOAT32,
		OGT_ | gc.TFLOAT64:
		a = arm.ABGT

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32,
		OGT_ | gc.TUINT64:
		a = arm.ABHI

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32,
		OGE_ | gc.TINT64,
		OGE_ | gc.TFLOAT32,
		OGE_ | gc.TFLOAT64:
		a = arm.ABGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32,
		OGE_ | gc.TUINT64:
		a = arm.ABHS

	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TUINT8,
		OCMP_ | gc.TINT16,
		OCMP_ | gc.TUINT16,
		OCMP_ | gc.TINT32,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TPTR32:
		a = arm.ACMP

	case OCMP_ | gc.TFLOAT32:
		a = arm.ACMPF

	case OCMP_ | gc.TFLOAT64:
		a = arm.ACMPD

	case OPS_ | gc.TFLOAT32,
		OPS_ | gc.TFLOAT64:
		a = arm.ABVS

	case OAS_ | gc.TBOOL:
		a = arm.AMOVB

	case OAS_ | gc.TINT8:
		a = arm.AMOVBS

	case OAS_ | gc.TUINT8:
		a = arm.AMOVBU

	case OAS_ | gc.TINT16:
		a = arm.AMOVHS

	case OAS_ | gc.TUINT16:
		a = arm.AMOVHU

	case OAS_ | gc.TINT32,
		OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = arm.AMOVW

	case OAS_ | gc.TFLOAT32:
		a = arm.AMOVF

	case OAS_ | gc.TFLOAT64:
		a = arm.AMOVD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8,
		OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16,
		OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32:
		a = arm.AADD

	case OADD_ | gc.TFLOAT32:
		a = arm.AADDF

	case OADD_ | gc.TFLOAT64:
		a = arm.AADDD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8,
		OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16,
		OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32:
		a = arm.ASUB

	case OSUB_ | gc.TFLOAT32:
		a = arm.ASUBF

	case OSUB_ | gc.TFLOAT64:
		a = arm.ASUBD

	// Unary minus is emitted as reverse-subtract (RSB) on ARM.
	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8,
		OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16,
		OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32:
		a = arm.ARSB

	case OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8,
		OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16,
		OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32:
		a = arm.AAND

	case OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8,
		OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16,
		OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32:
		a = arm.AORR

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8,
		OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16,
		OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32:
		a = arm.AEOR

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8,
		OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16,
		OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32:
		a = arm.ASLL

	// Right shift: logical for unsigned, arithmetic for signed.
	case ORSH_ | gc.TUINT8,
		ORSH_ | gc.TUINT16,
		ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32:
		a = arm.ASRL

	case ORSH_ | gc.TINT8,
		ORSH_ | gc.TINT16,
		ORSH_ | gc.TINT32:
		a = arm.ASRA

	case OMUL_ | gc.TUINT8,
		OMUL_ | gc.TUINT16,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32:
		a = arm.AMULU

	case OMUL_ | gc.TINT8,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TINT32:
		a = arm.AMUL

	case OMUL_ | gc.TFLOAT32:
		a = arm.AMULF

	case OMUL_ | gc.TFLOAT64:
		a = arm.AMULD

	case ODIV_ | gc.TUINT8,
		ODIV_ | gc.TUINT16,
		ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32:
		a = arm.ADIVU

	case ODIV_ | gc.TINT8,
		ODIV_ | gc.TINT16,
		ODIV_ | gc.TINT32:
		a = arm.ADIV

	case OMOD_ | gc.TUINT8,
		OMOD_ | gc.TUINT16,
		OMOD_ | gc.TUINT32,
		OMOD_ | gc.TPTR32:
		a = arm.AMODU

	case OMOD_ | gc.TINT8,
		OMOD_ | gc.TINT16,
		OMOD_ | gc.TINT32:
		a = arm.AMOD

		//	case CASE(OEXTEND, TINT16):
		//		a = ACWD;
		//		break;

		//	case CASE(OEXTEND, TINT32):
		//		a = ACDQ;
		//		break;

		//	case CASE(OEXTEND, TINT64):
		//		a = ACQO;
		//		break;

	case ODIV_ | gc.TFLOAT32:
		a = arm.ADIVF

	case ODIV_ | gc.TFLOAT64:
		a = arm.ADIVD

	case OSQRT_ | gc.TFLOAT64:
		a = arm.ASQRTD
	}

	return a
}

const (
	ODynam = 1 << 0
	OPtrto = 1 << 1
)

// clean/cleani hold register nodes allocated by sudoaddable, two slots
// per call, released by the matching sudoclean.
var clean [20]gc.Node

var cleani int = 0

// sudoclean releases the pair of register slots pushed by the most
// recent successful sudoaddable.
func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

// dotaddable reports whether ODOT node n can be addressed directly,
// i.e. it is a single non-indirect field selection off an addressable
// base. On success it fills n1 with the addressable node.
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	// o == 1 with a non-negative offset means one direct field step.
	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		// Small integer constants can be used as immediates for the
		// listed opcodes.
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int64()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		// Push two empty cleanup slots so the caller's sudoclean
		// pairs correctly even though no register was allocated.
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		// Otherwise walk the chain: negative oary entries encode an
		// indirection (offset stored as -(off+1)), each guarded by a
		// nil check.
		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Name = obj.NAME_NONE
		n1.Type = n.Type
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}