github.com/sean-/go@v0.0.0-20151219100004-97f854cd7bb6/src/cmd/compile/internal/arm/gsubr.go (about) 1 // Derived from Inferno utils/5c/txt.c 2 // http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
	"fmt"
)

// Registers the allocator must never hand out.
var resvd = []int{
	arm.REG_R9,  // formerly reserved for m; might be okay to reuse now; not sure about NaCl
	arm.REG_R10, // reserved for g
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */

var ncon_n gc.Node

func ncon(i uint32) *gc.Node {
	// Lazily initialize the shared constant node on first use.
	if ncon_n.Type == nil {
		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
	}
	ncon_n.SetInt(int64(i))
	return &ncon_n
}

// sclean/nsclean form a small stack of temporaries created by split64;
// splitclean pops and releases one entry per call, so split64/splitclean
// calls must be balanced.
var sclean [10]gc.Node

var nsclean int

/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatalf("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatalf("split64 clean")
	}
	// Push a placeholder; overwritten below only if a temporary is
	// actually allocated, so splitclean knows whether to Regfree it.
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				// Not directly addressable: materialize the
				// address in a register-backed temporary.
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				// Heap-escaped parameter: load its address first.
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

		// nothing
		case gc.OINDREG:
			break
		}

		// Both halves alias n; hi is the same location offset by 4
		// (little-endian layout: low word first).
		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		// Constants split arithmetically rather than by address.
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}

// splitclean releases the temporary (if any) created by the most recent
// split64 that has not yet been cleaned. Calls must balance split64.
func splitclean() {
	if nsclean <= 0 {
		gc.Fatalf("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}

// gmove generates code to move the value of node f into node t,
// performing any needed conversion between their (simplified) types.
// It dispatches on the source/destination type pair packed as
// ft<<16|tt and may recurse via intermediate registers.
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", f, t)
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands;
	// except 64-bit, which always copies via registers anyway.
	var a int
	var r1 gc.Node
	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT16,
			gc.TINT8:
			// Small signed constants go through a 32-bit register
			// so the narrowing store is explicit.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT16,
			gc.TUINT8:
			// Same for small unsigned constants, via uint32.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = gc.Simsimtype(con.Type)

		// constants can't move directly to memory
		if gc.Ismem(t) && !gc.Is64(t.Type) {
			goto hard
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatalf("gmove %v -> %v", f, t)
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.TUINT8<<16 | gc.TUINT8:
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8:
		a = arm.AMOVBS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm.AMOVBU
		goto trunc64

	case gc.TINT16<<16 | gc.TINT16: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.TUINT16<<16 | gc.TUINT16:
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16:
		a = arm.AMOVHS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm.AMOVHU
		goto trunc64

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = arm.AMOVW

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		// 64->32: copy only the low word through a register.
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &r1, t)
		gc.Regfree(&r1)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		// 64->64: copy both halves via two registers.
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		var r1 gc.Node
		gc.Regalloc(&r1, flo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, fhi.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &fhi, &r2)
		gins(arm.AMOVW, &r1, &tlo)
		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		splitclean()
		return

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = arm.AMOVBS

		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = arm.AMOVBU

		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = arm.AMOVHS

		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = arm.AMOVHU

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		var r1 gc.Node
		gc.Regalloc(&r1, tlo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, thi.Type, nil)
		gmove(f, &r1)
		// High word = low word arithmetically shifted right by 31
		// (all sign bits); encoded as a shift operand, see below.
		p1 := gins(arm.AMOVW, &r1, &r2)
		p1.From.Type = obj.TYPE_SHIFT
		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
		p1.From.Reg = 0

		//print("gmove: %v\n", p1);
		gins(arm.AMOVW, &r1, &tlo)

		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		var r1 gc.Node
		gc.Regalloc(&r1, thi.Type, nil)
		gins(arm.AMOVW, ncon(0), &r1)
		gins(arm.AMOVW, &r1, &thi)
		gc.Regfree(&r1)
		splitclean()
		return

		// case CASE(TFLOAT64, TUINT64):
		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TUINT32,

		// case CASE(TFLOAT32, TUINT64):

		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		// fa: FP load; a: FP->word convert; ta: final integer store.
		fa := arm.AMOVF

		a := arm.AMOVFW
		if ft == gc.TFLOAT64 {
			fa = arm.AMOVD
			a = arm.AMOVDW
		}

		ta := arm.AMOVW
		switch tt {
		case gc.TINT8:
			ta = arm.AMOVBS

		case gc.TUINT8:
			ta = arm.AMOVBU

		case gc.TINT16:
			ta = arm.AMOVHS

		case gc.TUINT16:
			ta = arm.AMOVHU
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)        // load to fpu
		p1 := gins(a, &r1, &r1) // convert to w
		switch tt {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			// Unsigned destination: mark the convert unsigned.
			p1.Scond |= arm.C_UBIT
		}

		gins(arm.AMOVW, &r1, &r2) // copy to cpu
		gins(ta, &r2, t)          // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		// fa: integer load; a: word->FP convert; ta: final FP store.
		fa := arm.AMOVW

		switch ft {
		case gc.TINT8:
			fa = arm.AMOVBS

		case gc.TUINT8:
			fa = arm.AMOVBU

		case gc.TINT16:
			fa = arm.AMOVHS

		case gc.TUINT16:
			fa = arm.AMOVHU
		}

		a := arm.AMOVWF
		ta := arm.AMOVF
		if tt == gc.TFLOAT64 {
			a = arm.AMOVWD
			ta = arm.AMOVD
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)          // load to cpu
		gins(arm.AMOVW, &r1, &r2) // copy to fpu
		p1 := gins(a, &r2, &r2)   // convert
		switch ft {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			// Unsigned source: mark the convert unsigned.
			p1.Scond |= arm.C_UBIT
		}

		gins(ta, &r2, t) // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		gc.Fatalf("gmove UINT64, TFLOAT not implemented")
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVF, f, &r1)
		gins(arm.AMOVFD, &r1, &r1)
		gins(arm.AMOVD, &r1, t)
		gc.Regfree(&r1)
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVD, f, &r1)
		gins(arm.AMOVDF, &r1, &r1)
		gins(arm.AMOVF, &r1, t)
		gc.Regfree(&r1)
		return
	}

	// Single-instruction case: the switch above picked opcode a.
	gins(a, f, t)
	return

	// TODO(kaib): we almost always require a register dest anyway, this can probably be
	// removed.
	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// truncate 64 bit integer
trunc64:
	var fhi gc.Node
	var flo gc.Node
	split64(f, &flo, &fhi)

	gc.Regalloc(&r1, t.Type, nil)
	gins(a, &flo, &r1)
	gins(a, &r1, t)
	gc.Regfree(&r1)
	splitclean()
	return
}

// samaddr reports whether f and t refer to the same location:
// here, only same-register operands count.
func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 * Returns the emitted Prog so callers can patch operands
 * (shift encodings, condition bits, etc.).
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;
	//	int32 v;

	if f != nil && f.Op == gc.OINDEX {
		gc.Fatalf("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	if t != nil && t.Op == gc.OINDEX {
		gc.Fatalf("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	// Per-opcode fixups and sanity checks.
	switch as {
	case arm.ABL:
		// Indirect call: the target register is a memory operand.
		if p.To.Type == obj.TYPE_REG {
			p.To.Type = obj.TYPE_MEM
		}

	case arm.ACMP, arm.ACMPF, arm.ACMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				/* generate a comparison
				TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
				*/
				gc.Fatalf("bad operands to gcmp")
			}
			// CMP takes its second operand in the reg field,
			// not in To.
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}

	case arm.AMULU:
		if f != nil && f.Op != gc.OREGISTER {
			gc.Fatalf("bad operands to mul")
		}

	case arm.AMOVW:
		if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
			gc.Fatalf("gins double memory")
		}

	case arm.AADD:
		if p.To.Type == obj.TYPE_MEM {
			gc.Fatalf("gins arith to mem")
		}

	case arm.ARSB:
		if p.From.Type == obj.TYPE_NONE {
			gc.Fatalf("rsb with no from")
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}
	return p
}

/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr
	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

/* generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 */
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
	if sval <= 0 || sval > 32 {
		gc.Fatalf("bad shift value: %d", sval)
	}

	// Keep only 5 bits: a requested shift of 32 becomes the 0 encoding.
	sval = sval & 0x1f

	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	// Shift-operand encoding: shift type | amount<<7 | source register.
	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
	return p
}

/* generate a register shift
 */
func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	// Register-shift encoding: shift type | shift reg<<8 | 1<<4 flag |
	// source register.
	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op gc.Op, t *gc.Type) int {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_ = uint32(gc.OMINUS) << 16
		OLSH_   = uint32(gc.OLSH) << 16
		ORSH_   = uint32(gc.ORSH) << 16
		OADD_   = uint32(gc.OADD) << 16
		OSUB_   = uint32(gc.OSUB) << 16
		OMUL_   = uint32(gc.OMUL) << 16
		ODIV_   = uint32(gc.ODIV) << 16
		OMOD_   = uint32(gc.OMOD) << 16
		OOR_    = uint32(gc.OOR) << 16
		OAND_   = uint32(gc.OAND) << 16
		OXOR_   = uint32(gc.OXOR) << 16
		OEQ_    = uint32(gc.OEQ) << 16
		ONE_    = uint32(gc.ONE) << 16
		OLT_    = uint32(gc.OLT) << 16
		OLE_    = uint32(gc.OLE) << 16
		OGE_    = uint32(gc.OGE) << 16
		OGT_    = uint32(gc.OGT) << 16
		OCMP_   = uint32(gc.OCMP) << 16
		OPS_    = uint32(gc.OPS) << 16
		OAS_    = uint32(gc.OAS) << 16
		OSQRT_  = uint32(gc.OSQRT) << 16
	)

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])

		/*	case CASE(OADDR, TPTR32):
				a = ALEAL;
				break;

			case CASE(OADDR, TPTR64):
				a = ALEAQ;
				break;
		*/
		// TODO(kaib): make sure the conditional branches work on all edge cases
	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR32,
		OEQ_ | gc.TPTR64,
		OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = arm.ABEQ

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR32,
		ONE_ | gc.TPTR64,
		ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = arm.ABNE

	// Signed comparisons use the signed condition codes...
	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32,
		OLT_ | gc.TINT64,
		OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = arm.ABLT

	// ...and unsigned comparisons the unsigned ones (LO/LS/HI/HS).
	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32,
		OLT_ | gc.TUINT64:
		a = arm.ABLO

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32,
		OLE_ | gc.TINT64,
		OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = arm.ABLE

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32,
		OLE_ | gc.TUINT64:
		a = arm.ABLS

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32,
		OGT_ | gc.TINT64,
		OGT_ | gc.TFLOAT32,
		OGT_ | gc.TFLOAT64:
		a = arm.ABGT

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32,
		OGT_ | gc.TUINT64:
		a = arm.ABHI

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32,
		OGE_ | gc.TINT64,
		OGE_ | gc.TFLOAT32,
		OGE_ | gc.TFLOAT64:
		a = arm.ABGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32,
		OGE_ | gc.TUINT64:
		a = arm.ABHS

	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TUINT8,
		OCMP_ | gc.TINT16,
		OCMP_ | gc.TUINT16,
		OCMP_ | gc.TINT32,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TPTR32:
		a = arm.ACMP

	case OCMP_ | gc.TFLOAT32:
		a = arm.ACMPF

	case OCMP_ | gc.TFLOAT64:
		a = arm.ACMPD

	case OPS_ | gc.TFLOAT32,
		OPS_ | gc.TFLOAT64:
		a = arm.ABVS

	case OAS_ | gc.TBOOL:
		a = arm.AMOVB

	case OAS_ | gc.TINT8:
		a = arm.AMOVBS

	case OAS_ | gc.TUINT8:
		a = arm.AMOVBU

	case OAS_ | gc.TINT16:
		a = arm.AMOVHS

	case OAS_ | gc.TUINT16:
		a = arm.AMOVHU

	case OAS_ | gc.TINT32,
		OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = arm.AMOVW

	case OAS_ | gc.TFLOAT32:
		a = arm.AMOVF

	case OAS_ | gc.TFLOAT64:
		a = arm.AMOVD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8,
		OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16,
		OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32:
		a = arm.AADD

	case OADD_ | gc.TFLOAT32:
		a = arm.AADDF

	case OADD_ | gc.TFLOAT64:
		a = arm.AADDD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8,
		OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16,
		OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32:
		a = arm.ASUB

	case OSUB_ | gc.TFLOAT32:
		a = arm.ASUBF

	case OSUB_ | gc.TFLOAT64:
		a = arm.ASUBD

	// Unary minus is reverse-subtract from zero on ARM.
	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8,
		OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16,
		OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32:
		a = arm.ARSB

	case OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8,
		OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16,
		OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32:
		a = arm.AAND

	case OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8,
		OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16,
		OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32:
		a = arm.AORR

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8,
		OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16,
		OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32:
		a = arm.AEOR

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8,
		OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16,
		OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32:
		a = arm.ASLL

	// Right shift: logical for unsigned, arithmetic for signed.
	case ORSH_ | gc.TUINT8,
		ORSH_ | gc.TUINT16,
		ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32:
		a = arm.ASRL

	case ORSH_ | gc.TINT8,
		ORSH_ | gc.TINT16,
		ORSH_ | gc.TINT32:
		a = arm.ASRA

	case OMUL_ | gc.TUINT8,
		OMUL_ | gc.TUINT16,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32:
		a = arm.AMULU

	case OMUL_ | gc.TINT8,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TINT32:
		a = arm.AMUL

	case OMUL_ | gc.TFLOAT32:
		a = arm.AMULF

	case OMUL_ | gc.TFLOAT64:
		a = arm.AMULD

	case ODIV_ | gc.TUINT8,
		ODIV_ | gc.TUINT16,
		ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32:
		a = arm.ADIVU

	case ODIV_ | gc.TINT8,
		ODIV_ | gc.TINT16,
		ODIV_ | gc.TINT32:
		a = arm.ADIV

	case OMOD_ | gc.TUINT8,
		OMOD_ | gc.TUINT16,
		OMOD_ | gc.TUINT32,
		OMOD_ | gc.TPTR32:
		a = arm.AMODU

	case OMOD_ | gc.TINT8,
		OMOD_ | gc.TINT16,
		OMOD_ | gc.TINT32:
		a = arm.AMOD

		//	case CASE(OEXTEND, TINT16):
		//		a = ACWD;
		//		break;

		//	case CASE(OEXTEND, TINT32):
		//		a = ACDQ;
		//		break;

		//	case CASE(OEXTEND, TINT64):
		//		a = ACQO;
		//		break;

	case ODIV_ | gc.TFLOAT32:
		a = arm.ADIVF

	case ODIV_ | gc.TFLOAT64:
		a = arm.ADIVD

	case OSQRT_ | gc.TFLOAT64:
		a = arm.ASQRTD
	}

	return a
}

const (
	ODynam = 1 << 0
	OPtrto = 1 << 1
)

// clean/cleani form a stack of register pairs reserved by sudoaddable;
// sudoclean pops one pair. Entries left at OEMPTY were never allocated.
var clean [20]gc.Node

var cleani int = 0

// sudoclean releases the pair of registers (if any) reserved by the most
// recent successful sudoaddable.
func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

// dotaddable reports whether ODOT node n is directly addressable
// (a single non-indirect field selection on an addressable base);
// on success it fills n1 with the equivalent addressable node.
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	// o == 1 with oary[0] >= 0 means one direct (non-pointer) offset.
	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int()
		// Only small constants fit the immediate forms used here.
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		// Push an empty pair so the caller's sudoclean balances.
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		// Otherwise walk the selection chain through a pointer
		// register; negative oary entries encode indirections.
		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			// Load the next pointer in the chain, nil-checking it.
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Name = obj.NAME_NONE
		n1.Type = n.Type
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}