github.com/mh-cbon/go@v0.0.0-20160603070303-9e112a3fe4c0/src/cmd/compile/internal/arm64/peep.go

// Derived from Inferno utils/6c/peep.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package arm64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm64"
	"fmt"
)

var gactive uint32

func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	var p *obj.Prog
	var r *gc.Flow
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog

		// TODO(minux) Handle smaller moves. arm and amd64
		// distinguish between moves that *must* sign/zero
		// extend and moves that don't care so they
		// can eliminate moves that don't care without
		// breaking moves that do care. This might let us
		// simplify or remove the next peep loop, too.
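
		// Illustrative example (added note, not from the original source):
		// given
		//	MOVD	R1, R2
		//	ADD	R2, R3
		// copyprop rewrites the ADD to read R1; once every remaining use
		// of R2 has been rewritten (or R2 is set again), the MOVD is dead
		// and excise replaces it with a NOP.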
		if p.As == arm64.AMOVD || p.As == arm64.AFMOVD {
			if regtyp(&p.To) {
				// Try to eliminate reg->reg moves
				if regtyp(&p.From) {
					if p.From.Type == p.To.Type {
						if copyprop(r) {
							excise(r)
							t++
						} else if subprop(r) && copyprop(r) {
							excise(r)
							t++
						}
					}
				}
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	/*
	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
	 */
	var p1 *obj.Prog
	var r1 *gc.Flow
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVH,
			arm64.AMOVHU,
			arm64.AMOVB,
			arm64.AMOVBU,
			arm64.AMOVW,
			arm64.AMOVWU:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != p.As {
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
			continue
		}
		excise(r1)
	}

	if gc.Debug['D'] > 1 {
		goto ret /* allow following code improvement to be suppressed */
	}

	// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVD:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
			if p.From.Type != obj.TYPE_CONST {
				continue
			}
			if p.From.Offset < 0 || 4096 <= p.From.Offset {
				continue
			}
		}
		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != arm64.AADD && p1.As != arm64.ASUB { // TODO(aram): also logical after we have bimm.
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG {
			continue
		}
		if gc.Debug['P'] != 0 {
			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(p1.As), p, p1)
		}
		p1.From.Type = obj.TYPE_CONST
		p1.From = p.From
		excise(r)
	}

	/* TODO(minux):
	 * look for OP x,y,R; CMP R, $0 -> OP.S x,y,R
	 * when OP can set condition codes correctly
	 */

ret:
	gc.Flowend(g)
}

func excise(r *gc.Flow) {
	p := r.Prog
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("%v ===delete===\n", p)
	}
	obj.Nopout(p)
	gc.Ostats.Ndelmov++
}

func regtyp(a *obj.Addr) bool {
	// TODO(rsc): Floating point register exclusions?
	return a.Type == obj.TYPE_REG && arm64.REG_R0 <= a.Reg && a.Reg <= arm64.REG_F31 && a.Reg != arm64.REGZERO
}

/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R1
 *	ADD	b, R1	/ no use of R2
 *	MOV	R1, R2
 * would be converted to
 *	MOV	a, R2
 *	ADD	b, R2
 *	MOV	R2, R1
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 *
 * r0 (the argument, not the register) is the MOV at the end of the
 * above sequences. This returns true if it modified any instructions.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
			if p.To.Type == v1.Type {
				if p.To.Reg == v1.Reg {
					copysub(&p.To, v1, v2, true)
					if gc.Debug['P'] != 0 {
						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
						if p.From.Type == v2.Type {
							fmt.Printf(" excise")
						}
						fmt.Printf("\n")
					}

					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
						p = r.Prog
						copysub(&p.From, v1, v2, true)
						copysub1(p, v1, v2, true)
						copysub(&p.To, v1, v2, true)
						if gc.Debug['P'] != 0 {
							fmt.Printf("%v\n", r.Prog)
						}
					}

					v1.Reg, v2.Reg = v2.Reg, v1.Reg
					if gc.Debug['P'] != 0 {
						fmt.Printf("%v last\n", r.Prog)
					}
					return true
				}
			}
		}

		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
			break
		}
	}

	return false
}

/*
 * The idea is to remove redundant copies.
 *	v1->v2	F=0
 *	(use v2	s/v2/v1/)*
 *	set v1	F=1
 *	use v2	return fail (v1->v2 move must remain)
 * -----------------
 *	v1->v2	F=0
 *	(use v2	s/v2/v1/)*
 *	set v1	F=1
 *	set v2	return success (caller can remove v1->v2 move)
 */
func copyprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	v2 := &p.To
	if copyas(v1, v2) {
		if gc.Debug['P'] != 0 {
			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
		}
		return true
	}

	gactive++
	if gc.Debug['P'] != 0 {
		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
	}
	return copy1(v1, v2, r0.S1, false)
}

// copy1 replaces uses of v2 with v1 starting at r and returns true if
// all uses were rewritten.
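//
// It walks forward along each block's S1 successor and recurses into S2
// branches. The flag f records that v1 may have been set on some path;
// once f is true, any further use of v2 makes the rewrite fail.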
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
	if uint32(r.Active) == gactive {
		if gc.Debug['P'] != 0 {
			fmt.Printf("act set; return 1\n")
		}
		return true
	}

	r.Active = int32(gactive)
	if gc.Debug['P'] != 0 {
		fmt.Printf("copy1 replace %v with %v f=%v\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
	}
	for ; r != nil; r = r.S1 {
		p := r.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("%v", p)
		}
		if !f && gc.Uniqp(r) == nil {
			// Multiple predecessors; conservatively
			// assume v1 was set on other path
			f = true

			if gc.Debug['P'] != 0 {
				fmt.Printf("; merge; f=%v", f)
			}
		}

		switch t := copyu(p, v2, nil); t {
		case 2: /* rar, can't split */
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
			}
			return false

		case 3: /* set */
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
			}
			return true

		case 1, /* used, substitute */
			4: /* use and set */
			if f {
				if gc.Debug['P'] == 0 {
					return false
				}
				if t == 4 {
					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
				} else {
					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
				}
				return false
			}

			if copyu(p, v2, v1) != 0 {
				if gc.Debug['P'] != 0 {
					fmt.Printf("; sub fail; return 0\n")
				}
				return false
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
			}
			if t == 4 {
				if gc.Debug['P'] != 0 {
					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
				}
				return true
			}
		}

		if !f {
			t := copyu(p, v1, nil)
			if t == 2 || t == 3 || t == 4 {
				f = true
				if gc.Debug['P'] != 0 {
					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
				}
			}
		}

		if gc.Debug['P'] != 0 {
			fmt.Printf("\n")
		}
		if r.S2 != nil {
			if !copy1(v1, v2, r.S2, f) {
				return false
			}
		}
	}
	return true
}

// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copyu returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// 7g never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}
	if p.RegTo2 != obj.REG_NONE {
		// 7g never generates a to2
		fmt.Printf("copyu: RegTo2 (%v) not implemented\n", obj.Rconv(int(p.RegTo2)))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		arm64.ANEG,
		arm64.AFNEGD,
		arm64.AFNEGS,
		arm64.AFSQRTD,
		arm64.AFCVTZSD,
		arm64.AFCVTZSS,
		arm64.AFCVTZSDW,
		arm64.AFCVTZSSW,
		arm64.AFCVTZUD,
		arm64.AFCVTZUS,
		arm64.AFCVTZUDW,
		arm64.AFCVTZUSW,
		arm64.AFCVTSD,
		arm64.AFCVTDS,
		arm64.ASCVTFD,
		arm64.ASCVTFS,
		arm64.ASCVTFWD,
		arm64.ASCVTFWS,
		arm64.AUCVTFD,
		arm64.AUCVTFS,
		arm64.AUCVTFWD,
		arm64.AUCVTFWS,
		arm64.AMOVB,
		arm64.AMOVBU,
		arm64.AMOVH,
		arm64.AMOVHU,
		arm64.AMOVW,
		arm64.AMOVWU,
		arm64.AMOVD,
		arm64.AFMOVS,
		arm64.AFMOVD:
		if p.Scond == 0 {
			if s != nil {
				if copysub(&p.From, v, s, true) {
					return 1
				}

				// Update only indirect uses of v in p->to
				if !copyas(&p.To, v) {
					if copysub(&p.To, v, s, true) {
						return 1
					}
				}
				return 0
			}

			if copyas(&p.To, v) {
				// Fix up implicit from
				if p.From.Type == obj.TYPE_NONE {
					p.From = p.To
				}
				if copyau(&p.From, v) {
					return 4
				}
				return 3
			}

			if copyau(&p.From, v) {
				return 1
			}
			if copyau(&p.To, v) {
				// p->to only indirectly uses v
				return 1
			}

			return 0
		}

		/* rar p->from, write p->to or read p->from, rar p->to */
		if p.From.Type == obj.TYPE_MEM {
			if copyas(&p.From, v) {
				// No s!=nil check; need to fail
				// anyway in that case
				return 2
			}

			if s != nil {
				if copysub(&p.To, v, s, true) {
					return 1
				}
				return 0
			}

			if copyas(&p.To, v) {
				return 3
			}
		} else if p.To.Type == obj.TYPE_MEM {
			if copyas(&p.To, v) {
				return 2
			}
			if s != nil {
				if copysub(&p.From, v, s, true) {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				return 1
			}
		} else {
			fmt.Printf("copyu: bad %v\n", p)
		}

		return 0

	case arm64.AADD, /* read p->from, read p->reg, write p->to */
		arm64.AADDS,
		arm64.ASUB,
		arm64.AADC,
		arm64.AAND,
		arm64.AORR,
		arm64.AEOR,
		arm64.AROR,
		arm64.AMUL,
		arm64.ASMULL,
		arm64.AUMULL,
		arm64.ASMULH,
		arm64.AUMULH,
		arm64.ASDIV,
		arm64.AUDIV,
		arm64.ALSL,
		arm64.ALSR,
		arm64.AASR,
		arm64.AFADDD,
		arm64.AFADDS,
		arm64.AFSUBD,
		arm64.AFSUBS,
		arm64.AFMULD,
		arm64.AFMULS,
		arm64.AFDIVD,
		arm64.AFDIVS:
		if s != nil {
			if copysub(&p.From, v, s, true) {
				return 1
			}
			if copysub1(p, v, s, true) {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, true) {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm64.ABEQ,
		arm64.ABNE,
		arm64.ABGE,
		arm64.ABLT,
		arm64.ABGT,
		arm64.ABLE,
		arm64.ABLO,
		arm64.ABLS,
		arm64.ABHI,
		arm64.ABHS:
		return 0

	case obj.ACHECKNIL, /* read p->from */
		arm64.ACMP, /* read p->from, read p->reg */
		arm64.AFCMPD,
		arm64.AFCMPS:
		if s != nil {
			if copysub(&p.From, v, s, true) {
				return 1
			}
			if copysub1(p, v, s, true) {
				return 1
			}
			return 0
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case arm64.AB: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, true) {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case arm64.ABL: /* funny */
		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, true) {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R31 is zero, used by DUFFZERO, cannot be substituted.
	// R16 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 31 {
				return 1
			}
			if v.Reg == 16 {
				return 2
			}
		}

		return 0

	// R16, R17 are ptr to src, dst, used and set, cannot be substituted.
	// R27 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 16 || v.Reg == 17 {
				return 2
			}
			if v.Reg == 27 {
				return 3
			}
		}

		return 0

	case arm64.AHINT,
		obj.ATEXT,
		obj.APCDATA,
		obj.AFUNCDATA,
		obj.AVARDEF,
		obj.AVARKILL,
		obj.AVARLIVE,
		obj.AUSEFIELD:
		return 0
	}
}

// copyas returns true if a and v address the same register.
//
// If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means this operation
// writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) bool {
	return regtyp(v) && a.Type == v.Type && a.Reg == v.Reg
}

// copyau returns true if a either directly or indirectly addresses the
// same register as v.
//
// If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v).
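//
// For example, with v holding register R1, copyau reports true both for a
// direct operand R1 and for a memory operand such as 8(R1), whose address
// is formed from R1.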
func copyau(a *obj.Addr, v *obj.Addr) bool {
	if copyas(a, v) {
		return true
	}
	if v.Type == obj.TYPE_REG {
		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
			if v.Reg == a.Reg {
				return true
			}
		}
	}
	return false
}

// copyau1 returns true if p->reg references the same register as v and v
// is a direct reference.
func copyau1(p *obj.Prog, v *obj.Addr) bool {
	return regtyp(v) && v.Reg != 0 && p.Reg == v.Reg
}

// copysub replaces v with s in a if f==true or indicates whether it could if f==false.
// Returns true on failure to substitute (it always succeeds on arm64).
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau(a, v) {
		a.Reg = s.Reg
	}
	return false
}

// copysub1 replaces v with s in p1->reg if f==true or indicates whether it could if f==false.
// Returns true on failure to substitute (it always succeeds on arm64).
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau1(p1, v) {
		p1.Reg = s.Reg
	}
	return false
}

func sameaddr(a *obj.Addr, v *obj.Addr) bool {
	if a.Type != v.Type {
		return false
	}
	if regtyp(v) && a.Reg == v.Reg {
		return true
	}
	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
		if v.Offset == a.Offset {
			return true
		}
	}
	return false
}

func smallindir(a *obj.Addr, reg *obj.Addr) bool {
	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
}

func stackaddr(a *obj.Addr) bool {
	return a.Type == obj.TYPE_REG && a.Reg == arm64.REGSP
}