github.com/miolini/go@v0.0.0-20160405192216-fca68c8cb408/src/cmd/compile/internal/arm64/peep.go

// Derived from Inferno utils/6c/peep.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
//
//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package arm64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm64"
	"fmt"
)

var gactive uint32

func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	var p *obj.Prog
	var r *gc.Flow
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog

		// TODO(minux) Handle smaller moves. arm and amd64
		// distinguish between moves that *must* sign/zero
		// extend and moves that don't care so they
		// can eliminate moves that don't care without
		// breaking moves that do care. This might let us
		// simplify or remove the next peep loop, too.
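
		// A reg->reg move is eliminated in two steps: copyprop first tries
		// to forward the source register through later uses of the
		// destination; if that fails, subprop tries to swap the two
		// registers in the preceding instructions so that copyprop can
		// succeed on a second attempt.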
		if p.As == arm64.AMOVD || p.As == arm64.AFMOVD {
			if regtyp(&p.To) {
				// Try to eliminate reg->reg moves
				if regtyp(&p.From) {
					if p.From.Type == p.To.Type {
						if copyprop(r) {
							excise(r)
							t++
						} else if subprop(r) && copyprop(r) {
							excise(r)
							t++
						}
					}
				}
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	/*
	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
	 */
	var p1 *obj.Prog
	var r1 *gc.Flow
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVH,
			arm64.AMOVHU,
			arm64.AMOVB,
			arm64.AMOVBU,
			arm64.AMOVW,
			arm64.AMOVWU:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != p.As {
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
			continue
		}
		excise(r1)
	}

	if gc.Debug['D'] > 1 {
		goto ret /* allow following code improvement to be suppressed */
	}

	// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVD:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
			if p.From.Type != obj.TYPE_CONST {
				continue
			}
			if p.From.Offset < 0 || 4096 <= p.From.Offset {
				continue
			}
		}
		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != arm64.AADD && p1.As != arm64.ASUB { // TODO(aram): also logical after we have bimm.
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG {
			continue
		}
		if gc.Debug['P'] != 0 {
			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(p1.As), p, p1)
		}
		p1.From.Type = obj.TYPE_CONST
		p1.From = p.From
		excise(r)
	}

	/* TODO(minux):
	 * look for OP x,y,R; CMP R, $0 -> OP.S x,y,R
	 * when OP can set condition codes correctly
	 */

ret:
	gc.Flowend(g)
}

func excise(r *gc.Flow) {
	p := r.Prog
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("%v ===delete===\n", p)
	}
	obj.Nopout(p)
	gc.Ostats.Ndelmov++
}

func regtyp(a *obj.Addr) bool {
	// TODO(rsc): Floating point register exclusions?
	return a.Type == obj.TYPE_REG && arm64.REG_R0 <= a.Reg && a.Reg <= arm64.REG_F31 && a.Reg != arm64.REGZERO
}

/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R1
 *	ADD	b, R1	/ no use of R2
 *	MOV	R1, R2
 * would be converted to
 *	MOV	a, R2
 *	ADD	b, R2
 *	MOV	R2, R1
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 *
 * r0 (the argument, not the register) is the MOV at the end of the
 * above sequences. This returns true if it modified any instructions.
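 *
 * subprop scans backwards from r0 through the straight-line predecessors
 * of the MOV. It gives up at a call, stops at any other use of R2 or when
 * the flow graph stops being a straight line, and performs the swap only
 * when it reaches the instruction that writes R1 as a pure destination.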
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
			if p.To.Type == v1.Type {
				if p.To.Reg == v1.Reg {
					copysub(&p.To, v1, v2, true)
					if gc.Debug['P'] != 0 {
						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
						if p.From.Type == v2.Type {
							fmt.Printf(" excise")
						}
						fmt.Printf("\n")
					}

					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
						p = r.Prog
						copysub(&p.From, v1, v2, true)
						copysub1(p, v1, v2, true)
						copysub(&p.To, v1, v2, true)
						if gc.Debug['P'] != 0 {
							fmt.Printf("%v\n", r.Prog)
						}
					}

					v1.Reg, v2.Reg = v2.Reg, v1.Reg
					if gc.Debug['P'] != 0 {
						fmt.Printf("%v last\n", r.Prog)
					}
					return true
				}
			}
		}

		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
			break
		}
	}

	return false
}

/*
 * The idea is to remove redundant copies.
 *	v1->v2	F=0
 *	(use v2	s/v2/v1/)*
 *	set v1	F=1
 *	use v2	return fail (v1->v2 move must remain)
 * -----------------
 *	v1->v2	F=0
 *	(use v2	s/v2/v1/)*
 *	set v1	F=1
 *	set v2	return success (caller can remove v1->v2 move)
 */
func copyprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	v2 := &p.To
	if copyas(v1, v2) {
		if gc.Debug['P'] != 0 {
			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
		}
		return true
	}

	gactive++
	if gc.Debug['P'] != 0 {
		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
	}
	return copy1(v1, v2, r0.S1, false)
}

// copy1 replaces uses of v2 with v1 starting at r and returns true if
// all uses were rewritten.
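//
// f tracks whether v1 may have been redefined since the original
// v1->v2 move: it is set at merge points (multiple predecessors) and
// whenever v1 is written. Once f is set, any remaining use of v2 makes
// copy1 fail, because the v1->v2 move being eliminated must stay.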
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
	if uint32(r.Active) == gactive {
		if gc.Debug['P'] != 0 {
			fmt.Printf("act set; return 1\n")
		}
		return true
	}

	r.Active = int32(gactive)
	if gc.Debug['P'] != 0 {
		fmt.Printf("copy1 replace %v with %v f=%v\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
	}
	for ; r != nil; r = r.S1 {
		p := r.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("%v", p)
		}
		if !f && gc.Uniqp(r) == nil {
			// Multiple predecessors; conservatively
			// assume v1 was set on other path
			f = true

			if gc.Debug['P'] != 0 {
				fmt.Printf("; merge; f=%v", f)
			}
		}

		switch t := copyu(p, v2, nil); t {
		case 2: /* rar, can't split */
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
			}
			return false

		case 3: /* set */
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
			}
			return true

		case 1, /* used, substitute */
			4: /* use and set */
			if f {
				if gc.Debug['P'] == 0 {
					return false
				}
				if t == 4 {
					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
				} else {
					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
				}
				return false
			}

			if copyu(p, v2, v1) != 0 {
				if gc.Debug['P'] != 0 {
					fmt.Printf("; sub fail; return 0\n")
				}
				return false
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
			}
			if t == 4 {
				if gc.Debug['P'] != 0 {
					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
				}
				return true
			}
		}

		if !f {
			t := copyu(p, v1, nil)
			if t == 2 || t == 3 || t == 4 {
				f = true
				if gc.Debug['P'] != 0 {
					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
				}
			}
		}

		if gc.Debug['P'] != 0 {
			fmt.Printf("\n")
		}
		if r.S2 != nil {
			if !copy1(v1, v2, r.S2, f) {
				return false
			}
		}
	}
	return true
}

// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
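// Substitution only rewrites operands that read v (including indirect
// uses of v in the destination address); a plain write of v is left
// untouched so the definition itself survives.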
//
// If s==nil, copyu returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// 7g never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}
	if p.RegTo2 != obj.REG_NONE {
		// 7g never generates a to2
		fmt.Printf("copyu: RegTo2 (%v) not implemented\n", obj.Rconv(int(p.RegTo2)))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		arm64.ANEG,
		arm64.AFNEGD,
		arm64.AFNEGS,
		arm64.AFSQRTD,
		arm64.AFCVTZSD,
		arm64.AFCVTZSS,
		arm64.AFCVTZSDW,
		arm64.AFCVTZSSW,
		arm64.AFCVTZUD,
		arm64.AFCVTZUS,
		arm64.AFCVTZUDW,
		arm64.AFCVTZUSW,
		arm64.AFCVTSD,
		arm64.AFCVTDS,
		arm64.ASCVTFD,
		arm64.ASCVTFS,
		arm64.ASCVTFWD,
		arm64.ASCVTFWS,
		arm64.AUCVTFD,
		arm64.AUCVTFS,
		arm64.AUCVTFWD,
		arm64.AUCVTFWS,
		arm64.AMOVB,
		arm64.AMOVBU,
		arm64.AMOVH,
		arm64.AMOVHU,
		arm64.AMOVW,
		arm64.AMOVWU,
		arm64.AMOVD,
		arm64.AFMOVS,
		arm64.AFMOVD:
		if p.Scond == 0 {
			if s != nil {
				if copysub(&p.From, v, s, true) {
					return 1
				}

				// Update only indirect uses of v in p->to
				if !copyas(&p.To, v) {
					if copysub(&p.To, v, s, true) {
						return 1
					}
				}
				return 0
			}

			if copyas(&p.To, v) {
				// Fix up implicit from
				if p.From.Type == obj.TYPE_NONE {
					p.From = p.To
				}
				if copyau(&p.From, v) {
					return 4
				}
				return 3
			}

			if copyau(&p.From, v) {
				return 1
			}
			if copyau(&p.To, v) {
				// p->to only indirectly uses v
				return 1
			}

			return 0
		}

		/* rar p->from, write p->to or read p->from, rar p->to */
		if p.From.Type == obj.TYPE_MEM {
			if copyas(&p.From, v) {
				// No s!=nil check; need to fail
				// anyway in that case
				return 2
			}

			if s != nil {
				if copysub(&p.To, v, s, true) {
					return 1
				}
				return 0
			}

			if copyas(&p.To, v) {
				return 3
			}
		} else if p.To.Type == obj.TYPE_MEM {
			if copyas(&p.To, v) {
				return 2
			}
			if s != nil {
				if copysub(&p.From, v, s, true) {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				return 1
			}
		} else {
			fmt.Printf("copyu: bad %v\n", p)
		}

		return 0

	case arm64.AADD, /* read p->from, read p->reg, write p->to */
		arm64.ASUB,
		arm64.AAND,
		arm64.AORR,
		arm64.AEOR,
		arm64.AMUL,
		arm64.ASMULL,
		arm64.AUMULL,
		arm64.ASMULH,
		arm64.AUMULH,
		arm64.ASDIV,
		arm64.AUDIV,
		arm64.ALSL,
		arm64.ALSR,
		arm64.AASR,
		arm64.AFADDD,
		arm64.AFADDS,
		arm64.AFSUBD,
		arm64.AFSUBS,
		arm64.AFMULD,
		arm64.AFMULS,
		arm64.AFDIVD,
		arm64.AFDIVS:
		if s != nil {
			if copysub(&p.From, v, s, true) {
				return 1
			}
			if copysub1(p, v, s, true) {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, true) {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm64.ABEQ,
		arm64.ABNE,
		arm64.ABGE,
		arm64.ABLT,
		arm64.ABGT,
		arm64.ABLE,
		arm64.ABLO,
		arm64.ABLS,
		arm64.ABHI,
		arm64.ABHS:
		return 0

	case obj.ACHECKNIL, /* read p->from */
		arm64.ACMP, /* read p->from, read p->reg */
		arm64.AFCMPD,
		arm64.AFCMPS:
		if s != nil {
			if copysub(&p.From, v, s, true) {
				return 1
			}
			if copysub1(p, v, s, true) {
				return 1
			}
			return 0
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case arm64.AB: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, true) {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case arm64.ABL: /* funny */
		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, true) {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R31 is zero, used by DUFFZERO, cannot be substituted.
	// R16 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm64.REGZERO { // R31
				return 1
			}
			if v.Reg == arm64.REG_R16 {
				return 2
			}
		}

		return 0

	// R16, R17 are ptr to src, dst, used and set, cannot be substituted.
	// R27 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm64.REG_R16 || v.Reg == arm64.REG_R17 {
				return 2
			}
			if v.Reg == arm64.REGTMP { // R27
				return 3
			}
		}

		return 0

	case arm64.AHINT,
		obj.ATEXT,
		obj.APCDATA,
		obj.AFUNCDATA,
		obj.AVARDEF,
		obj.AVARKILL,
		obj.AVARLIVE,
		obj.AUSEFIELD:
		return 0
	}
}

// copyas returns true if a and v address the same register.
//
// If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means this operation
// writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) bool {
	return regtyp(v) && a.Type == v.Type && a.Reg == v.Reg
}

// copyau returns true if a either directly or indirectly addresses the
// same register as v.
//
// If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v).
func copyau(a *obj.Addr, v *obj.Addr) bool {
	if copyas(a, v) {
		return true
	}
	if v.Type == obj.TYPE_REG {
		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
			if v.Reg == a.Reg {
				return true
			}
		}
	}
	return false
}

// copyau1 returns true if p->reg references the same register as v and v
// is a direct reference.
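// Unlike copyau, copyau1 only inspects p->reg, the register that arm64
// three-operand instructions such as ADD and CMP use as their second
// source operand.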
func copyau1(p *obj.Prog, v *obj.Addr) bool {
	return regtyp(v) && v.Reg != 0 && p.Reg == v.Reg
}

// copysub replaces v with s in a if f==true or reports whether it could
// if f==false.
// Returns true on failure to substitute (it always succeeds on arm64).
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau(a, v) {
		a.Reg = s.Reg
	}
	return false
}

// copysub1 replaces v with s in p1->reg if f==true or reports whether it
// could if f==false.
// Returns true on failure to substitute (it always succeeds on arm64).
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau1(p1, v) {
		p1.Reg = s.Reg
	}
	return false
}

func sameaddr(a *obj.Addr, v *obj.Addr) bool {
	if a.Type != v.Type {
		return false
	}
	if regtyp(v) && a.Reg == v.Reg {
		return true
	}
	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
		if v.Offset == a.Offset {
			return true
		}
	}
	return false
}

func smallindir(a *obj.Addr, reg *obj.Addr) bool {
	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
}

func stackaddr(a *obj.Addr) bool {
	return a.Type == obj.TYPE_REG && a.Reg == arm64.REGSP
}