github.com/cloudwego/frugal@v0.1.15/internal/atm/ssa/ir.go

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ssa

import (
    `fmt`
    `sort`
    `strings`
    `unsafe`

    `github.com/cloudwego/frugal/internal/atm/abi`
    `github.com/cloudwego/frugal/internal/atm/hir`
    `github.com/cloudwego/frugal/internal/rt`
)

type (
    Reg        uint64
    Constness  uint8
    Likeliness uint8
)

const (
    _B_ptr  = 63
    _B_kind = 60
    _B_name = 52
)

const (
    _M_ptr  = 1
    _M_kind = 0x07
    _M_name = 0xff
)

const (
    _R_ptr   = _M_ptr << _B_ptr
    _R_kind  = _M_kind << _B_kind
    _R_name  = _M_name << _B_name
    _R_index = (1 << _B_name) - 1
)

const (
    K_sys  = 0
    K_zero = 1
    K_temp = 2
    K_arch = 3
    K_norm = 4
)

const (
    N_size = _M_name + 1
)

const (
    Rz Reg = (0 << _B_ptr) | (K_zero << _B_kind)
    Pn Reg = (1 << _B_ptr) | (K_zero << _B_kind)
)

const (
    Const Constness = iota
    Volatile
)

const (
    Likely Likeliness = iota
    Unlikely
)

func (self Constness) String() string {
    switch self {
        case Const    : return "const"
        case Volatile : return "volatile"
        default       : return "???"
    }
}

func (self Likeliness) String() string {
    switch self {
        case Likely   : return "likely"
        case Unlikely : return "unlikely"
        default       : return "???"
    }
}
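/*
 * The fields above pack into a Reg as follows (a summary of the constants,
 * not an extra invariant):
 *
 *      63   62..60   59..52      51..0
 *     +-----+------+--------+-------------+
 *     | ptr | kind |  name  |  SSA index  |
 *     +-----+------+--------+-------------+
 *
 * For example, the generic temporary register tr3 at SSA index 7 encodes as
 * (0 << _B_ptr) | (K_temp << _B_kind) | (3 << _B_name) | 7, and round-trips
 * through the accessors defined below:
 *
 *     r := Tr(3).Derive(7)
 *     r.Kind()    // K_temp
 *     r.Name()    // 3
 *     r.Index()   // 7
 *     r.String()  // "tr3.7"
 */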
func mksys(ptr uint64, kind uint64) Reg {
    if kind >= N_size {
        panic(fmt.Sprintf("invalid register kind: %d", kind))
    } else {
        return mkreg(ptr, K_sys, kind)
    }
}

func mkreg(ptr uint64, kind uint64, name uint64) Reg {
    return Reg(((ptr & _M_ptr) << _B_ptr) | ((kind & _M_kind) << _B_kind) | ((name & _M_name) << _B_name))
}

func Tr(i int) Reg {
    if i < 0 || i >= N_size {
        panic("invalid generic temporary register index")
    } else {
        return mkreg(0, K_temp, uint64(i))
    }
}

func Pr(i int) Reg {
    if i < 0 || i >= N_size {
        panic("invalid pointer temporary register index")
    } else {
        return mkreg(1, K_temp, uint64(i))
    }
}

func Rv(reg hir.Register) Reg {
    switch r := reg.(type) {
        case hir.GenericRegister : if r == hir.Rz { return Rz } else { return mksys(0, uint64(r)) }
        case hir.PointerRegister : if r == hir.Pn { return Pn } else { return mksys(1, uint64(r)) }
        default                  : panic("unreachable")
    }
}

func (self Reg) Ptr() bool {
    return self & _R_ptr != 0
}

func (self Reg) Zero() Reg {
    return (self & _R_ptr) | (K_zero << _B_kind)
}

func (self Reg) Kind() int {
    return int((self & _R_kind) >> _B_kind)
}

func (self Reg) Name() int {
    return int((self & _R_name) >> _B_name)
}

func (self Reg) Index() int {
    return int(self & _R_index)
}

func (self Reg) String() string {
    switch self.Kind() {
        default: {
            if self.Ptr() {
                return fmt.Sprintf("p%d.%d", self.Kind(), self.Index())
            } else {
                return fmt.Sprintf("r%d.%d", self.Kind(), self.Index())
            }
        }

        /* physical registers */
        case K_arch: {
            if i := self.Name(); i >= len(ArchRegs) {
                panic(fmt.Sprintf("invalid physical register index: %d", i))
            } else if self.Index() == 0 {
                return fmt.Sprintf("%%%s", ArchRegNames[ArchRegs[i]])
            } else if self.Ptr() {
                return fmt.Sprintf("p%d:%%%s", self.Index(), ArchRegNames[ArchRegs[i]])
            } else {
                return fmt.Sprintf("r%d:%%%s", self.Index(), ArchRegNames[ArchRegs[i]])
            }
        }

        /* zero registers */
        case K_zero: {
            if self.Ptr() {
                return "nil"
            } else {
                return "zero"
            }
        }

        /* temp registers */
        case K_temp: {
            if self.Ptr() {
                return fmt.Sprintf("tp%d.%d", self.Name(), self.Index())
            } else {
                return fmt.Sprintf("tr%d.%d", self.Name(), self.Index())
            }
        }

        /* SSA normalized registers */
        case K_norm: {
            if self.Ptr() {
                return fmt.Sprintf("p%d", self.Index())
            } else {
                return fmt.Sprintf("r%d", self.Index())
            }
        }
    }
}

func (self Reg) Derive(i int) Reg {
    if self.Kind() == K_zero {
        return self
    } else {
        return (self & (_R_ptr | _R_kind | _R_name)) | Reg(i & _R_index)
    }
}

func (self Reg) Normalize(i int) Reg {
    if self.Kind() == K_zero {
        return self
    } else {
        return (self & _R_ptr) | (K_norm << _B_kind) | Reg(i & _R_index)
    }
}

type IrNode interface {
    fmt.Stringer
    Clone() IrNode
    irnode()
}

type IrImpure interface {
    IrNode
    irimpure()
}

type IrImmovable interface {
    IrNode
    irimmovable()
}
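/*
 * IrImpure and IrImmovable are opt-in marker interfaces: a node joins a
 * category by implementing the corresponding unexported method below. A pass
 * would test membership with a type assertion, e.g. (sketch only, "node" is
 * a placeholder):
 *
 *     if _, ok := node.(IrImpure); ok {
 *         // has side effects: must not be eliminated
 *     }
 *     if _, ok := node.(IrImmovable); ok {
 *         // position-sensitive: must not be reordered
 *     }
 */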
func (*IrPhi)          irnode() {}
func (*IrSwitch)       irnode() {}
func (*IrReturn)       irnode() {}
func (*IrNop)          irnode() {}
func (*IrBreakpoint)   irnode() {}
func (*IrAlias)        irnode() {}
func (*IrEntry)        irnode() {}
func (*IrLoad)         irnode() {}
func (*IrStore)        irnode() {}
func (*IrLoadArg)      irnode() {}
func (*IrConstInt)     irnode() {}
func (*IrConstPtr)     irnode() {}
func (*IrLEA)          irnode() {}
func (*IrUnaryExpr)    irnode() {}
func (*IrBinaryExpr)   irnode() {}
func (*IrBitTestSet)   irnode() {}
func (*IrCallFunc)     irnode() {}
func (*IrCallNative)   irnode() {}
func (*IrCallMethod)   irnode() {}
func (*IrClobberList)  irnode() {}
func (*IrWriteBarrier) irnode() {}
func (*IrSpill)        irnode() {}
func (*IrSlotAlive)    irnode() {}

func (*IrStore)        irimpure() {}
func (*IrCallFunc)     irimpure() {}
func (*IrCallNative)   irimpure() {}
func (*IrCallMethod)   irimpure() {}
func (*IrClobberList)  irimpure() {}
func (*IrWriteBarrier) irimpure() {}
func (*IrSpill)        irimpure() {}

func (*IrLoad)         irimmovable() {}
func (*IrStore)        irimmovable() {}
func (*IrAlias)        irimmovable() {}
func (*IrEntry)        irimmovable() {}
func (*IrLoadArg)      irimmovable() {}
func (*IrClobberList)  irimmovable() {}
func (*IrWriteBarrier) irimmovable() {}
func (*IrSpill)        irimmovable() {}
func (*IrSlotAlive)    irimmovable() {}

type IrUsages interface {
    IrNode
    Usages() []*Reg
}

type IrDefinitions interface {
    IrNode
    Definitions() []*Reg
}

type _PhiSorter struct {
    k []int
    v []*Reg
}

func (self _PhiSorter) Len() int {
    return len(self.k)
}

func (self _PhiSorter) Swap(i int, j int) {
    self.k[i], self.k[j] = self.k[j], self.k[i]
    self.v[i], self.v[j] = self.v[j], self.v[i]
}

func (self _PhiSorter) Less(i int, j int) bool {
    return self.k[i] < self.k[j]
}

type IrPhi struct {
    R Reg
    V map[*BasicBlock]*Reg
}

func (self *IrPhi) Clone() IrNode {
    ret := new(IrPhi)
    ret.V = make(map[*BasicBlock]*Reg, len(self.V))

    /* clone the Phi mappings */
    for b, r := range self.V {
        p := *r
        ret.V[b] = &p
    }

    /* set the dest register */
    ret.R = self.R
    return ret
}

func (self *IrPhi) String() string {
    nb  := len(self.V)
    ret := make([]string, 0, nb)
    phi := make([]struct { int; Reg }, 0, nb)

    /* add each path */
    for bb, reg := range self.V {
        phi = append(phi, struct { int; Reg }{ bb.Id, *reg })
    }

    /* sort by basic block ID */
    sort.Slice(phi, func(i int, j int) bool {
        return phi[i].int < phi[j].int
    })

    /* dump as string */
    for _, p := range phi {
        ret = append(ret, fmt.Sprintf("bb_%d: %s", p.int, p.Reg))
    }

    /* join them together */
    return fmt.Sprintf(
        "%s = φ(%s)",
        self.R,
        strings.Join(ret, ", "),
    )
}

func (self *IrPhi) Usages() []*Reg {
    k := make([]int, 0, len(self.V))
    v := make([]*Reg, 0, len(self.V))

    /* dump the registers */
    for b, r := range self.V {
        v = append(v, r)
        k = append(k, b.Id)
    }

    /* sort by basic block ID */
    sort.Sort(_PhiSorter { k, v })
    return v
}

func (self *IrPhi) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrBranch struct {
    To         *BasicBlock
    Likeliness Likeliness
}

func IrLikely(bb *BasicBlock) *IrBranch {
    return &IrBranch {
        To         : bb,
        Likeliness : Likely,
    }
}

func IrUnlikely(bb *BasicBlock) *IrBranch {
    return &IrBranch {
        To         : bb,
        Likeliness : Unlikely,
    }
}
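/*
 * A phi node dumps as, e.g., "r5 = φ(bb_1: r3, bb_2: r4)": r5 receives r3
 * when control arrives from bb_1 and r4 when it arrives from bb_2 (block IDs
 * and registers here are illustrative). IrLikely and IrUnlikely attach a
 * static branch prediction hint that is carried through to the dump, e.g.
 * "bb_2 (likely)".
 */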
func (self *IrBranch) Clone() *IrBranch {
    return &IrBranch {
        To         : self.To,
        Likeliness : self.Likeliness,
    }
}

func (self *IrBranch) String() string {
    return fmt.Sprintf("bb_%d (%s)", self.To.Id, self.Likeliness)
}

type IrSuccessors interface {
    Next() bool
    Block() *BasicBlock
    Value() (int32, bool)
    Likeliness() Likeliness
    UpdateBlock(bb *BasicBlock)
}

type IrTerminator interface {
    IrNode
    Successors() IrSuccessors
    irterminator()
}

func (*IrSwitch) irterminator() {}
func (*IrReturn) irterminator() {}

type _SwitchTarget struct {
    i int32
    b *IrBranch
}

type _SwitchSuccessors struct {
    i int
    t []_SwitchTarget
}

func (self *_SwitchSuccessors) Next() bool {
    self.i++
    return self.i < len(self.t)
}

func (self *_SwitchSuccessors) Block() *BasicBlock {
    if self.i >= len(self.t) {
        return nil
    } else {
        return self.t[self.i].b.To
    }
}

func (self *_SwitchSuccessors) Value() (int32, bool) {
    if self.i >= len(self.t) - 1 {
        return 0, false
    } else {
        return self.t[self.i].i, true
    }
}

func (self *_SwitchSuccessors) Likeliness() Likeliness {
    if self.i >= len(self.t) {
        return Unlikely
    } else {
        return self.t[self.i].b.Likeliness
    }
}

func (self *_SwitchSuccessors) UpdateBlock(to *BasicBlock) {
    if self.i >= len(self.t) {
        panic("end of iterator")
    } else {
        self.t[self.i].b.To = to
    }
}

type IrSwitch struct {
    V  Reg
    Ln *IrBranch
    Br map[int32]*IrBranch
}

func (self *IrSwitch) iter() *_SwitchSuccessors {
    n := len(self.Br)
    t := make([]_SwitchTarget, 0, n + 1)

    /* add the keys and values */
    for i, b := range self.Br {
        t = append(t, _SwitchTarget {
            i: i,
            b: b,
        })
    }

    /* add the default branch */
    t = append(t, _SwitchTarget {
        i: 0,
        b: self.Ln,
    })

    /* sort by switch value */
    sort.Slice(t[:n], func(i int, j int) bool {
        return t[i].i < t[j].i
    })

    /* construct the iterator */
    return &_SwitchSuccessors {
        t: t,
        i: -1,
    }
}

func (self *IrSwitch) Clone() IrNode {
    ret := new(IrSwitch)
    ret.Br = make(map[int32]*IrBranch, len(self.Br))

    /* clone the switch branches */
    for v, b := range self.Br {
        ret.Br[v] = b.Clone()
    }

    /* set the switch register and default branch */
    ret.V = self.V
    ret.Ln = self.Ln.Clone()
    return ret
}

func (self *IrSwitch) String() string {
    n := len(self.Br)
    r := make([]string, 0, n)

    /* no branches */
    if n == 0 {
        return "goto " + self.Ln.String()
    }

    /* add each case */
    for _, v := range self.iter().t[:n] {
        r = append(r, fmt.Sprintf("    %d => %s,", v.i, v.b))
    }

    /* default branch */
    r = append(r, fmt.Sprintf(
        "    _ => %s,",
        self.Ln,
    ))

    /* join them together */
    return fmt.Sprintf(
        "switch %s {\n%s\n}",
        self.V,
        strings.Join(r, "\n"),
    )
}

func (self *IrSwitch) Usages() []*Reg {
    if len(self.Br) == 0 {
        return nil
    } else {
        return []*Reg { &self.V }
    }
}

func (self *IrSwitch) Successors() IrSuccessors {
    return self.iter()
}
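/*
 * Sketch of how a pass might walk a terminator's successors ("term" is a
 * placeholder for any IrTerminator, not a name used in this package):
 *
 *     for it := term.Successors(); it.Next(); {
 *         bb := it.Block()
 *         if v, ok := it.Value(); ok {
 *             // bb handles the case value v
 *         } else {
 *             // bb is the default (or sole) target
 *         }
 *     }
 *
 * Note the iterator starts before the first element, so Next() must be
 * called before the first Block().
 */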
type _EmptySuccessor struct{}
func (_EmptySuccessor) Next() bool                { return false }
func (_EmptySuccessor) Block() *BasicBlock        { return nil }
func (_EmptySuccessor) Value() (int32, bool)      { return 0, false }
func (_EmptySuccessor) Likeliness() Likeliness    { return Unlikely }
func (_EmptySuccessor) UpdateBlock(_ *BasicBlock) { panic("empty iterator") }

type IrReturn struct {
    R []Reg
}

func (self *IrReturn) Clone() IrNode {
    r := new(IrReturn)
    r.R = make([]Reg, len(self.R))
    copy(r.R, self.R)
    return r
}

func (self *IrReturn) String() string {
    nb := len(self.R)
    ret := make([]string, 0, nb)

    /* dump registers */
    for _, r := range self.R {
        ret = append(ret, r.String())
    }

    /* join them together */
    return fmt.Sprintf(
        "ret {%s}",
        strings.Join(ret, ", "),
    )
}

func (self *IrReturn) Usages() []*Reg {
    return regsliceref(self.R)
}

func (self *IrReturn) Successors() IrSuccessors {
    return _EmptySuccessor{}
}

type (
    IrNop        struct{}
    IrBreakpoint struct{}
)

func (*IrNop)        Clone() IrNode { return new(IrNop) }
func (*IrBreakpoint) Clone() IrNode { return new(IrBreakpoint) }

func (*IrNop)        String() string { return "nop" }
func (*IrBreakpoint) String() string { return "breakpoint" }

type IrAlias struct {
    R Reg
    V Reg
}

func (self *IrAlias) Clone() IrNode {
    panic(`alias node "` + self.String() + `" is not cloneable`)
}

func (self *IrAlias) String() string {
    return fmt.Sprintf("alias %s = %s", self.R, self.V)
}

func (self *IrAlias) Usages() []*Reg {
    return []*Reg { &self.V }
}

func (self *IrAlias) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrEntry struct {
    R []Reg
}

func (self *IrEntry) Clone() IrNode {
    panic(`entry node "` + self.String() + `" is not cloneable`)
}

func (self *IrEntry) String() string {
    return "entry_point " + regslicerepr(self.R)
}

func (self *IrEntry) Definitions() []*Reg {
    return regsliceref(self.R)
}

type IrLoad struct {
    R    Reg
    Mem  Reg
    Size uint8
}

func (self *IrLoad) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrLoad) String() string {
    if self.R.Ptr() {
        return fmt.Sprintf("%s = load.ptr %s", self.R, self.Mem)
    } else {
        return fmt.Sprintf("%s = load.u%d %s", self.R, self.Size * 8, self.Mem)
    }
}

func (self *IrLoad) Usages() []*Reg {
    return []*Reg { &self.Mem }
}

func (self *IrLoad) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrStore struct {
    R    Reg
    Mem  Reg
    Size uint8
}

func (self *IrStore) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrStore) String() string {
    return fmt.Sprintf("store.u%d %s -> *%s", self.Size * 8, self.R, self.Mem)
}

func (self *IrStore) Usages() []*Reg {
    return []*Reg { &self.R, &self.Mem }
}

type IrLoadArg struct {
    R Reg
    I int
}

func (self *IrLoadArg) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrLoadArg) String() string {
    if self.R.Ptr() {
        return fmt.Sprintf("%s = loadarg.ptr #%d", self.R, self.I)
    } else {
        return fmt.Sprintf("%s = loadarg.i64 #%d", self.R, self.I)
    }
}

func (self *IrLoadArg) Definitions() []*Reg {
    return []*Reg { &self.R }
}
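/*
 * Memory and argument accesses dump with explicit widths, for example
 * (register names illustrative): "r3 = load.u32 p1" for a 4-byte IrLoad,
 * "store.u64 r2 -> *p0" for an 8-byte IrStore, and "p0 = loadarg.ptr #0"
 * for an IrLoadArg; pointer loads print as "load.ptr" regardless of Size.
 */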
type IrConstInt struct {
    R Reg
    V int64
}

func (self *IrConstInt) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrConstInt) String() string {
    return fmt.Sprintf("%s = const.i64 %d (%#x)", self.R, self.V, self.V)
}

func (self *IrConstInt) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrConstPtr struct {
    R Reg
    P unsafe.Pointer
    M Constness
}

func (self *IrConstPtr) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrConstPtr) String() string {
    return fmt.Sprintf("%s = const.ptr (%s)%p [%s]", self.R, self.M, self.P, rt.FuncName(self.P))
}

func (self *IrConstPtr) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrLEA struct {
    R   Reg
    Mem Reg
    Off Reg
}

func (self *IrLEA) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrLEA) String() string {
    return fmt.Sprintf("%s = &(%s)[%s]", self.R, self.Mem, self.Off)
}

func (self *IrLEA) Usages() []*Reg {
    return []*Reg { &self.Mem, &self.Off }
}

func (self *IrLEA) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type (
    IrUnaryOp  uint8
    IrBinaryOp uint8
)

const (
    IrOpNegate IrUnaryOp = iota
    IrOpSwap16
    IrOpSwap32
    IrOpSwap64
    IrOpSx32to64
)

const (
    IrOpAdd IrBinaryOp = iota
    IrOpSub
    IrOpMul
    IrOpAnd
    IrOpOr
    IrOpXor
    IrOpShr
    IrCmpEq
    IrCmpNe
    IrCmpLt
    IrCmpLtu
    IrCmpGeu
)

func (self IrUnaryOp) String() string {
    switch self {
        case IrOpNegate   : return "negate"
        case IrOpSwap16   : return "bswap16"
        case IrOpSwap32   : return "bswap32"
        case IrOpSwap64   : return "bswap64"
        case IrOpSx32to64 : return "sign_extend_32_to_64"
        default           : panic("unreachable")
    }
}

func (self IrBinaryOp) String() string {
    switch self {
        case IrOpAdd  : return "+"
        case IrOpSub  : return "-"
        case IrOpMul  : return "*"
        case IrOpAnd  : return "&"
        case IrOpOr   : return "|"
        case IrOpXor  : return "^"
        case IrOpShr  : return ">>"
        case IrCmpEq  : return "=="
        case IrCmpNe  : return "!="
        case IrCmpLt  : return "<"
        case IrCmpLtu : return "<#"
        case IrCmpGeu : return ">=#"
        default       : panic("unreachable")
    }
}

type IrUnaryExpr struct {
    R  Reg
    V  Reg
    Op IrUnaryOp
}

func (self *IrUnaryExpr) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrUnaryExpr) String() string {
    return fmt.Sprintf("%s = %s %s", self.R, self.Op, self.V)
}

func (self *IrUnaryExpr) Usages() []*Reg {
    return []*Reg { &self.V }
}

func (self *IrUnaryExpr) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrBinaryExpr struct {
    R  Reg
    X  Reg
    Y  Reg
    Op IrBinaryOp
}

func IrCopy(r Reg, v Reg) IrNode {
    switch {
        case r.Ptr() && v.Ptr()   : return &IrLEA { R: r, Mem: v, Off: Rz }
        case !r.Ptr() && !v.Ptr() : return &IrBinaryExpr { R: r, X: v, Y: Rz, Op: IrOpAdd }
        default                   : panic("copy between different kinds of registers")
    }
}

func IrTryIntoCopy(v IrNode) (Reg, Reg, bool) {
    if p, ok := v.(*IrAlias); ok {
        return p.R, p.V, true
    } else if p, ok := v.(*IrLEA); ok && p.Off == Rz {
        return p.R, p.Mem, true
    } else if p, ok := v.(*IrBinaryExpr); ok && p.Y == Rz && p.Op == IrOpAdd {
        return p.R, p.X, true
    } else {
        return 0, 0, false
    }
}
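/*
 * Register-to-register copies have no dedicated opcode. IrCopy canonicalizes
 * a pointer copy as an IrLEA with zero offset ("dst = &(src)[zero]") and an
 * integer copy as an IrBinaryExpr adding Rz ("dst = src + zero");
 * IrTryIntoCopy is the inverse pattern match, recognizing those two shapes
 * as well as IrAlias nodes.
 */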
func (self *IrBinaryExpr) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrBinaryExpr) String() string {
    return fmt.Sprintf("%s = %s %s %s", self.R, self.X, self.Op, self.Y)
}

func (self *IrBinaryExpr) Usages() []*Reg {
    return []*Reg { &self.X, &self.Y }
}

func (self *IrBinaryExpr) Definitions() []*Reg {
    return []*Reg { &self.R }
}

type IrBitTestSet struct {
    T Reg
    S Reg
    X Reg
    Y Reg
}

func (self *IrBitTestSet) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrBitTestSet) String() string {
    return fmt.Sprintf("t.%s, s.%s = bts %s, %s", self.T, self.S, self.X, self.Y)
}

func (self *IrBitTestSet) Usages() []*Reg {
    return []*Reg { &self.X, &self.Y }
}

func (self *IrBitTestSet) Definitions() []*Reg {
    return []*Reg { &self.T, &self.S }
}

type IrCallFunc struct {
    R    Reg
    In   []Reg
    Out  []Reg
    Func *abi.FunctionLayout
}

func (self *IrCallFunc) Clone() IrNode {
    r := new(IrCallFunc)
    r.R = self.R
    r.In = make([]Reg, len(self.In))
    r.Out = make([]Reg, len(self.Out))
    r.Func = self.Func
    copy(r.In, self.In)
    copy(r.Out, self.Out)
    return r
}

func (self *IrCallFunc) String() string {
    if in := regslicerepr(self.In); len(self.Out) == 0 {
        return fmt.Sprintf("gcall *%s, {%s}", self.R, in)
    } else {
        return fmt.Sprintf("%s = gcall *%s, {%s}", regslicerepr(self.Out), self.R, in)
    }
}

func (self *IrCallFunc) Usages() []*Reg {
    return append(regsliceref(self.In), &self.R)
}

func (self *IrCallFunc) Definitions() []*Reg {
    return regsliceref(self.Out)
}

type IrCallNative struct {
    R   Reg
    In  []Reg
    Out Reg
}

func (self *IrCallNative) Clone() IrNode {
    r := new(IrCallNative)
    r.R = self.R
    r.In = make([]Reg, len(self.In))
    r.Out = self.Out
    copy(r.In, self.In)
    return r
}

func (self *IrCallNative) String() string {
    if in := regslicerepr(self.In); self.Out.Kind() == K_zero {
        return fmt.Sprintf("ccall *%s, {%s}", self.R, in)
    } else {
        return fmt.Sprintf("%s = ccall *%s, {%s}", self.Out, self.R, in)
    }
}

func (self *IrCallNative) Usages() []*Reg {
    return append(regsliceref(self.In), &self.R)
}

func (self *IrCallNative) Definitions() []*Reg {
    if self.Out.Kind() == K_zero {
        return nil
    } else {
        return []*Reg { &self.Out }
    }
}

type IrCallMethod struct {
    T    Reg
    V    Reg
    In   []Reg
    Out  []Reg
    Slot int
    Func *abi.FunctionLayout
}

func (self *IrCallMethod) Clone() IrNode {
    r := new(IrCallMethod)
    r.T = self.T
    r.V = self.V
    r.In = make([]Reg, len(self.In))
    r.Out = make([]Reg, len(self.Out))
    r.Slot = self.Slot
    r.Func = self.Func
    copy(r.In, self.In)
    copy(r.Out, self.Out)
    return r
}

func (self *IrCallMethod) String() string {
    if in := regslicerepr(self.In); len(self.Out) == 0 {
        return fmt.Sprintf("icall #%d, (%s:%s), {%s}", self.Slot, self.T, self.V, in)
    } else {
        return fmt.Sprintf("%s = icall #%d, (%s:%s), {%s}", regslicerepr(self.Out), self.Slot, self.T, self.V, in)
    }
}

func (self *IrCallMethod) Usages() []*Reg {
    return append(regsliceref(self.In), &self.T, &self.V)
}

func (self *IrCallMethod) Definitions() []*Reg {
    return regsliceref(self.Out)
}

type IrClobberList struct {
    R []Reg
}

func IrMarkClobber(r ...Reg) *IrClobberList {
    return &IrClobberList {
        R: regsliceclone(r),
    }
}
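/*
 * The three call nodes print with distinct mnemonics: IrCallFunc dumps as
 * "gcall *R, {in}" (a call through the code pointer in R, described by
 * Func), IrCallNative as "ccall *R, {in}" (a native call with at most one
 * result register), and IrCallMethod as "icall #Slot, (T:V), {in}" (a
 * dispatch through method slot Slot on the interface pair T:V). All three
 * are marked impure above. An IrClobberList dumps as "drop {regs}" and only
 * records extra register usages; it defines nothing.
 */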
func (self *IrClobberList) Clone() IrNode {
    r := new(IrClobberList)
    r.R = make([]Reg, len(self.R))
    copy(r.R, self.R)
    return r
}

func (self *IrClobberList) String() string {
    return "drop " + regslicerepr(self.R)
}

func (self *IrClobberList) Usages() []*Reg {
    return regsliceref(self.R)
}

type IrWriteBarrier struct {
    R   Reg
    M   Reg
    Fn  Reg
    Var Reg
}

func (self *IrWriteBarrier) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrWriteBarrier) String() string {
    return fmt.Sprintf("write_barrier (%s:%s), %s -> *%s", self.Var, self.Fn, self.R, self.M)
}

func (self *IrWriteBarrier) Usages() []*Reg {
    return []*Reg { &self.R, &self.M, &self.Var, &self.Fn }
}

type (
    IrSpillOp   uint8
    IrSpillSlot uint64
)

const (
    IrSpillStore IrSpillOp = iota
    IrSpillReload
)

func mkspillslot(id int, ptr bool) IrSpillSlot {
    return IrSpillSlot((id << 1) | bool2int(ptr))
}

func (self IrSpillOp) String() string {
    switch self {
        case IrSpillStore  : return "store"
        case IrSpillReload : return "reload"
        default            : panic("invalid spill op")
    }
}

func (self IrSpillSlot) ID() int {
    return int(self >> 1)
}

func (self IrSpillSlot) IsPtr() bool {
    return int2bool(int(self & 1))
}

func (self IrSpillSlot) String() string {
    if self.IsPtr() {
        return fmt.Sprintf("{slot %d.p}", self.ID())
    } else {
        return fmt.Sprintf("{slot %d.i}", self.ID())
    }
}

type IrSpill struct {
    R  Reg
    S  IrSpillSlot
    Op IrSpillOp
}

func IrCreateSpill(reg Reg, id int, op IrSpillOp) IrNode {
    return IrCreateSpillEx(reg, reg.Ptr(), id, op)
}

func IrCreateSpillEx(reg Reg, ptr bool, id int, op IrSpillOp) IrNode {
    return &IrSpill {
        R  : reg,
        S  : mkspillslot(id, ptr),
        Op : op,
    }
}

func (self *IrSpill) Clone() IrNode {
    r := *self
    return &r
}

func (self *IrSpill) String() string {
    switch self.Op {
        case IrSpillStore  : return fmt.Sprintf("spill %s -> %s", self.R, self.S)
        case IrSpillReload : return fmt.Sprintf("%s = reload %s", self.R, self.S)
        default            : panic("invalid spill op")
    }
}

func (self *IrSpill) Usages() []*Reg {
    switch self.Op {
        case IrSpillStore  : return []*Reg { &self.R }
        case IrSpillReload : return nil
        default            : panic("invalid spill op")
    }
}

func (self *IrSpill) Definitions() []*Reg {
    switch self.Op {
        case IrSpillStore  : return nil
        case IrSpillReload : return []*Reg { &self.R }
        default            : panic("invalid spill op")
    }
}

type IrSlotAlive struct {
    S []IrSpillSlot
}

func IrSlotGen(s IrSpillSlot) IrNode {
    return &IrSlotAlive {
        S: []IrSpillSlot { s },
    }
}
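/*
 * Worked example of the slot encoding: the low bit tags pointer slots and
 * the remaining bits hold the slot ID, so mkspillslot(3, true) yields
 * (3 << 1) | 1 == 7 and prints as "{slot 3.p}". A store/reload pair for
 * that slot (register p5 is arbitrary here) dumps as:
 *
 *     spill p5 -> {slot 3.p}
 *     p5 = reload {slot 3.p}
 */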
func (self *IrSlotAlive) Clone() IrNode {
    r := new(IrSlotAlive)
    r.S = make([]IrSpillSlot, len(self.S))
    copy(r.S, self.S)
    return r
}

func (self *IrSlotAlive) String() string {
    nb := len(self.S)
    sv := make([]string, 0, nb)
    ss := make([]IrSpillSlot, nb)

    /* sort the slots (the cast reinterprets []IrSpillSlot as []int, which
     * is safe on the 64-bit targets this package supports) */
    copy(ss, self.S)
    sort.Ints(*(*[]int)(unsafe.Pointer(&ss)))

    /* dump the slots */
    for _, v := range ss {
        sv = append(sv, v.String())
    }

    /* join them together */
    return fmt.Sprintf(
        "mark_alive %s",
        strings.Join(sv, ", "),
    )
}
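/*
 * Quick reference for the dump syntax defined in this file (all registers,
 * blocks and slots below are illustrative):
 *
 *     r5 = φ(bb_1: r3, bb_2: r4)        phi node
 *     switch r2 {                       multi-way terminator
 *         0 => bb_2 (likely),
 *         _ => bb_3 (unlikely),
 *     }
 *     ret {r0, p1}                      return terminator
 *     spill p5 -> {slot 3.p}            register spill
 *     mark_alive {slot 3.p}             spill-slot liveness marker
 */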