github.com/cloudwego/frugal@v0.1.15/internal/atm/ssa/pass_operandalloc_amd64.go

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ssa

// OperandAlloc for AMD64 converts 3-operand or 2-operand pseudo-instructions
// to 2-operand or 1-operand real instructions.
type OperandAlloc struct{}

func (OperandAlloc) Apply(cfg *CFG) {
    cfg.PostOrder().ForEach(func(bb *BasicBlock) {
        ins := bb.Ins
        bb.Ins = make([]IrNode, 0, len(ins))

        /* check for every instruction */
        for _, v := range ins {
            switch p := v.(type) {
                default: {
                    bb.Ins = append(bb.Ins, v)
                }

                /* negation */
                case *IrAMD64_NEG: {
                    if p.R == p.V {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        /* insert a copy "R = V", then negate R in place */
                        bb.Ins, p.V = append(bb.Ins, IrArchCopy(p.R, p.V), v), p.R
                    }
                }

                /* byte swap */
                case *IrAMD64_BSWAP: {
                    if p.R == p.V {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        bb.Ins, p.V = append(bb.Ins, IrArchCopy(p.R, p.V), v), p.R
                    }
                }

                /* binary operations, register to register */
                case *IrAMD64_BinOp_rr: {
                    if p.R == p.X {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        bb.Ins, p.X = append(bb.Ins, IrArchCopy(p.R, p.X), v), p.R
                    }
                }

                /* binary operations, register to immediate; multiplication is
                 * exempt because IMUL has a native 3-operand immediate form */
                case *IrAMD64_BinOp_ri: {
                    if p.R == p.X || p.Op == IrAMD64_BinMul {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        bb.Ins, p.X = append(bb.Ins, IrArchCopy(p.R, p.X), v), p.R
                    }
                }

                /* binary operations, register to memory */
                case *IrAMD64_BinOp_rm: {
                    if p.R == p.X {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        bb.Ins, p.X = append(bb.Ins, IrArchCopy(p.R, p.X), v), p.R
                    }
                }

                /* bit test and set, register to register */
                case *IrAMD64_BTSQ_rr: {
                    if p.S == p.X {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        bb.Ins, p.X = append(bb.Ins, IrArchCopy(p.S, p.X), v), p.S
                    }
                }

                /* bit test and set, register to immediate */
                case *IrAMD64_BTSQ_ri: {
                    if p.S == p.X {
                        bb.Ins = append(bb.Ins, v)
                    } else {
                        bb.Ins, p.X = append(bb.Ins, IrArchCopy(p.S, p.X), v), p.S
                    }
                }
            }
        }
    })
}
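
// Illustration (a sketch, not part of the upstream source): using made-up
// virtual registers, the pass lowers a 3-operand pseudo-instruction such as
//
//     r2 = ADDQ r0, r1        // destination differs from the first source
//
// into the 2-operand shape that AMD64 actually encodes:
//
//     r2 = r0                 // IrArchCopy(p.R, p.X)
//     r2 = ADDQ r2, r1        // same node, with p.X rewritten to p.R
//
// When the destination already equals the first source operand, the copy is
// skipped and the instruction is emitted unchanged.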