github.com/tetratelabs/wazero@v1.7.3-0.20240513003603-48f702e154b5/internal/engine/wazevo/backend/isa/amd64/operands.go

package amd64

import (
	"fmt"
	"unsafe"

	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
)

type operand struct {
	kind operandKind
	data uint64
}

type operandKind byte

const (
	// operandKindReg is an operand which is an integer register.
	operandKindReg operandKind = iota + 1

	// operandKindMem is a value in memory.
	// 32, 64, or 128 bit value.
	operandKindMem

	// operandKindImm32 is a signed-32-bit integer immediate value.
	operandKindImm32

	// operandKindLabel is a label.
	operandKindLabel
)

// String implements fmt.Stringer.
func (o operandKind) String() string {
	switch o {
	case operandKindReg:
		return "reg"
	case operandKindMem:
		return "mem"
	case operandKindImm32:
		return "imm32"
	case operandKindLabel:
		return "label"
	default:
		panic("BUG: invalid operand kind")
	}
}

// format returns the string representation of the operand.
// _64 is only relevant when the operand is an integer register.
func (o *operand) format(_64 bool) string {
	switch o.kind {
	case operandKindReg:
		return formatVRegSized(o.reg(), _64)
	case operandKindMem:
		return o.addressMode().String()
	case operandKindImm32:
		return fmt.Sprintf("$%d", int32(o.imm32()))
	case operandKindLabel:
		return backend.Label(o.imm32()).String()
	default:
		panic(fmt.Sprintf("BUG: invalid operand: %s", o.kind))
	}
}

//go:inline
func (o *operand) reg() regalloc.VReg {
	return regalloc.VReg(o.data)
}

//go:inline
func (o *operand) setReg(r regalloc.VReg) {
	o.data = uint64(r)
}

//go:inline
func (o *operand) addressMode() *amode {
	return wazevoapi.PtrFromUintptr[amode](uintptr(o.data))
}

//go:inline
func (o *operand) imm32() uint32 {
	return uint32(o.data)
}

func (o *operand) label() backend.Label {
	switch o.kind {
	case operandKindLabel:
		return backend.Label(o.data)
	case operandKindMem:
		mem := o.addressMode()
		if mem.kind() != amodeRipRel {
			panic("BUG: invalid label")
		}
		return backend.Label(mem.imm32)
	default:
		panic("BUG: invalid operand kind")
	}
}

func newOperandLabel(label backend.Label) operand {
	return operand{kind: operandKindLabel, data: uint64(label)}
}

func newOperandReg(r regalloc.VReg) operand {
	return operand{kind: operandKindReg, data: uint64(r)}
}

func newOperandImm32(imm32 uint32) operand {
	return operand{kind: operandKindImm32, data: uint64(imm32)}
}

func newOperandMem(amode *amode) operand {
	return operand{kind: operandKindMem, data: uint64(uintptr(unsafe.Pointer(amode)))}
}
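// For illustration, each constructor above packs a different payload into the
// single 64-bit data field, discriminated by kind. With hypothetical values
// v (a regalloc.VReg), a (an *amode allocated from the machine's amodePool),
// and l (a backend.Label):
//
//	newOperandReg(v)    // data = uint64(v),            kind = operandKindReg
//	newOperandMem(a)    // data = address of the amode, kind = operandKindMem
//	newOperandImm32(16) // data = zero-extended imm32,  kind = operandKindImm32
//	newOperandLabel(l)  // data = uint64(l),            kind = operandKindLabel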
// amode is a memory operand (addressing mode).
type amode struct {
	kindWithShift uint32
	imm32         uint32
	base          regalloc.VReg

	// For amodeRegRegShift:
	index regalloc.VReg
}

type amodeKind byte

const (
	// amodeImmReg calculates sign-extend-32-to-64(Immediate) + base
	amodeImmReg amodeKind = iota + 1

	// amodeImmRBP is the same as amodeImmReg, but the base register is fixed to RBP.
	// The only difference is that it doesn't tell the register allocator that RBP is used,
	// which would only distract the register allocator.
	amodeImmRBP

	// amodeRegRegShift calculates sign-extend-32-to-64(Immediate) + base + (Register2 << Shift)
	amodeRegRegShift

	// amodeRipRel is a RIP-relative addressing mode specified by the label.
	amodeRipRel

	// TODO: there are other addressing modes such as the one without base register.
)

func (a *amode) kind() amodeKind {
	return amodeKind(a.kindWithShift & 0xff)
}

func (a *amode) shift() byte {
	return byte(a.kindWithShift >> 8)
}

func (a *amode) uses(rs *[]regalloc.VReg) {
	switch a.kind() {
	case amodeImmReg:
		*rs = append(*rs, a.base)
	case amodeRegRegShift:
		*rs = append(*rs, a.base, a.index)
	case amodeImmRBP, amodeRipRel:
	default:
		panic("BUG: invalid amode kind")
	}
}

func (a *amode) nregs() int {
	switch a.kind() {
	case amodeImmReg:
		return 1
	case amodeRegRegShift:
		return 2
	case amodeImmRBP, amodeRipRel:
		return 0
	default:
		panic("BUG: invalid amode kind")
	}
}

func (a *amode) assignUses(i int, reg regalloc.VReg) {
	switch a.kind() {
	case amodeImmReg:
		if i == 0 {
			a.base = reg
		} else {
			panic("BUG: invalid amode assignment")
		}
	case amodeRegRegShift:
		if i == 0 {
			a.base = reg
		} else if i == 1 {
			a.index = reg
		} else {
			panic("BUG: invalid amode assignment")
		}
	default:
		panic("BUG: invalid amode assignment")
	}
}

func (m *machine) newAmodeImmReg(imm32 uint32, base regalloc.VReg) *amode {
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeImmReg), imm32: imm32, base: base}
	return ret
}

func (m *machine) newAmodeImmRBPReg(imm32 uint32) *amode {
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeImmRBP), imm32: imm32, base: rbpVReg}
	return ret
}

func (m *machine) newAmodeRegRegShift(imm32 uint32, base, index regalloc.VReg, shift byte) *amode {
	if shift > 3 {
		panic(fmt.Sprintf("BUG: invalid shift (must be <= 3): %d", shift))
	}
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeRegRegShift) | uint32(shift)<<8, imm32: imm32, base: base, index: index}
	return ret
}

func (m *machine) newAmodeRipRel(label backend.Label) *amode {
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeRipRel), imm32: uint32(label)}
	return ret
}
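// For illustration, the constructors above pack the amode kind into the low
// 8 bits of kindWithShift and, for amodeRegRegShift, the scale shift into
// bits 8-15, which kind() and shift() read back. With hypothetical virtual
// registers base and index:
//
//	a := m.newAmodeRegRegShift(16, base, index, 2)
//	a.kind()  // amodeRegRegShift (low byte)
//	a.shift() // 2, i.e. a scale of 1<<2 = 4
//
// corresponding to the x86 address base + index*4 + 16.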
// String implements fmt.Stringer.
func (a *amode) String() string {
	switch a.kind() {
	case amodeImmReg, amodeImmRBP:
		if a.imm32 == 0 {
			return fmt.Sprintf("(%s)", formatVRegSized(a.base, true))
		}
		return fmt.Sprintf("%d(%s)", int32(a.imm32), formatVRegSized(a.base, true))
	case amodeRegRegShift:
		shift := 1 << a.shift()
		if a.imm32 == 0 {
			return fmt.Sprintf(
				"(%s,%s,%d)",
				formatVRegSized(a.base, true), formatVRegSized(a.index, true), shift)
		}
		return fmt.Sprintf(
			"%d(%s,%s,%d)",
			int32(a.imm32), formatVRegSized(a.base, true), formatVRegSized(a.index, true), shift)
	case amodeRipRel:
		return fmt.Sprintf("%s(%%rip)", backend.Label(a.imm32))
	default:
		panic("BUG: invalid amode kind")
	}
}

func (m *machine) getOperand_Mem_Reg(def *backend.SSAValueDefinition) (op operand) {
	if def.IsFromBlockParam() {
		return newOperandReg(def.BlkParamVReg)
	}

	if def.SSAValue().Type() == ssa.TypeV128 {
		// SIMD instructions require strict memory alignment, so we don't support the memory operand for V128 at the moment.
		return m.getOperand_Reg(def)
	}

	if m.c.MatchInstr(def, ssa.OpcodeLoad) {
		instr := def.Instr
		ptr, offset, _ := instr.LoadData()
		op = newOperandMem(m.lowerToAddressMode(ptr, offset))
		instr.MarkLowered()
		return op
	}
	return m.getOperand_Reg(def)
}

func (m *machine) getOperand_Mem_Imm32_Reg(def *backend.SSAValueDefinition) (op operand) {
	if def.IsFromBlockParam() {
		return newOperandReg(def.BlkParamVReg)
	}

	if m.c.MatchInstr(def, ssa.OpcodeLoad) {
		instr := def.Instr
		ptr, offset, _ := instr.LoadData()
		op = newOperandMem(m.lowerToAddressMode(ptr, offset))
		instr.MarkLowered()
		return op
	}
	return m.getOperand_Imm32_Reg(def)
}

func (m *machine) getOperand_Imm32_Reg(def *backend.SSAValueDefinition) (op operand) {
	if def.IsFromBlockParam() {
		return newOperandReg(def.BlkParamVReg)
	}

	instr := def.Instr
	if instr.Constant() {
		// If the operation is 64-bit, x64 sign-extends the 32-bit immediate value.
		// Therefore, we can only use the immediate if the value fits in 32 bits and,
		// unless the operation is 32-bit, its sign bit is not set.
		if op, ok := asImm32Operand(instr.ConstantVal(), instr.Return().Type() == ssa.TypeI32); ok {
			instr.MarkLowered()
			return op
		}
	}
	return m.getOperand_Reg(def)
}

func asImm32Operand(val uint64, allowSignExt bool) (operand, bool) {
	if imm32, ok := asImm32(val, allowSignExt); ok {
		return newOperandImm32(imm32), true
	}
	return operand{}, false
}

func asImm32(val uint64, allowSignExt bool) (uint32, bool) {
	u32val := uint32(val)
	if uint64(u32val) != val {
		return 0, false
	}
	if !allowSignExt && u32val&0x80000000 != 0 {
		return 0, false
	}
	return u32val, true
}

func (m *machine) getOperand_Reg(def *backend.SSAValueDefinition) (op operand) {
	var v regalloc.VReg
	if def.IsFromBlockParam() {
		v = def.BlkParamVReg
	} else {
		instr := def.Instr
		if instr.Constant() {
			// We inline all the constant instructions so that we can reduce register usage.
			v = m.lowerConstant(instr)
			instr.MarkLowered()
		} else {
			if n := def.N; n == 0 {
				v = m.c.VRegOf(instr.Return())
			} else {
				_, rs := instr.Returns()
				v = m.c.VRegOf(rs[n-1])
			}
		}
	}
	return newOperandReg(v)
}
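// For illustration, the getOperand_* helpers above form a hierarchy of
// fallbacks, each widening the set of operand forms it may produce for a
// given SSA value definition:
//
//	getOperand_Reg:           always a register operand.
//	getOperand_Imm32_Reg:     an imm32 operand if the definition is a
//	                          constant that passes asImm32, else a register.
//	getOperand_Mem_Imm32_Reg: a memory operand if the definition is a
//	                          mergeable OpcodeLoad, else imm32 or register.
//	getOperand_Mem_Reg:       a memory operand for a mergeable load, else a
//	                          register; V128 values always use a register
//	                          since SIMD memory operands require alignment.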