// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64 ...) => (ADD ...)
(AddPtr ...) => (ADD ...)
(Add32 ...) => (ADD ...)
(Add16 ...) => (ADD ...)
(Add8 ...) => (ADD ...)
(Add32F ...) => (FADDS ...)
(Add64F ...) => (FADDD ...)

(Sub64 ...) => (SUB ...)
(SubPtr ...) => (SUB ...)
(Sub32 ...) => (SUB ...)
(Sub16 ...) => (SUB ...)
(Sub8 ...) => (SUB ...)
(Sub32F ...) => (FSUBS ...)
(Sub64F ...) => (FSUBD ...)

(Mul64 ...) => (MUL ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)
(Mul64uover ...) => (LoweredMuluover ...)
(Mul32 ...) => (MULW ...)
(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
(Mul32F ...) => (FMULS ...)
(Mul64F ...) => (FMULD ...)

(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIVD ...)

(Div64 x y [false]) => (DIV x y)
(Div64u ...) => (DIVU ...)
(Div32 x y [false]) => (DIVW x y)
(Div32u ...) => (DIVUW ...)
(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64 ...) => (MULH ...)
(Hmul64u ...) => (MULHU ...)
(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))

(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
(Select1 (Add64carry x y c)) =>
    (OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))

(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
(Select1 (Sub64borrow x y c)) =>
    (OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))

// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))

(Mod64 x y [false]) => (REM x y)
(Mod64u ...) => (REMU ...)
(Mod32 x y [false]) => (REMW x y)
(Mod32u ...) => (REMUW ...)
(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(And64 ...) => (AND ...)
(And32 ...) => (AND ...)
(And16 ...) => (AND ...)
(And8 ...) => (AND ...)

(Or64 ...) => (OR ...)
(Or32 ...) => (OR ...)
(Or16 ...) => (OR ...)
(Or8 ...) => (OR ...)

(Xor64 ...) => (XOR ...)
(Xor32 ...) => (XOR ...)
(Xor16 ...) => (XOR ...)
(Xor8 ...) => (XOR ...)

(Neg64 ...) => (NEG ...)
(Neg32 ...) => (NEG ...)
(Neg16 ...) => (NEG ...)
(Neg8 ...) => (NEG ...)
(Neg32F ...) => (FNEGS ...)
(Neg64F ...) => (FNEGD ...)

(Com64 ...) => (NOT ...)
(Com32 ...) => (NOT ...)
(Com16 ...) => (NOT ...)
(Com8 ...) => (NOT ...)

(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)

(Copysign ...) => (FSGNJD ...)

(Abs ...) => (FABSD ...)

(FMA ...) => (FMADDD ...)
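
// Illustrative reading of the Add64carry/Sub64borrow lowerings above (not
// part of the rules), treating the carry and borrow inputs as 0 or 1 and
// using unsigned compares, as SLTU does:
//
//     sum    = x + y + c
//     carry  = (x+y < x) | (x+y+c < x+y)
//
//     diff   = x - y - c
//     borrow = (x < x-y) | (x-y < (x-y)-c)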

// Sign and zero extension.

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
(Cvt64to32F ...) => (FCVTSL ...)
(Cvt64to64F ...) => (FCVTDL ...)

(Cvt32Fto32 ...) => (FCVTWS ...)
(Cvt32Fto64 ...) => (FCVTLS ...)
(Cvt64Fto32 ...) => (FCVTWD ...)
(Cvt64Fto64 ...) => (FCVTLD ...)

(Cvt32Fto64F ...) => (FCVTDS ...)
(Cvt64Fto32F ...) => (FCVTSD ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round32F ...) => (Copy ...)
(Round64F ...) => (Copy ...)

(Slicemask <t> x) => (SRAI [63] (NEG <t> x))

// Truncations
// We ignore the unused high parts of registers, so truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Shifts

// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
//
// Breaking down the operation:
//
// (SLL x y) generates x << (y & 63).
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
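//
// Worked example (illustrative, not part of the rules): for y = 70,
// (SLL x y) computes x << (70 & 63) = x << 6, but the Go result must be 0.
// (SLTIU [64] y) is 0 since 70 >= 64, its negation is 0, and the AND in the
// rules below forces the result to 0. For y = 3, (SLTIU [64] y) is 1, the
// negation is all ones, and the shifted value passes through unchanged.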
(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)

// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0. See Lsh above for a detailed description.
(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)

// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
// be either 0 or -1 based on the sign bit.
//
// We implement this by performing the max shift (-1) if y >= 64.
//
// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
// us with -1 (0xffff...) if y >= 64.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 6 bits SRA cares about.
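//
// Worked example (illustrative, not part of the rules): for y = 70,
// (SLTIU [64] y) is 0, the ADDI [-1] gives -1, and OR-ing -1 into y
// produces -1, so SRA shifts by 63 and yields 0 or -1 depending on the
// sign of x. For y = 3, (SLTIU [64] y) is 1, the ADDI [-1] gives 0, and
// y is passed to SRA unchanged.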
(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))

(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)

// Rotates.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 <t> x (MOVDconst [c])) => (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
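
// Illustrative identity behind the constant-rotate rules above (not part
// of the rules): rotating an 8-bit value left by c is
//
//     (x << (c & 7)) | (x >> ((8 - c) & 7))
//
// and (8 - c) & 7 == -c & 7 in two's complement, which is the form used
// above. For c == 3 this is x<<3 | x>>5; for c a multiple of 8 both shift
// amounts are 0 and the OR returns x unchanged.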

(Less64 ...) => (SLT ...)
(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
(Less64U ...) => (SLTU ...)
(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Less64F ...) => (FLTD ...)
(Less32F ...) => (FLTS ...)

// Convert x <= y to !(y > x).
(Leq64 x y) => (Not (Less64 y x))
(Leq32 x y) => (Not (Less32 y x))
(Leq16 x y) => (Not (Less16 y x))
(Leq8 x y) => (Not (Less8 y x))
(Leq64U x y) => (Not (Less64U y x))
(Leq32U x y) => (Not (Less32U y x))
(Leq16U x y) => (Not (Less16U y x))
(Leq8U x y) => (Not (Less8U y x))
(Leq64F ...) => (FLED ...)
(Leq32F ...) => (FLES ...)

(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq64F ...) => (FEQD ...)
(Eq32F ...) => (FEQS ...)

(NeqPtr x y) => (SNEZ (SUB <typ.Uintptr> x y))
(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
(Neq32 x y) => (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Neq8 x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Neq64F ...) => (FNED ...)
(Neq32F ...) => (FNES ...)

// Loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// Stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)

// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
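//
// For example (illustrative only), a byte load of a local addressed as
// (MOVBUload [8] (MOVaddr {sym} (SP)) mem) is rewritten by the rules below
// to (MOVBUload [8] {sym} (SP) mem), so the load itself carries the symbol
// it reads.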
(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
    (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
    (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
    (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
    (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
    (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVBUload [off1+int32(off2)] {sym} base mem)
(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVBload [off1+int32(off2)] {sym} base mem)
(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVHUload [off1+int32(off2)] {sym} base mem)
(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVHload [off1+int32(off2)] {sym} base mem)
(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVWUload [off1+int32(off2)] {sym} base mem)
(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVWload [off1+int32(off2)] {sym} base mem)
(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
    (MOVDload [off1+int32(off2)] {sym} base mem)

(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
    (MOVBstore [off1+int32(off2)] {sym} base val mem)
(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
    (MOVHstore [off1+int32(off2)] {sym} base val mem)
(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
    (MOVWstore [off1+int32(off2)] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
    (MOVDstore [off1+int32(off2)] {sym} base val mem)
(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)

// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)

// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
    (MOVHstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) =>
    (MOVBstore [1] ptr (MOVDconst [0])
        (MOVBstore ptr (MOVDconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
    (MOVWstore ptr (MOVDconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
    (MOVHstore [2] ptr (MOVDconst [0])
        (MOVHstore ptr (MOVDconst [0]) mem))
(Zero [4] ptr mem) =>
    (MOVBstore [3] ptr (MOVDconst [0])
        (MOVBstore [2] ptr (MOVDconst [0])
            (MOVBstore [1] ptr (MOVDconst [0])
                (MOVBstore ptr (MOVDconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
    (MOVWstore [4] ptr (MOVDconst [0])
        (MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
    (MOVHstore [6] ptr (MOVDconst [0])
        (MOVHstore [4] ptr (MOVDconst [0])
            (MOVHstore [2] ptr (MOVDconst [0])
                (MOVHstore ptr (MOVDconst [0]) mem))))

(Zero [3] ptr mem) =>
    (MOVBstore [2] ptr (MOVDconst [0])
        (MOVBstore [1] ptr (MOVDconst [0])
            (MOVBstore ptr (MOVDconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
    (MOVHstore [4] ptr (MOVDconst [0])
        (MOVHstore [2] ptr (MOVDconst [0])
            (MOVHstore ptr (MOVDconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
    (MOVWstore [8] ptr (MOVDconst [0])
        (MOVWstore [4] ptr (MOVDconst [0])
            (MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [8] ptr (MOVDconst [0])
        (MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [16] ptr (MOVDconst [0])
        (MOVDstore [8] ptr (MOVDconst [0])
            (MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [24] ptr (MOVDconst [0])
        (MOVDstore [16] ptr (MOVDconst [0])
            (MOVDstore [8] ptr (MOVDconst [0])
                (MOVDstore ptr (MOVDconst [0]) mem))))

// Medium 8-aligned zeroing uses a Duff's device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
    && s%8 == 0 && s <= 8*128
    && t.Alignment()%8 == 0 && !config.noDuffDevice =>
    (DUFFZERO [8 * (128 - s/8)] ptr mem)

// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
    (LoweredZero [t.Alignment()]
        ptr
        (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
        mem)

(Convert ...) => (MOVconvert ...)

// Checks
(IsNonNil ...) => (SNEZ ...)
(IsInBounds ...) => (Less64U ...)
(IsSliceInBounds ...) => (Leq64U ...)

// Trivial lowering
(NilCheck ...) => (LoweredNilCheck ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Small moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
    (MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
    (MOVBstore [1] dst (MOVBload [1] src mem)
        (MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
    (MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
    (MOVHstore [2] dst (MOVHload [2] src mem)
        (MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
    (MOVBstore [3] dst (MOVBload [3] src mem)
        (MOVBstore [2] dst (MOVBload [2] src mem)
            (MOVBstore [1] dst (MOVBload [1] src mem)
                (MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore dst (MOVDload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
    (MOVWstore [4] dst (MOVWload [4] src mem)
        (MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
    (MOVHstore [6] dst (MOVHload [6] src mem)
        (MOVHstore [4] dst (MOVHload [4] src mem)
            (MOVHstore [2] dst (MOVHload [2] src mem)
                (MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
    (MOVBstore [2] dst (MOVBload [2] src mem)
        (MOVBstore [1] dst (MOVBload [1] src mem)
            (MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
    (MOVHstore [4] dst (MOVHload [4] src mem)
        (MOVHstore [2] dst (MOVHload [2] src mem)
            (MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
    (MOVWstore [8] dst (MOVWload [8] src mem)
        (MOVWstore [4] dst (MOVWload [4] src mem)
            (MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [8] dst (MOVDload [8] src mem)
        (MOVDstore dst (MOVDload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [16] dst (MOVDload [16] src mem)
        (MOVDstore [8] dst (MOVDload [8] src mem)
            (MOVDstore dst (MOVDload src mem) mem)))
(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [24] dst (MOVDload [24] src mem)
        (MOVDstore [16] dst (MOVDload [16] src mem)
            (MOVDstore [8] dst (MOVDload [8] src mem)
                (MOVDstore dst (MOVDload src mem) mem))))

// Medium 8-aligned move uses a Duff's device
// 16 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
    && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
    && !config.noDuffDevice && logLargeCopy(v, s) =>
    (DUFFCOPY [16 * (128 - s/8)] dst src mem)

// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
    (LoweredMove [t.Alignment()]
        dst
        src
        (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
        mem)

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (SEQZ (SUB <typ.Bool> x y))
(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
(Not ...) => (SEQZ ...)

// Lowering pointer arithmetic
// TODO: Special handling for SP offsets, like ARM
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

(Const8 [val]) => (MOVDconst [int64(val)])
(Const16 [val]) => (MOVDconst [int64(val)])
(Const32 [val]) => (MOVDconst [int64(val)])
(Const64 [val]) => (MOVDconst [int64(val)])
(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])

(Addr {sym} base) => (MOVaddr {sym} [0] base)
(LocalAddr {sym} base _) => (MOVaddr {sym} base)

// Calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Atomic Intrinsics
(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
(AtomicLoad32 ...) => (LoweredAtomicLoad32 ...)
(AtomicLoad64 ...) => (LoweredAtomicLoad64 ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore8 ...) => (LoweredAtomicStore8 ...)
(AtomicStore32 ...) => (LoweredAtomicStore32 ...)
(AtomicStore64 ...) => (LoweredAtomicStore64 ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
(AtomicAnd8 ptr val mem) =>
    (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
        (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
            (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
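
// Illustrative reading of the AtomicAnd8 lowering above (not part of the
// rules): the target byte is byte (ptr & 3) of its containing aligned
// 32-bit word, so the 8-bit mask is placed at bit offset (ptr & 3) * 8.
// The XORI/NOT combination forces every other bit of the 32-bit operand to
// 1, so the word-sized AND leaves the remaining bytes unchanged. The
// AtomicOr8 lowering below uses the same byte-in-word placement, with the
// other bits left 0 instead.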

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
(AtomicOr8 ptr val mem) =>
    (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
        (SLL <typ.UInt32> (ZeroExt8to32 val)
            (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)

(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Conditional branches
(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)

// Optimizations

// Absorb SEQZ/SNEZ into branch.
(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)

// Remove redundant NEG from BEQZ/BNEZ.
(BEQZ (NEG x) yes no) => (BEQZ x yes no)
(BNEZ (NEG x) yes no) => (BNEZ x yes no)

// Negate comparison with FNES/FNED.
(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)

// Convert BEQZ/BNEZ into more optimal branch conditions.
(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
(BNEZ (SUB x y) yes no) => (BNE x y yes no)
(BEQZ (SLT x y) yes no) => (BGE x y yes no)
(BNEZ (SLT x y) yes no) => (BLT x y yes no)
(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)

// Convert branch with zero to more optimal branch zero.
(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)

// Remove redundant NEG from SEQZ/SNEZ.
(SEQZ (NEG x)) => (SEQZ x)
(SNEZ (NEG x)) => (SNEZ x)

// Remove redundant SEQZ/SNEZ.
(SEQZ (SEQZ x)) => (SNEZ x)
(SEQZ (SNEZ x)) => (SEQZ x)
(SNEZ (SEQZ x)) => (SEQZ x)
(SNEZ (SNEZ x)) => (SNEZ x)

// Store zero.
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)

// Boolean ops are already extended.
(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
(MOVBUreg x:((SEQZ|SNEZ) _)) => x
(MOVBUreg x:((SLT|SLTU) _ _)) => x

// Avoid extending when already sufficiently masked.
(MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Combine masking and zero extension.
(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)

// Avoid sign/zero extension for consts.
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Avoid sign/zero extension after properly typed load.
(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)

// Avoid zero extension after properly typed atomic operation.
(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)

// Avoid sign extension after word arithmetic.
(MOVWreg x:(ADDIW _)) => (MOVDreg x)
(MOVWreg x:(SUBW _ _)) => (MOVDreg x)
(MOVWreg x:(NEGW _)) => (MOVDreg x)
(MOVWreg x:(MULW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
(MOVWreg x:(REMW _ _)) => (MOVDreg x)
(MOVWreg x:(REMUW _ _)) => (MOVDreg x)

// Fold double extensions.
(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
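
// Note on the masking rules above (illustrative, not part of the rules): a
// negative ANDI constant denotes a 64-bit mask with the high bits set, so
// it can still be combined with a zero extension. For example,
// (MOVBUreg (ANDI [-16] x)) computes (x & ^15) & 0xff == x & 0xf0, which
// is exactly (ANDI [0xf0] x), matching the "combine masking and zero
// extension" rules.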

// Do not extend before store.
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// Replace extend after load with alternate load where possible.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVDnop does not emit an instruction; it exists only to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// Fold constant into immediate instructions where possible.
(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x)
(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)

// Convert const subtraction into ADDI with negative immediate, where possible.
(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))

// Subtraction of zero.
(SUB x (MOVDconst [0])) => x
(SUBW x (MOVDconst [0])) => (ADDIW [0] x)

// Subtraction from zero.
(SUB (MOVDconst [0]) x) => (NEG x)
(SUBW (MOVDconst [0]) x) => (NEGW x)

// Fold negation into subtraction.
(NEG (SUB x y)) => (SUB y x)
(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))

// Double negation.
(NEG (NEG x)) => x

// Addition of zero or two constants.
(ADDI [0] x) => x
(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])

// ANDI with all zeros, all ones or two constants.
(ANDI [0] x) => (MOVDconst [0])
(ANDI [-1] x) => x
(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])

// ORI with all zeros, all ones or two constants.
(ORI [0] x) => x
(ORI [-1] x) => (MOVDconst [-1])
(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])

// Combine operations with immediate.
(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
(ORI [x] (ORI [y] z)) => (ORI [x | y] z)

// Negation of a constant.
(NEG (MOVDconst [x])) => (MOVDconst [-x])
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])

// Shift of a constant.
(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])

// SLTI/SLTIU with constants.
(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])

// SLTI/SLTIU with known outcomes.
(SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
(SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
(SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])

// SLT/SLTU with known outcomes.
(SLT x x) => (MOVDconst [0])
(SLTU x x) => (MOVDconst [0])

// Deadcode for LoweredMuluhilo
(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)

// Merge negation into fused multiply-add and multiply-subtract.
//
// Key:
//
//   [+ -](x * y) [+ -] z.
//    _ N          A S
//                 D U
//                 D B
//
// Note: multiplication commutativity handled by rule generator.
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMADD|MADD|NMSUB|MSUB)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
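
// Illustrative reading of the two rules above (not part of the rules): a
// negated multiplicand toggles the leading N of the mnemonic
// (FMADDD <-> FNMADDD, FMSUBD <-> FNMSUBD), while a negated addend toggles
// ADD/SUB; e.g. (FMADDD x y (FNEGD z)), which computes x*y - z, becomes
// (FMSUBD x y z).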