// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64 x y) -> (ADDQ x y)
(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
(Add32 x y) -> (ADDL x y)
(Add16 x y) -> (ADDL x y)
(Add8 x y) -> (ADDL x y)
(Add32F x y) -> (ADDSS x y)
(Add64F x y) -> (ADDSD x y)

(Sub64 x y) -> (SUBQ x y)
(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
(Sub32 x y) -> (SUBL x y)
(Sub16 x y) -> (SUBL x y)
(Sub8 x y) -> (SUBL x y)
(Sub32F x y) -> (SUBSS x y)
(Sub64F x y) -> (SUBSD x y)

(Mul64 x y) -> (MULQ x y)
(Mul32 x y) -> (MULL x y)
(Mul16 x y) -> (MULL x y)
(Mul8 x y) -> (MULL x y)
(Mul32F x y) -> (MULSS x y)
(Mul64F x y) -> (MULSD x y)

(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)

(Div64 x y) -> (Select0 (DIVQ x y))
(Div64u x y) -> (Select0 (DIVQU x y))
(Div32 x y) -> (Select0 (DIVL x y))
(Div32u x y) -> (Select0 (DIVLU x y))
(Div16 x y) -> (Select0 (DIVW x y))
(Div16u x y) -> (Select0 (DIVWU x y))
(Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

(Hmul64 x y) -> (HMULQ x y)
(Hmul64u x y) -> (HMULQU x y)
(Hmul32 x y) -> (HMULL x y)
(Hmul32u x y) -> (HMULLU x y)
(Hmul16 x y) -> (HMULW x y)
(Hmul16u x y) -> (HMULWU x y)
(Hmul8 x y) -> (HMULB x y)
(Hmul8u x y) -> (HMULBU x y)

(Avg64u x y) -> (AVGQU x y)

(Mod64 x y) -> (Select1 (DIVQ x y))
(Mod64u x y) -> (Select1 (DIVQU x y))
(Mod32 x y) -> (Select1 (DIVL x y))
(Mod32u x y) -> (Select1 (DIVLU x y))
(Mod16 x y) -> (Select1 (DIVW x y))
(Mod16u x y) -> (Select1 (DIVWU x y))
(Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

(And64 x y) -> (ANDQ x y)
(And32 x y) -> (ANDL x y)
(And16 x y) -> (ANDL x y)
(And8 x y) -> (ANDL x y)

(Or64 x y) -> (ORQ x y)
(Or32 x y) -> (ORL x y)
(Or16 x y) -> (ORL x y)
(Or8 x y) -> (ORL x y)

(Xor64 x y) -> (XORQ x y)
(Xor32 x y) -> (XORL x y)
(Xor16 x y) -> (XORL x y)
(Xor8 x y) -> (XORL x y)

(Neg64 x) -> (NEGQ x)
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
(Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))

(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
(Com16 x) -> (NOTL x)
(Com8 x) -> (NOTL x)

// Lowering boolean ops
(AndB x y) -> (ANDL x y)
(OrB x y) -> (ORL x y)
(Not x) -> (XORLconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)

// Lowering other arithmetic
(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
(Ctz32 <t> x) -> (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x)))
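// BSF leaves its destination undefined when the source is zero (it only
// sets ZF), so the rules above pair it with a CMOV that substitutes the
// operand width in that case. Roughly, with a hypothetical bsf helper
// standing in for the instruction (a sketch, not the compiler's code):
//
//	func ctz64(x uint64) int64 {
//		r, zero := bsf(x) // BSFQ: r is undefined and zero is true when x == 0
//		if zero {
//			return 64 // CMOVQEQ picks the constant when ZF is set
//		}
//		return r
//	}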
(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)

(Sqrt x) -> (SQRTSD x)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 x) -> (MOVBQSX x)
(SignExt8to32 x) -> (MOVBQSX x)
(SignExt8to64 x) -> (MOVBQSX x)
(SignExt16to32 x) -> (MOVWQSX x)
(SignExt16to64 x) -> (MOVWQSX x)
(SignExt32to64 x) -> (MOVLQSX x)

(ZeroExt8to16 x) -> (MOVBQZX x)
(ZeroExt8to32 x) -> (MOVBQZX x)
(ZeroExt8to64 x) -> (MOVBQZX x)
(ZeroExt16to32 x) -> (MOVWQZX x)
(ZeroExt16to64 x) -> (MOVWQZX x)
(ZeroExt32to64 x) -> (MOVLQZX x)

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Lowering float <-> int
(Cvt32to32F x) -> (CVTSL2SS x)
(Cvt32to64F x) -> (CVTSL2SD x)
(Cvt64to32F x) -> (CVTSQ2SS x)
(Cvt64to64F x) -> (CVTSQ2SD x)

(Cvt32Fto32 x) -> (CVTTSS2SL x)
(Cvt32Fto64 x) -> (CVTTSS2SQ x)
(Cvt64Fto32 x) -> (CVTTSD2SL x)
(Cvt64Fto64 x) -> (CVTTSD2SQ x)

(Cvt32Fto64F x) -> (CVTSS2SD x)
(Cvt64Fto32F x) -> (CVTSD2SS x)

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
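// As a plain-Go sketch of the same trick (illustration only, emulating a
// hardware-style shift that uses the count mod 64):
//
//	func lsh64(x, s uint64) uint64 {
//		var mask uint64
//		if s < 64 {
//			mask = ^uint64(0) // SBBQcarrymask: all ones while the shift is in range
//		}
//		return (x << (s & 63)) & mask // SHLQ, then ANDQ with the mask
//	}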
(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Lsh64x8 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))

(Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh16x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh8x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh8x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh8x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh8x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lrot64 <t> x [c]) -> (ROLQconst <t> [c&63] x)
(Lrot32 <t> x [c]) -> (ROLLconst <t> [c&31] x)
(Lrot16 <t> x [c]) -> (ROLWconst <t> [c&15] x)
(Lrot8 <t> x [c]) -> (ROLBconst <t> [c&7] x)

(Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Rsh64Ux8 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))

(Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
(Rsh16Ux8 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))

(Rsh8Ux64 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
(Rsh8Ux32 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
(Rsh8Ux16 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
(Rsh8Ux8 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift amount to -1 (all ones) if it is >= the width.
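// In plain Go (a minimal sketch, not the compiler's code): ORing the
// amount with the inverted carry mask yields an all-ones amount when it is
// out of range, and SARQ then shifts by 63, producing 0 or -1:
//
//	func rsh64(x int64, s uint64) int64 {
//		if s >= 64 {
//			s = 63 // shifting right by 63 leaves only the sign bit
//		}
//		return x >> s
//	}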
(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
(Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
(Rsh64x8 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))

(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
(Rsh32x8 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))

(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
(Rsh16x8 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))

(Rsh8x64 <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
(Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
(Rsh8x16 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
(Rsh8x8 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))

// Lowering comparisons
(Less64 x y) -> (SETL (CMPQ x y))
(Less32 x y) -> (SETL (CMPL x y))
(Less16 x y) -> (SETL (CMPW x y))
(Less8 x y) -> (SETL (CMPB x y))
(Less64U x y) -> (SETB (CMPQ x y))
(Less32U x y) -> (SETB (CMPL x y))
(Less16U x y) -> (SETB (CMPW x y))
(Less8U x y) -> (SETB (CMPB x y))
// Use SETGF with reversed operands to dodge NaN case
(Less64F x y) -> (SETGF (UCOMISD y x))
(Less32F x y) -> (SETGF (UCOMISS y x))
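// UCOMISx reports "unordered" when either operand is NaN, and the
// greater-than conditions are false for unordered results, so rewriting
// x < y as y > x makes any comparison involving NaN come out false, which
// is what Go requires. Go's own operators already have these semantics
// (a sketch for illustration only):
//
//	func less64F(x, y float64) bool {
//		return y > x // false whenever x or y is NaN
//	}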
(Leq64 x y) -> (SETLE (CMPQ x y))
(Leq32 x y) -> (SETLE (CMPL x y))
(Leq16 x y) -> (SETLE (CMPW x y))
(Leq8 x y) -> (SETLE (CMPB x y))
(Leq64U x y) -> (SETBE (CMPQ x y))
(Leq32U x y) -> (SETBE (CMPL x y))
(Leq16U x y) -> (SETBE (CMPW x y))
(Leq8U x y) -> (SETBE (CMPB x y))
// Use SETGEF with reversed operands to dodge NaN case
(Leq64F x y) -> (SETGEF (UCOMISD y x))
(Leq32F x y) -> (SETGEF (UCOMISS y x))

(Greater64 x y) -> (SETG (CMPQ x y))
(Greater32 x y) -> (SETG (CMPL x y))
(Greater16 x y) -> (SETG (CMPW x y))
(Greater8 x y) -> (SETG (CMPB x y))
(Greater64U x y) -> (SETA (CMPQ x y))
(Greater32U x y) -> (SETA (CMPL x y))
(Greater16U x y) -> (SETA (CMPW x y))
(Greater8U x y) -> (SETA (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
// the bug is accommodated when the assembly is generated.
(Greater64F x y) -> (SETGF (UCOMISD x y))
(Greater32F x y) -> (SETGF (UCOMISS x y))

(Geq64 x y) -> (SETGE (CMPQ x y))
(Geq32 x y) -> (SETGE (CMPL x y))
(Geq16 x y) -> (SETGE (CMPW x y))
(Geq8 x y) -> (SETGE (CMPB x y))
(Geq64U x y) -> (SETAE (CMPQ x y))
(Geq32U x y) -> (SETAE (CMPL x y))
(Geq16U x y) -> (SETAE (CMPW x y))
(Geq8U x y) -> (SETAE (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
// the bug is accommodated when the assembly is generated.
(Geq64F x y) -> (SETGEF (UCOMISD x y))
(Geq32F x y) -> (SETGEF (UCOMISS x y))

(Eq64 x y) -> (SETEQ (CMPQ x y))
(Eq32 x y) -> (SETEQ (CMPL x y))
(Eq16 x y) -> (SETEQ (CMPW x y))
(Eq8 x y) -> (SETEQ (CMPB x y))
(EqB x y) -> (SETEQ (CMPB x y))
(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))

(Neq64 x y) -> (SETNE (CMPQ x y))
(Neq32 x y) -> (SETNE (CMPL x y))
(Neq16 x y) -> (SETNE (CMPW x y))
(Neq8 x y) -> (SETNE (CMPB x y))
(NeqB x y) -> (SETNE (CMPB x y))
(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))

(Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)

// Lowering stores
// These more-specific FP versions of the Store pattern should come first.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)

(Store [8] ptr val mem) -> (MOVQstore ptr val mem)
(Store [4] ptr val mem) -> (MOVLstore ptr val mem)
(Store [2] ptr val mem) -> (MOVWstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)

// Lowering moves
(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstore dst (MOVWload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstore dst (MOVLload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstore dst (MOVQload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 -> (MOVOstore dst (MOVOload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 ->
	(MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))

// Adjust moves to be a multiple of 16 bytes.
(Move [s] dst src mem)
	&& SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 ->
	(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
		(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem)
	&& SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 ->
	(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
		(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
		(MOVOstore dst (MOVOload src mem) mem))

// Medium copying uses Duff's device.
(Move [s] dst src mem)
	&& SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0
	&& !config.noDuffDevice ->
	(DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode:
//	MOVUPS	(SI), X0
//	ADDQ	$16, SI
//	MOVUPS	X0, (DI)
//	ADDQ	$16, DI
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
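// For example (arithmetic only, not an extra rule): a 64-byte move has
// 64/16 = 4 blocks to copy, so the rule emits DUFFCOPY [14*(64-4)] =
// DUFFCOPY [840], entering duffcopy 840 bytes in so that exactly the last
// 4 of its 64 blocks execute.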
// Large copying uses REP MOVSQ.
(Move [s] dst src mem) && (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 ->
	(REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)

// Lowering Zero instructions
(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstoreconst [0] destptr mem)

(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [0] destptr mem))

// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 ->
	(Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8])
		(MOVQstoreconst [0] destptr mem))
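// A worked instance (not an extra rule): zeroing 27 bytes stores 8 zero
// bytes at destptr and recurses as a 24-byte Zero at destptr+3; the two
// regions overlap by 5 bytes, which is harmless when writing zeros.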
// Zero small numbers of words directly.
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 ->
	(MOVQstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 ->
	(MOVQstoreconst [makeValAndOff(0,16)] destptr
		(MOVQstoreconst [makeValAndOff(0,8)] destptr
			(MOVQstoreconst [0] destptr mem)))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 ->
	(MOVQstoreconst [makeValAndOff(0,24)] destptr
		(MOVQstoreconst [makeValAndOff(0,16)] destptr
			(MOVQstoreconst [makeValAndOff(0,8)] destptr
				(MOVQstoreconst [0] destptr mem))))

// Medium zeroing uses Duff's device.
(Zero [s] destptr mem)
	&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0
	&& !config.noDuffDevice ->
	(Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [s] destptr mem)
	&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice ->
	(DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)

// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem)
	&& (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32))
	&& SizeAndAlign(s).Size()%8 == 0 ->
	(REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)

// Lowering constants
(Const8 [val]) -> (MOVLconst [val])
(Const16 [val]) -> (MOVLconst [val])
(Const32 [val]) -> (MOVLconst [val])
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
(ConstBool [b]) -> (MOVLconst [b])

// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
(Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)

// block rewrites
(If (SETL cmp) yes no) -> (LT cmp yes no)
(If (SETLE cmp) yes no) -> (LE cmp yes no)
(If (SETG cmp) yes no) -> (GT cmp yes no)
(If (SETGE cmp) yes no) -> (GE cmp yes no)
(If (SETEQ cmp) yes no) -> (EQ cmp yes no)
(If (SETNE cmp) yes no) -> (NE cmp yes no)
(If (SETB cmp) yes no) -> (ULT cmp yes no)
(If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF cmp) yes no) -> (UGT cmp yes no)
(If (SETGEF cmp) yes no) -> (UGE cmp yes no)
(If (SETEQF cmp) yes no) -> (EQF cmp yes no)
(If (SETNEF cmp) yes no) -> (NEF cmp yes no)

(If cond yes no) -> (NE (TESTB cond cond) yes no)

// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
(AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
(AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem)

// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
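// The concern is the classic store/load reordering allowed by x86-TSO.
// In the sketch below (illustrative only), plain MOV stores would permit
// both loads to observe 0; XCHG's implicit LOCK acts as a full barrier
// and rules that out:
//
//	// goroutine 1               // goroutine 2
//	atomic.StoreInt32(&a, 1)     atomic.StoreInt32(&b, 1)
//	r1 := atomic.LoadInt32(&b)   r2 := atomic.LoadInt32(&a)
//	// with full barriers, r1 == 0 && r2 == 0 is impossible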
(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))

// Atomic exchanges.
(AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
(AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 (XADDLlock val ptr mem) val)
(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 (XADDQlock val ptr mem) val)
(Select0 <t> (AddTupleFirst32 tuple val)) -> (ADDL val (Select0 <t> tuple))
(Select1 (AddTupleFirst32 tuple _ )) -> (Select1 tuple)
(Select0 <t> (AddTupleFirst64 tuple val)) -> (ADDQ val (Select0 <t> tuple))
(Select1 (AddTupleFirst64 tuple _ )) -> (Select1 tuple)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (CMPXCHGQlock ptr old new_ mem)

// Atomic memory updates.
(AtomicAnd8 ptr val mem) -> (ANDBlock ptr val mem)
(AtomicOr8 ptr val mem) -> (ORBlock ptr val mem)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold boolean tests into blocks
(NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no)
(NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no)
(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no)
(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ cmp yes no)
(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE cmp yes no)
(NE (TESTB (SETB cmp) (SETB cmp)) yes no) -> (ULT cmp yes no)
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no)
(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF cmp yes no)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
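// These patterns fire because the generic (If cond ...) lowering above
// wraps cond in (NE (TESTB cond cond)); when cond is itself a SETxx we
// can branch on the original flags directly. EQF/NEF are distinct block
// kinds because UCOMISx signals an unordered (NaN) result via the parity
// flag, so float equality cannot be read from ZF alone; roughly:
//
//	eqf  = ZF set  && PF clear   (equal, and ordered)
//	neqf = ZF clear || PF set    (unequal, or unordered)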
// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
// (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))

// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)

(SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))

(MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
(MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x)
(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)

(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
(ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x)
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)

(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)

(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
(XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x)

(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)

(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)

(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
(XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x)
(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)

(SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)

(SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)

(SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)

(SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)

(SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x)

(SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x)

(SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)

(SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)

(SARW x (MOVQconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVLconst [c])) -> (SARWconst [c&31] x)

(SARB x (MOVQconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVLconst [c])) -> (SARBconst [c&31] x)

(SARL x (ANDLconst [31] y)) -> (SARL x y)
(SARQ x (ANDQconst [63] y)) -> (SARQ x y)

(SHLL x (ANDLconst [31] y)) -> (SHLL x y)
(SHLQ x (ANDQconst [63] y)) -> (SHLQ x y)

(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
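// The AND-elimination rules above are safe because the hardware shift
// instructions already use the count modulo the operand width (mod 32 for
// the L forms, mod 64 for the Q forms), so an explicit mask to the same
// width is redundant: SARQ x, (y&63) and SARQ x, y compute the same thing.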
(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)

(ROLQconst [0] x) -> x
(ROLLconst [0] x) -> x
(ROLWconst [0] x) -> x
(ROLBconst [0] x) -> x

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHRW x (MOVLconst [24]))), but just in case.

(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))

// Using MOVBQZX instead of ANDQ is cheaper.
(ANDQconst [0xFF] x) -> (MOVBQZX x)
(ANDQconst [0xFFFF] x) -> (MOVWQZX x)
(ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x)

// strength reduction
// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
//	1 - addq, shlq, leaq, negq
//	3 - imulq
// This limits the rewrites to two instructions.
// TODO: 27, 81
(MULQconst [-1] x) -> (NEGQ x)
(MULQconst [0] _) -> (MOVQconst [0])
(MULQconst [1] x) -> x
(MULQconst [3] x) -> (LEAQ2 x x)
(MULQconst [5] x) -> (LEAQ4 x x)
(MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x)
(MULQconst [9] x) -> (LEAQ8 x x)
(MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x))
(MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x))
(MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
(MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x))
(MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x))
(MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x))
(MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x))

(MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x)
(MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
(MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
(MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
(MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
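// Worked instances (illustration only): LEAQk computes x + k*y, so
// (MULQconst [11] x) -> (LEAQ2 x (LEAQ4 x x)) is x + 2*(x + 4*x) = 11*x,
// and c = 40 = 5*8 hits the c%5 rule: (SHLQconst [3] (LEAQ4 x x)) is
// (x + 4*x) << 3 = 40*x.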
// combine add/shift into LEAQ
(ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
(ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
(ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
(ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
(ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
(ADDQ x (ADDQ y x)) -> (LEAQ2 y x)

// combine ADDQ/ADDQconst into LEAQ1
(ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y)
(ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
(ADDQ x (ADDQconst [c] y)) -> (LEAQ1 [c] x y)

// fold ADDQ into LEAQ
(ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)

// fold ADDQconst into LEAQx
(ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
(ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
(ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
(ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
(LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ1 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
(LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
(LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
(LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
(LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
(LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)

// fold shifts into LEAQx
(LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
(LEAQ1 [c] {s} (SHLQconst [1] x) y) -> (LEAQ2 [c] {s} y x)
(LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
(LEAQ1 [c] {s} (SHLQconst [2] x) y) -> (LEAQ4 [c] {s} y x)
(LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
(LEAQ1 [c] {s} (SHLQconst [3] x) y) -> (LEAQ8 [c] {s} y x)

(LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
(LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
(LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)

// reverse ordering of compare instruction
(SETL (InvertFlags x)) -> (SETG x)
(SETG (InvertFlags x)) -> (SETL x)
(SETB (InvertFlags x)) -> (SETA x)
(SETA (InvertFlags x)) -> (SETB x)
(SETLE (InvertFlags x)) -> (SETGE x)
(SETGE (InvertFlags x)) -> (SETLE x)
(SETBE (InvertFlags x)) -> (SETAE x)
(SETAE (InvertFlags x)) -> (SETBE x)
(SETEQ (InvertFlags x)) -> (SETEQ x)
(SETNE (InvertFlags x)) -> (SETNE x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
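// A sketch of the hazard (source-level, illustrative only):
//
//	b := *p       // one MOVBload...
//	x := int64(b) // ...feeding a sign extension
//	y := b != 0   // ...and a second use
//
// Folding the extension into its own load here would turn one memory read
// into two, and a concurrent writer could make them observe different values.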
(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)

(MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
(MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)

// replace load from same location as preceding store with copy
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

// Fold extensions and ANDs together.
(MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
(MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
(MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)

// Don't extend before storing
(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)

// fold constants into memory operations
// Note that this is not always a good idea because if not all the uses of
// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
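// For example (one instance of the rules below, not an extra rule):
// (MOVQload [8] {sym} (ADDQconst [16] ptr) mem) becomes
// (MOVQload [24] {sym} ptr mem), folding the add into the addressing mode.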
(MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
(MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
(MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
(MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload [off1+off2] {sym} ptr mem)

(MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
(MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
(MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore [off1+off2] {sym} ptr val mem)

// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
	(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)

// Fold address offsets into constant stores.
(MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)

// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
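// For instance (illustrative only): a load of a local x addressed as
// (LEAQ {x} SP) folds to (MOVQload {x} SP mem), so liveness sees directly
// that x is read, rather than an anonymous computed pointer.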
(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)

// generating indexed loads and stores
(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)

(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)

(MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
(MOVWload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
(MOVLload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
(MOVQload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVQloadidx1 [off] {sym} ptr idx mem)
(MOVSSload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
(MOVSDload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
(MOVWstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
(MOVLstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
(MOVQstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVQstoreidx1 [off] {sym} ptr idx val mem)
(MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
(MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)

(MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)

(MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
(MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
(MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
(MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)

// combine SHLQ into indexed loads and stores
(MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
(MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem)
(MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
(MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem)
(MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
(MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
(MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)

// combine ADDQ into indexed loads and stores
(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)

(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)

(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)

(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)

(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)

// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
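
// InvertFlags wraps a flags value whose comparison had its operands swapped,
// e.g. by the canonicalization (elsewhere in this file) of
// (CMPQ (MOVQconst [c]) x) into (InvertFlags (CMPQconst x [c])). Rather than
// redo the comparison, the rules above flip the condition on the consumer:
// x < y and y > x are the same test, and (in)equality is symmetric, so EQ
// and NE pass through unchanged.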

// Constant comparisons.
(CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
(CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
(CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
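
// Each flag constant records both the signed and the unsigned outcome of the
// comparison, because later users may test either sense. Worked example
// (values chosen for illustration): comparing x = -1 against y = 1 is signed
// less-than but unsigned greater-than (0xFFFFFFFFFFFFFFFF > 1), so it folds
// to FlagLT_UGT; a later SETL then becomes 1 while a SETB becomes 0, per the
// rules below.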

// Other known comparisons.
(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
(CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
// TODO: DIVxU also.
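
// These fire when only the range of the operand is known, from the op that
// produced it: a zero-extension is bounded by its source width, a right
// shift leaves at most 64-c significant bits, and an AND is bounded by its
// mask. Illustrative sketch (x is a uint64):
//
//	if x>>60 < 16 { ... } // always true
//
// lowers to (CMPQconst (SHRQconst x [60]) [16]); since 1<<(64-60) <= 16, the
// shift rule folds it to FlagLT_ULT, and the branch folding below removes
// the test entirely.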

// Absorb flag constants into SBB ops.
(SBBQcarrymask (FlagEQ)) -> (MOVQconst [0])
(SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1])
(SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0])
(SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1])
(SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0])
(SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
(SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
(SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
(SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
(SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
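
// SBBxcarrymask materializes the carry flag as a mask: subtract-with-borrow
// of a register from itself yields -1 if carry is set and 0 otherwise. The
// shift lowerings use it to zero out-of-range shifts, so only the unsigned
// outcome matters here: the ULT variants (carry set) become -1, the rest 0.
// For example, a shift amount provably in range produces a constant -1 mask,
// and the (ANDQconst [-1] x) -> x rule below then deletes the masking AND.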

// Absorb flag constants into branches.
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT_ULT) yes no) -> (First nil no yes)
(EQ (FlagLT_UGT) yes no) -> (First nil no yes)
(EQ (FlagGT_ULT) yes no) -> (First nil no yes)
(EQ (FlagGT_UGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT_ULT) yes no) -> (First nil yes no)
(NE (FlagLT_UGT) yes no) -> (First nil yes no)
(NE (FlagGT_ULT) yes no) -> (First nil yes no)
(NE (FlagGT_UGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT_ULT) yes no) -> (First nil yes no)
(LT (FlagLT_UGT) yes no) -> (First nil yes no)
(LT (FlagGT_ULT) yes no) -> (First nil no yes)
(LT (FlagGT_UGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT_ULT) yes no) -> (First nil yes no)
(LE (FlagLT_UGT) yes no) -> (First nil yes no)
(LE (FlagGT_ULT) yes no) -> (First nil no yes)
(LE (FlagGT_UGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT_ULT) yes no) -> (First nil no yes)
(GT (FlagLT_UGT) yes no) -> (First nil no yes)
(GT (FlagGT_ULT) yes no) -> (First nil yes no)
(GT (FlagGT_UGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT_ULT) yes no) -> (First nil no yes)
(GE (FlagLT_UGT) yes no) -> (First nil no yes)
(GE (FlagGT_ULT) yes no) -> (First nil yes no)
(GE (FlagGT_UGT) yes no) -> (First nil yes no)

(ULT (FlagEQ) yes no) -> (First nil no yes)
(ULT (FlagLT_ULT) yes no) -> (First nil yes no)
(ULT (FlagLT_UGT) yes no) -> (First nil no yes)
(ULT (FlagGT_ULT) yes no) -> (First nil yes no)
(ULT (FlagGT_UGT) yes no) -> (First nil no yes)

(ULE (FlagEQ) yes no) -> (First nil yes no)
(ULE (FlagLT_ULT) yes no) -> (First nil yes no)
(ULE (FlagLT_UGT) yes no) -> (First nil no yes)
(ULE (FlagGT_ULT) yes no) -> (First nil yes no)
(ULE (FlagGT_UGT) yes no) -> (First nil no yes)

(UGT (FlagEQ) yes no) -> (First nil no yes)
(UGT (FlagLT_ULT) yes no) -> (First nil no yes)
(UGT (FlagLT_UGT) yes no) -> (First nil yes no)
(UGT (FlagGT_ULT) yes no) -> (First nil no yes)
(UGT (FlagGT_UGT) yes no) -> (First nil yes no)

(UGE (FlagEQ) yes no) -> (First nil yes no)
(UGE (FlagLT_ULT) yes no) -> (First nil no yes)
(UGE (FlagLT_UGT) yes no) -> (First nil yes no)
(UGE (FlagGT_ULT) yes no) -> (First nil no yes)
(UGE (FlagGT_UGT) yes no) -> (First nil yes no)

// Absorb flag constants into SETxx ops.
(SETEQ (FlagEQ)) -> (MOVLconst [1])
(SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
(SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
(SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
(SETEQ (FlagGT_UGT)) -> (MOVLconst [0])

(SETNE (FlagEQ)) -> (MOVLconst [0])
(SETNE (FlagLT_ULT)) -> (MOVLconst [1])
(SETNE (FlagLT_UGT)) -> (MOVLconst [1])
(SETNE (FlagGT_ULT)) -> (MOVLconst [1])
(SETNE (FlagGT_UGT)) -> (MOVLconst [1])

(SETL (FlagEQ)) -> (MOVLconst [0])
(SETL (FlagLT_ULT)) -> (MOVLconst [1])
(SETL (FlagLT_UGT)) -> (MOVLconst [1])
(SETL (FlagGT_ULT)) -> (MOVLconst [0])
(SETL (FlagGT_UGT)) -> (MOVLconst [0])

(SETLE (FlagEQ)) -> (MOVLconst [1])
(SETLE (FlagLT_ULT)) -> (MOVLconst [1])
(SETLE (FlagLT_UGT)) -> (MOVLconst [1])
(SETLE (FlagGT_ULT)) -> (MOVLconst [0])
(SETLE (FlagGT_UGT)) -> (MOVLconst [0])

(SETG (FlagEQ)) -> (MOVLconst [0])
(SETG (FlagLT_ULT)) -> (MOVLconst [0])
(SETG (FlagLT_UGT)) -> (MOVLconst [0])
(SETG (FlagGT_ULT)) -> (MOVLconst [1])
(SETG (FlagGT_UGT)) -> (MOVLconst [1])

(SETGE (FlagEQ)) -> (MOVLconst [1])
(SETGE (FlagLT_ULT)) -> (MOVLconst [0])
(SETGE (FlagLT_UGT)) -> (MOVLconst [0])
(SETGE (FlagGT_ULT)) -> (MOVLconst [1])
(SETGE (FlagGT_UGT)) -> (MOVLconst [1])

(SETB (FlagEQ)) -> (MOVLconst [0])
(SETB (FlagLT_ULT)) -> (MOVLconst [1])
(SETB (FlagLT_UGT)) -> (MOVLconst [0])
(SETB (FlagGT_ULT)) -> (MOVLconst [1])
(SETB (FlagGT_UGT)) -> (MOVLconst [0])

(SETBE (FlagEQ)) -> (MOVLconst [1])
(SETBE (FlagLT_ULT)) -> (MOVLconst [1])
(SETBE (FlagLT_UGT)) -> (MOVLconst [0])
(SETBE (FlagGT_ULT)) -> (MOVLconst [1])
(SETBE (FlagGT_UGT)) -> (MOVLconst [0])

(SETA (FlagEQ)) -> (MOVLconst [0])
(SETA (FlagLT_ULT)) -> (MOVLconst [0])
(SETA (FlagLT_UGT)) -> (MOVLconst [1])
(SETA (FlagGT_ULT)) -> (MOVLconst [0])
(SETA (FlagGT_UGT)) -> (MOVLconst [1])

(SETAE (FlagEQ)) -> (MOVLconst [1])
(SETAE (FlagLT_ULT)) -> (MOVLconst [0])
(SETAE (FlagLT_UGT)) -> (MOVLconst [1])
(SETAE (FlagGT_ULT)) -> (MOVLconst [0])
(SETAE (FlagGT_UGT)) -> (MOVLconst [1])
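
// (First nil yes no) is the generic unconditional-branch block: control
// always flows to the first listed successor, leaving the other arm dead.
// Together with the comparison folding above, this removes branches on
// statically decided tests. Illustrative sketch:
//
//	var a [16]byte
//	_ = a[x&7] // bounds check: (ULT (CMPQconst (ANDQconst x [7]) [16]))
//
// The mask rule above folds the CMP to FlagLT_ULT, (ULT (FlagLT_ULT) yes no)
// becomes (First nil yes no), and the panic arm is deleted as unreachable.
// The SETxx rules do the same for booleans materialized into registers,
// reducing them to MOVLconst 0 or 1.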

// Remove redundant *const ops
(ADDQconst [0] x) -> x
(ADDLconst [c] x) && int32(c)==0 -> x
(SUBQconst [0] x) -> x
(SUBLconst [c] x) && int32(c) == 0 -> x
(ANDQconst [0] _) -> (MOVQconst [0])
(ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0])
(ANDQconst [-1] x) -> x
(ANDLconst [c] x) && int32(c)==-1 -> x
(ORQconst [0] x) -> x
(ORLconst [c] x) && int32(c)==0 -> x
(ORQconst [-1] _) -> (MOVQconst [-1])
(ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1])
(XORQconst [0] x) -> x
(XORLconst [c] x) && int32(c)==0 -> x
// TODO: since we got rid of the W/B versions, we might miss
// things like (ANDLconst [0x100] x) which were formerly
// (ANDBconst [0] x). Probably doesn't happen very often.
// If we cared, we might do:
// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])

// Convert constant subtracts to constant adds
(SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
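
// Why the c != -(1<<31) guard: ADDQconst's constant must fit in a signed
// 32-bit immediate, and negating -1<<31 yields 1<<31, which does not fit.
// The 32-bit form needs no guard because its result wraps: in 32-bit
// arithmetic, adding -1<<31 and subtracting -1<<31 are the same operation
// (2^31 == -2^31 mod 2^32), as a quick Go check confirms:
//
//	c := int32(-1 << 31)
//	fmt.Println(-c == c) // true: negation wraps back to math.MinInt32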

// generic constant folding
// TODO: more of this
(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
(SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
(SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
(NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
(ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
(XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
(NOTL (MOVLconst [c])) -> (MOVLconst [^c])

// generic simplifications
// TODO: more of this
(ADDQ x (NEGQ y)) -> (SUBQ x y)
(ADDL x (NEGL y)) -> (SUBL x y)
(SUBQ x x) -> (MOVQconst [0])
(SUBL x x) -> (MOVLconst [0])
(ANDQ x x) -> x
(ANDL x x) -> x
(ORQ x x) -> x
(ORL x x) -> x
(XORQ x x) -> (MOVQconst [0])
(XORL x x) -> (MOVLconst [0])

// checking AND against 0.
(CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
(CMPLconst (ANDL x y) [0]) -> (TESTL x y)
(CMPWconst (ANDL x y) [0]) -> (TESTW x y)
(CMPBconst (ANDL x y) [0]) -> (TESTB x y)
(CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)

// TEST %reg,%reg is shorter than CMP
(CMPQconst x [0]) -> (TESTQ x x)
(CMPLconst x [0]) -> (TESTL x x)
(CMPWconst x [0]) -> (TESTW x x)
(CMPBconst x [0]) -> (TESTB x x)
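
// TEST performs the AND purely for flags, so testing x&y against 0 needs no
// destination register for the AND result, and TEST reg,reg needs no
// immediate bytes at all. Illustrative sketch of both shapes:
//
//	if x&0x10 != 0 { ... } // (CMPQconst (ANDQconst [0x10] x) [0]) -> (TESTQconst [0x10] x)
//	if x == 0 { ... }      // (CMPQconst x [0]) -> (TESTQ x x), a shorter encoding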

// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary.LittleEndian does it.
(ORL x0:(MOVBload [i] {s} p mem)
    s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0)
  && clobber(x1)
  && clobber(s0)
  -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)

(ORL o0:(ORL o1:(ORL
    x0:(MOVBload [i] {s} p mem)
    s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
    s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))
    s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
  && x0.Uses == 1
  && x1.Uses == 1
  && x2.Uses == 1
  && x3.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && s2.Uses == 1
  && o0.Uses == 1
  && o1.Uses == 1
  && mergePoint(b,x0,x1,x2,x3) != nil
  && clobber(x0)
  && clobber(x1)
  && clobber(x2)
  && clobber(x3)
  && clobber(s0)
  && clobber(s1)
  && clobber(s2)
  && clobber(o0)
  && clobber(o1)
  -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)

(ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ
    x0:(MOVBload [i] {s} p mem)
    s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem)))
    s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))
    s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))
    s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))
    s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))
    s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))
    s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
  && x0.Uses == 1
  && x1.Uses == 1
  && x2.Uses == 1
  && x3.Uses == 1
  && x4.Uses == 1
  && x5.Uses == 1
  && x6.Uses == 1
  && x7.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && s2.Uses == 1
  && s3.Uses == 1
  && s4.Uses == 1
  && s5.Uses == 1
  && s6.Uses == 1
  && o0.Uses == 1
  && o1.Uses == 1
  && o2.Uses == 1
  && o3.Uses == 1
  && o4.Uses == 1
  && o5.Uses == 1
  && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  && clobber(x0)
  && clobber(x1)
  && clobber(x2)
  && clobber(x3)
  && clobber(x4)
  && clobber(x5)
  && clobber(x6)
  && clobber(x7)
  && clobber(s0)
  && clobber(s1)
  && clobber(s2)
  && clobber(s3)
  && clobber(s4)
  && clobber(s5)
  && clobber(s6)
  && clobber(o0)
  && clobber(o1)
  && clobber(o2)
  && clobber(o3)
  && clobber(o4)
  && clobber(o5)
  -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)

(ORL x0:(MOVBloadidx1 [i] {s} p idx mem)
    s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0)
  && clobber(x1)
  && clobber(s0)
  -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)

(ORL o0:(ORL o1:(ORL
    x0:(MOVBloadidx1 [i] {s} p idx mem)
    s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
    s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
    s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
  && x0.Uses == 1
  && x1.Uses == 1
  && x2.Uses == 1
  && x3.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && s2.Uses == 1
  && o0.Uses == 1
  && o1.Uses == 1
  && mergePoint(b,x0,x1,x2,x3) != nil
  && clobber(x0)
  && clobber(x1)
  && clobber(x2)
  && clobber(x3)
  && clobber(s0)
  && clobber(s1)
  && clobber(s2)
  && clobber(o0)
  && clobber(o1)
  -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)

(ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ
    x0:(MOVBloadidx1 [i] {s} p idx mem)
    s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
    s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
    s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
    s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))
    s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))
    s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))
    s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
  && x0.Uses == 1
  && x1.Uses == 1
  && x2.Uses == 1
  && x3.Uses == 1
  && x4.Uses == 1
  && x5.Uses == 1
  && x6.Uses == 1
  && x7.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && s2.Uses == 1
  && s3.Uses == 1
  && s4.Uses == 1
  && s5.Uses == 1
  && s6.Uses == 1
  && o0.Uses == 1
  && o1.Uses == 1
  && o2.Uses == 1
  && o3.Uses == 1
  && o4.Uses == 1
  && o5.Uses == 1
  && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  && clobber(x0)
  && clobber(x1)
  && clobber(x2)
  && clobber(x3)
  && clobber(x4)
  && clobber(x5)
  && clobber(x6)
  && clobber(x7)
  && clobber(s0)
  && clobber(s1)
  && clobber(s2)
  && clobber(s3)
  && clobber(s4)
  && clobber(s5)
  && clobber(s6)
  && clobber(o0)
  && clobber(o1)
  && clobber(o2)
  && clobber(o3)
  && clobber(o4)
  && clobber(o5)
  -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
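
// The shape matched above is what the inlined body of, for example,
// encoding/binary.LittleEndian.Uint32 produces:
//
//	func (littleEndian) Uint32(b []byte) uint32 {
//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//	}
//
// Four one-byte loads ORed together at shifts 8/16/24 collapse into a single
// MOVLload; x86 permits the unaligned access. The Uses == 1 and clobber
// conditions ensure the partial loads and shifts have no other consumers,
// and mergePoint picks a block where the combined load can legally be
// placed.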

// Combine constant stores into larger (unaligned) stores.
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)

(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
(MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
(MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)

(MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
(MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
  && x.Uses == 1
  && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  && clobber(x)
  -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)

// Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVWstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVWstore [i-1] {s} p w0 mem)
(MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVLstore [i-2] {s} p w mem)
(MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVLstore [i-2] {s} p w0 mem)
(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVQstore [i-4] {s} p w mem)
(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVQstore [i-4] {s} p w0 mem)

(MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
(MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
(MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
(MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
(MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVQstoreidx1 [i-4] {s} p idx w mem)
(MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVQstoreidx1 [i-4] {s} p idx w0 mem)

(MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
(MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
(MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
(MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  && x.Uses == 1
  && clobber(x)
  -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
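
// These are the store-direction mirror of the load combining above, matching
// what, for example, encoding/binary.LittleEndian.PutUint32 compiles to:
//
//	func (littleEndian) PutUint32(b []byte, v uint32) {
//		b[0] = byte(v)
//		b[1] = byte(v >> 8)
//		b[2] = byte(v >> 16)
//		b[3] = byte(v >> 24)
//	}
//
// Adjacent byte stores of w, w>>8, ... fuse pairwise into one wider store of
// w. The constant-store rules play the same role for initializing adjacent
// fields, e.g. gluing two MOVBstoreconst at neighboring offsets into one
// MOVWstoreconst with both bytes packed into its value.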

// amd64p32 rules
// same as the rules above, but with 32- instead of 64-bit pointer arithmetic.
// LEAQ,ADDQ -> LEAL,ADDL
(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)

(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
	(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
	(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
	(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
	(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)

(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)

// Merge ADDQconst and LEAQ into atomic loads.
(MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
	(MOVQatomicload [off1+off2] {sym} ptr mem)
(MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
	(MOVLatomicload [off1+off2] {sym} ptr mem)
(MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Merge ADDQconst and LEAQ into atomic stores.
(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
	(XCHGQ [off1+off2] {sym} val ptr mem)
(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
	(XCHGL [off1+off2] {sym} val ptr mem)
(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)

// Merge ADDQconst into atomic adds.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
	(XADDQlock [off1+off2] {sym} val ptr mem)
(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
	(XADDLlock [off1+off2] {sym} val ptr mem)

// Merge ADDQconst into atomic compare and swaps.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
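
// A hedged sketch of when the atomic merges fire (the struct and function
// are hypothetical):
//
//	type counters struct{ a, b int64 }
//
//	func bump(c *counters) {
//		atomic.AddInt64(&c.b, 1) // lowers to XADDQlock
//	}
//
// &c.b computes as (ADDQconst [8] c), which the XADDQlock rule folds into
// the instruction's offset, so the LOCK XADDQ addresses 8(c) directly. The
// atomic loads additionally accept LEAQ (e.g. for package-level variables);
// per the TODOs above, the XADD and CMPXCHG forms currently cannot.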