// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64 x y) -> (ADDQ x y)
(AddPtr x y) -> (ADDQ x y)
(Add32 x y) -> (ADDL x y)
(Add16 x y) -> (ADDW x y)
(Add8 x y) -> (ADDB x y)
(Add32F x y) -> (ADDSS x y)
(Add64F x y) -> (ADDSD x y)

(Sub64 x y) -> (SUBQ x y)
(SubPtr x y) -> (SUBQ x y)
(Sub32 x y) -> (SUBL x y)
(Sub16 x y) -> (SUBW x y)
(Sub8 x y) -> (SUBB x y)
(Sub32F x y) -> (SUBSS x y)
(Sub64F x y) -> (SUBSD x y)

(Mul64 x y) -> (MULQ x y)
(Mul32 x y) -> (MULL x y)
(Mul16 x y) -> (MULW x y)
(Mul8 x y) -> (MULB x y)
(Mul32F x y) -> (MULSS x y)
(Mul64F x y) -> (MULSD x y)

(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)

(Div64 x y) -> (DIVQ x y)
(Div64u x y) -> (DIVQU x y)
(Div32 x y) -> (DIVL x y)
(Div32u x y) -> (DIVLU x y)
(Div16 x y) -> (DIVW x y)
(Div16u x y) -> (DIVWU x y)
(Div8 x y) -> (DIVW (SignExt8to16 x) (SignExt8to16 y))
(Div8u x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))

(Hmul64 x y) -> (HMULQ x y)
(Hmul64u x y) -> (HMULQU x y)
(Hmul32 x y) -> (HMULL x y)
(Hmul32u x y) -> (HMULLU x y)
(Hmul16 x y) -> (HMULW x y)
(Hmul16u x y) -> (HMULWU x y)
(Hmul8 x y) -> (HMULB x y)
(Hmul8u x y) -> (HMULBU x y)

(Avg64u x y) -> (AVGQU x y)

(Mod64 x y) -> (MODQ x y)
(Mod64u x y) -> (MODQU x y)
(Mod32 x y) -> (MODL x y)
(Mod32u x y) -> (MODLU x y)
(Mod16 x y) -> (MODW x y)
(Mod16u x y) -> (MODWU x y)
(Mod8 x y) -> (MODW (SignExt8to16 x) (SignExt8to16 y))
(Mod8u x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))

(And64 x y) -> (ANDQ x y)
(And32 x y) -> (ANDL x y)
(And16 x y) -> (ANDW x y)
(And8 x y) -> (ANDB x y)

(Or64 x y) -> (ORQ x y)
(Or32 x y) -> (ORL x y)
(Or16 x y) -> (ORW x y)
(Or8 x y) -> (ORB x y)

(Xor64 x y) -> (XORQ x y)
(Xor32 x y) -> (XORL x y)
(Xor16 x y) -> (XORW x y)
(Xor8 x y) -> (XORB x y)

(Neg64 x) -> (NEGQ x)
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGW x)
(Neg8 x) -> (NEGB x)
// Floating-point negation flips the sign bit by XORing with -0.0.
(Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))

(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
(Com16 x) -> (NOTW x)
(Com8 x) -> (NOTB x)

// Note: the CMPQconst [0] below is redundant because BSF already sets Z,
// but it is not clear how to remove it.
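// What the Ctz lowering below computes (an explanatory sketch, not compiler
// output): BSF finds the index of the lowest set bit but leaves its result
// undefined when the input is 0, so the conditional move supplies the word
// size for that case. For Ctz64, roughly:
//
//	if x == 0 {
//		result = 64
//	} else {
//		result = bsf(x) // index of the lowest set bit
//	}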
(Ctz64 <t> x) -> (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
(Ctz32 <t> x) -> (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
(Ctz16 <t> x) -> (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])

(CMOVQEQconst x (InvertFlags y) [c]) -> (CMOVQNEconst x y [c])
(CMOVLEQconst x (InvertFlags y) [c]) -> (CMOVLNEconst x y [c])
(CMOVWEQconst x (InvertFlags y) [c]) -> (CMOVWNEconst x y [c])

(CMOVQEQconst _ (FlagEQ) [c]) -> (Const64 [c])
(CMOVLEQconst _ (FlagEQ) [c]) -> (Const32 [c])
(CMOVWEQconst _ (FlagEQ) [c]) -> (Const16 [c])

(CMOVQEQconst x (FlagLT_ULT)) -> x
(CMOVLEQconst x (FlagLT_ULT)) -> x
(CMOVWEQconst x (FlagLT_ULT)) -> x

(CMOVQEQconst x (FlagLT_UGT)) -> x
(CMOVLEQconst x (FlagLT_UGT)) -> x
(CMOVWEQconst x (FlagLT_UGT)) -> x

(CMOVQEQconst x (FlagGT_ULT)) -> x
(CMOVLEQconst x (FlagGT_ULT)) -> x
(CMOVWEQconst x (FlagGT_ULT)) -> x

(CMOVQEQconst x (FlagGT_UGT)) -> x
(CMOVLEQconst x (FlagGT_UGT)) -> x
(CMOVWEQconst x (FlagGT_UGT)) -> x

(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)

(Sqrt x) -> (SQRTSD x)

// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 x) -> (MOVBQSX x)
(SignExt8to32 x) -> (MOVBQSX x)
(SignExt8to64 x) -> (MOVBQSX x)
(SignExt16to32 x) -> (MOVWQSX x)
(SignExt16to64 x) -> (MOVWQSX x)
(SignExt32to64 x) -> (MOVLQSX x)

(ZeroExt8to16 x) -> (MOVBQZX x)
(ZeroExt8to32 x) -> (MOVBQZX x)
(ZeroExt8to64 x) -> (MOVBQZX x)
(ZeroExt16to32 x) -> (MOVWQZX x)
(ZeroExt16to64 x) -> (MOVWQZX x)
(ZeroExt32to64 x) -> (MOVLQZX x)

(Cvt32to32F x) -> (CVTSL2SS x)
(Cvt32to64F x) -> (CVTSL2SD x)
(Cvt64to32F x) -> (CVTSQ2SS x)
(Cvt64to64F x) -> (CVTSQ2SD x)

(Cvt32Fto32 x) -> (CVTTSS2SL x)
(Cvt32Fto64 x) -> (CVTTSS2SQ x)
(Cvt64Fto32 x) -> (CVTTSD2SL x)
(Cvt64Fto64 x) -> (CVTTSD2SQ x)

(Cvt32Fto64F x) -> (CVTSS2SD x)
(Cvt64Fto32F x) -> (CVTSD2SS x)

// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//	result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
// Note: for small shifts we generate 32 bits of mask even when we don't need it all.
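// How the mask is materialized (an explanatory sketch of the instruction
// semantics): CMPQconst y [64] sets the carry flag exactly when y < 64
// (unsigned), and SBBQcarrymask subtracts a register from itself with
// borrow, yielding 0 - CF, i.e. all ones for an in-range shift and 0
// otherwise:
//
//	mask := uint64(0)
//	if y < 64 {
//		mask = ^uint64(0)
//	}
//	result = (x << y) & mask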
(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Lsh64x8 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))

(Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh16x64 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
(Lsh16x32 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
(Lsh16x16 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
(Lsh16x8 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))

(Lsh8x64 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
(Lsh8x32 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
(Lsh8x16 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
(Lsh8x8 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))

(Lrot64 <t> x [c]) -> (ROLQconst <t> [c&63] x)
(Lrot32 <t> x [c]) -> (ROLLconst <t> [c&31] x)
(Lrot16 <t> x [c]) -> (ROLWconst <t> [c&15] x)
(Lrot8 <t> x [c]) -> (ROLBconst <t> [c&7] x)

(Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Rsh64Ux8 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))

(Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Rsh16Ux64 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
(Rsh16Ux32 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
(Rsh16Ux16 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
(Rsh16Ux8 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))

(Rsh8Ux64 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
(Rsh8Ux32 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
(Rsh8Ux16 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
(Rsh8Ux8 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
// Note: for small shift widths we generate 32 bits of mask even when we don't need it all.
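// Effect of the OR/NOT trick below (an explanatory sketch): when the shift
// amount is in range, the negated carry mask is 0 and the OR is a no-op;
// when it is out of range, the amount becomes all ones, and since SAR only
// uses the low bits of its count, x is shifted by width-1, replicating the
// sign bit:
//
//	if y >= 64 {
//		y = 63 // effectively; result is 0 or -1, matching the sign of x
//	}
//	result = x >> y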
(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
(Rsh64x16 <t> x y) -> (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
(Rsh64x8 <t> x y) -> (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))

(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
(Rsh32x16 <t> x y) -> (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
(Rsh32x8 <t> x y) -> (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))

(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
(Rsh16x16 <t> x y) -> (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
(Rsh16x8 <t> x y) -> (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))

(Rsh8x64 <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
(Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
(Rsh8x16 <t> x y) -> (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
(Rsh8x8 <t> x y) -> (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))

(Less64 x y) -> (SETL (CMPQ x y))
(Less32 x y) -> (SETL (CMPL x y))
(Less16 x y) -> (SETL (CMPW x y))
(Less8 x y) -> (SETL (CMPB x y))
(Less64U x y) -> (SETB (CMPQ x y))
(Less32U x y) -> (SETB (CMPL x y))
(Less16U x y) -> (SETB (CMPW x y))
(Less8U x y) -> (SETB (CMPB x y))
// Use SETGF with reversed operands to dodge the NaN case.
(Less64F x y) -> (SETGF (UCOMISD y x))
(Less32F x y) -> (SETGF (UCOMISS y x))

(Leq64 x y) -> (SETLE (CMPQ x y))
(Leq32 x y) -> (SETLE (CMPL x y))
(Leq16 x y) -> (SETLE (CMPW x y))
(Leq8 x y) -> (SETLE (CMPB x y))
(Leq64U x y) -> (SETBE (CMPQ x y))
(Leq32U x y) -> (SETBE (CMPL x y))
(Leq16U x y) -> (SETBE (CMPW x y))
(Leq8U x y) -> (SETBE (CMPB x y))
// Use SETGEF with reversed operands to dodge the NaN case.
(Leq64F x y) -> (SETGEF (UCOMISD y x))
(Leq32F x y) -> (SETGEF (UCOMISS y x))

(Greater64 x y) -> (SETG (CMPQ x y))
(Greater32 x y) -> (SETG (CMPL x y))
(Greater16 x y) -> (SETG (CMPW x y))
(Greater8 x y) -> (SETG (CMPB x y))
(Greater64U x y) -> (SETA (CMPQ x y))
(Greater32U x y) -> (SETA (CMPL x y))
(Greater16U x y) -> (SETA (CMPW x y))
(Greater8U x y) -> (SETA (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
// the bug is accommodated when the assembly is generated.
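// Why the unordered (NaN) case falls out correctly (an explanatory sketch,
// assuming SETGF/SETGEF assemble to the unsigned SETA/SETAE forms): UCOMIS
// reports unordered operands as ZF=PF=CF=1, and SETA/SETAE require CF=0,
// so any comparison involving a NaN yields false, matching Go semantics.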
(Greater64F x y) -> (SETGF (UCOMISD x y))
(Greater32F x y) -> (SETGF (UCOMISS x y))

(Geq64 x y) -> (SETGE (CMPQ x y))
(Geq32 x y) -> (SETGE (CMPL x y))
(Geq16 x y) -> (SETGE (CMPW x y))
(Geq8 x y) -> (SETGE (CMPB x y))
(Geq64U x y) -> (SETAE (CMPQ x y))
(Geq32U x y) -> (SETAE (CMPL x y))
(Geq16U x y) -> (SETAE (CMPW x y))
(Geq8U x y) -> (SETAE (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
// the bug is accommodated when the assembly is generated.
(Geq64F x y) -> (SETGEF (UCOMISD x y))
(Geq32F x y) -> (SETGEF (UCOMISS x y))

(Eq64 x y) -> (SETEQ (CMPQ x y))
(Eq32 x y) -> (SETEQ (CMPL x y))
(Eq16 x y) -> (SETEQ (CMPW x y))
(Eq8 x y) -> (SETEQ (CMPB x y))
(EqPtr x y) -> (SETEQ (CMPQ x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))

(Neq64 x y) -> (SETNE (CMPQ x y))
(Neq32 x y) -> (SETNE (CMPL x y))
(Neq16 x y) -> (SETNE (CMPW x y))
(Neq8 x y) -> (SETNE (CMPB x y))
(NeqPtr x y) -> (SETNE (CMPQ x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))

(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)

// These more-specific FP versions of the Store pattern should come first.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)

(Store [8] ptr val mem) -> (MOVQstore ptr val mem)
(Store [4] ptr val mem) -> (MOVLstore ptr val mem)
(Store [2] ptr val mem) -> (MOVWstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)

// We want this to stick out so the to/from ptr conversion is obvious.
(Convert <t> x mem) -> (MOVQconvert <t> x mem)

// checks
(IsNonNil p) -> (SETNE (TESTQ p p))
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)

// Small moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
(Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
(Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) -> (MOVOstore dst (MOVOload src mem) mem)
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [6] dst src mem) ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [7] dst src mem) ->
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [size] dst src mem) && size > 8 && size < 16 ->
	(MOVQstore [size-8] dst (MOVQload [size-8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
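// Worked example of the overlapping trick above: Move [12] issues an 8-byte
// load/store at offset 4 on top of an 8-byte load/store at offset 0; bytes
// 4-7 are copied twice, which is harmless and avoids a third instruction
// pair. Move [7] does the same with two 4-byte moves at offsets 0 and 3.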
// Adjust moves to be a multiple of 16 bytes.
(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 <= 8 ->
	(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
		(MOVQstore dst (MOVQload src mem) mem))
(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 > 8 ->
	(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
		(MOVOstore dst (MOVOload src mem) mem))

// Medium copying uses a duff device.
(Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice ->
	(DUFFCOPY [14*(64-size/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode:
//	MOVUPS	(SI), X0
//	ADDQ	$16, SI
//	MOVUPS	X0, (DI)
//	ADDQ	$16, DI
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
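// Worked example of the offset arithmetic: a 1024-byte move (64 blocks)
// becomes DUFFCOPY [14*(64-64)] = [0], entering at the top so all 64 blocks
// run, while a 32-byte move becomes DUFFCOPY [14*(64-2)] = [868], entering
// so that only the last two 16-byte blocks execute.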
// Large copying uses REP MOVSQ.
(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
	(REPMOVSQ dst src (MOVQconst [size/8]) mem)

(Not x) -> (XORBconst [1] x)

(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)

(Const8 [val]) -> (MOVBconst [val])
(Const16 [val]) -> (MOVWconst [val])
(Const32 [val]) -> (MOVLconst [val])
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstNil) -> (MOVQconst [0])
(ConstBool [b]) -> (MOVBconst [b])

(Addr {sym} base) -> (LEAQ {sym} base)

(ITab (Load ptr mem)) -> (MOVQload ptr mem)

// block rewrites
(If (SETL cmp) yes no) -> (LT cmp yes no)
(If (SETLE cmp) yes no) -> (LE cmp yes no)
(If (SETG cmp) yes no) -> (GT cmp yes no)
(If (SETGE cmp) yes no) -> (GE cmp yes no)
(If (SETEQ cmp) yes no) -> (EQ cmp yes no)
(If (SETNE cmp) yes no) -> (NE cmp yes no)
(If (SETB cmp) yes no) -> (ULT cmp yes no)
(If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF cmp) yes no) -> (UGT cmp yes no)
(If (SETGEF cmp) yes no) -> (UGE cmp yes no)
(If (SETEQF cmp) yes no) -> (EQF cmp yes no)
(If (SETNEF cmp) yes no) -> (NEF cmp yes no)

(If cond yes no) -> (NE (TESTB cond cond) yes no)

(NE (TESTB (SETL cmp)) yes no) -> (LT cmp yes no)
(NE (TESTB (SETLE cmp)) yes no) -> (LE cmp yes no)
(NE (TESTB (SETG cmp)) yes no) -> (GT cmp yes no)
(NE (TESTB (SETGE cmp)) yes no) -> (GE cmp yes no)
(NE (TESTB (SETEQ cmp)) yes no) -> (EQ cmp yes no)
(NE (TESTB (SETNE cmp)) yes no) -> (NE cmp yes no)
(NE (TESTB (SETB cmp)) yes no) -> (ULT cmp yes no)
(NE (TESTB (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp)) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETGEF cmp)) yes no) -> (UGE cmp yes no)
(NE (TESTB (SETEQF cmp)) yes no) -> (EQF cmp yes no)
(NE (TESTB (SETNEF cmp)) yes no) -> (NEF cmp yes no)

// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
// (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))

(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Rules below here apply some simple optimizations after lowering.
// TODO: Should this be a separate pass?

// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
(ADDW x (MOVWconst [c])) -> (ADDWconst [c] x)
(ADDW (MOVWconst [c]) x) -> (ADDWconst [c] x)
(ADDB x (MOVBconst [c])) -> (ADDBconst [c] x)
(ADDB (MOVBconst [c]) x) -> (ADDBconst [c] x)

(SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
(SUBW x (MOVWconst [c])) -> (SUBWconst x [c])
(SUBW (MOVWconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
(SUBB x (MOVBconst [c])) -> (SUBBconst x [c])
(SUBB (MOVBconst [c]) x) -> (NEGB (SUBBconst <v.Type> x [c]))

(MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
(MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x)
(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)
(MULW x (MOVWconst [c])) -> (MULWconst [c] x)
(MULW (MOVWconst [c]) x) -> (MULWconst [c] x)
(MULB x (MOVBconst [c])) -> (MULBconst [c] x)
(MULB (MOVBconst [c]) x) -> (MULBconst [c] x)

(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
(ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x)
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)
(ANDW x (MOVLconst [c])) -> (ANDWconst [c] x)
(ANDW (MOVLconst [c]) x) -> (ANDWconst [c] x)
(ANDW x (MOVWconst [c])) -> (ANDWconst [c] x)
(ANDW (MOVWconst [c]) x) -> (ANDWconst [c] x)
(ANDB x (MOVLconst [c])) -> (ANDBconst [c] x)
(ANDB (MOVLconst [c]) x) -> (ANDBconst [c] x)
(ANDB x (MOVBconst [c])) -> (ANDBconst [c] x)
(ANDB (MOVBconst [c]) x) -> (ANDBconst [c] x)

(ANDBconst [c] (ANDBconst [d] x)) -> (ANDBconst [c & d] x)
(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)

(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)
(ORW x (MOVWconst [c])) -> (ORWconst [c] x)
(ORW (MOVWconst [c]) x) -> (ORWconst [c] x)
(ORB x (MOVBconst [c])) -> (ORBconst [c] x)
(ORB (MOVBconst [c]) x) -> (ORBconst [c] x)

(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
(XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x)
(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)
(XORW x (MOVWconst [c])) -> (XORWconst [c] x)
(XORW (MOVWconst [c]) x) -> (XORWconst [c] x)
(XORB x (MOVBconst [c])) -> (XORBconst [c] x)
(XORB (MOVBconst [c]) x) -> (XORBconst [c] x)

(SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVWconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVBconst [c])) -> (SHLQconst [c&63] x)

(SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVWconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVBconst [c])) -> (SHLLconst [c&31] x)

(SHLW x (MOVQconst [c])) -> (SHLWconst [c&31] x)
(SHLW x (MOVLconst [c])) -> (SHLWconst [c&31] x)
(SHLW x (MOVWconst [c])) -> (SHLWconst [c&31] x)
(SHLW x (MOVBconst [c])) -> (SHLWconst [c&31] x)

(SHLB x (MOVQconst [c])) -> (SHLBconst [c&31] x)
(SHLB x (MOVLconst [c])) -> (SHLBconst [c&31] x)
(SHLB x (MOVWconst [c])) -> (SHLBconst [c&31] x)
(SHLB x (MOVBconst [c])) -> (SHLBconst [c&31] x)

(SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVWconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVBconst [c])) -> (SHRQconst [c&63] x)

(SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVWconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVBconst [c])) -> (SHRLconst [c&31] x)

(SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVWconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVBconst [c])) -> (SHRWconst [c&31] x)

(SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVWconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVBconst [c])) -> (SHRBconst [c&31] x)

(SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVWconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVBconst [c])) -> (SARQconst [c&63] x)

(SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVWconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVBconst [c])) -> (SARLconst [c&31] x)

(SARW x (MOVQconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVLconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVWconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVBconst [c])) -> (SARWconst [c&31] x)

(SARB x (MOVQconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVLconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVWconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x)

(SARB x (ANDBconst [31] y)) -> (SARB x y)
(SARW x (ANDWconst [31] y)) -> (SARW x y)
(SARL x (ANDLconst [31] y)) -> (SARL x y)
(SARQ x (ANDQconst [63] y)) -> (SARQ x y)

(SHLB x (ANDBconst [31] y)) -> (SHLB x y)
(SHLW x (ANDWconst [31] y)) -> (SHLW x y)
(SHLL x (ANDLconst [31] y)) -> (SHLL x y)
(SHLQ x (ANDQconst [63] y)) -> (SHLQ x y)

(SHRB x (ANDBconst [31] y)) -> (SHRB x y)
(SHRW x (ANDWconst [31] y)) -> (SHRW x y)
(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHLW x (MOVWconst [24]))), but just in case.
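// For example, (SHLW x (MOVWconst [24])) must keep the count 24: the
// hardware shifts the 16-bit value entirely away and produces 0, whereas
// masking the count down to 4 bits would incorrectly shift by 8.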
(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVWconst [c])) -> (CMPWconst x [c])
(CMPW (MOVWconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
(CMPB x (MOVBconst [c])) -> (CMPBconst x [c])
(CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst x [c]))

// strength reduction
// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
//	1 - addq, shlq, leaq, negq
//	3 - imulq
// This limits the rewrites to two instructions.
// TODO: 27, 81
(MULQconst [-1] x) -> (NEGQ x)
(MULQconst [0] _) -> (MOVQconst [0])
(MULQconst [1] x) -> x
(MULQconst [3] x) -> (LEAQ2 x x)
(MULQconst [5] x) -> (LEAQ4 x x)
(MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x)
(MULQconst [9] x) -> (LEAQ8 x x)
(MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x))
(MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x))
(MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
(MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x))
(MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x))
(MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x))
(MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x))

(MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x)
(MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
(MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
(MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
(MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
(MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
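// Worked example of the LEA arithmetic above: LEAQ2 x y computes x + 2*y
// (LEAQ4 and LEAQ8 scale by 4 and 8), so (MULQconst [11] x) rewrites to
// (LEAQ2 x (LEAQ4 x x)) = x + 2*(x + 4*x) = 11*x, two cheap instructions
// instead of one imulq.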
// combine add/shift into LEAQ
(ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
(ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
(ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
(ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
(ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
(ADDQ x (ADDQ y x)) -> (LEAQ2 y x)

// combine ADDQ/ADDQconst into LEAQ1
(ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y)
(ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
(ADDQ x (ADDQconst [c] y)) -> (LEAQ1 [c] x y)

// fold ADDQ into LEAQ
(ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)

// fold ADDQconst into LEAQx
(ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
(ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
(ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
(ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
(LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ1 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
(LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
(LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
(LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
(LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
(LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
(LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)

// fold shifts into LEAQx
(LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
(LEAQ1 [c] {s} (SHLQconst [1] x) y) -> (LEAQ2 [c] {s} y x)
(LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
(LEAQ1 [c] {s} (SHLQconst [2] x) y) -> (LEAQ4 [c] {s} y x)
(LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
(LEAQ1 [c] {s} (SHLQconst [3] x) y) -> (LEAQ8 [c] {s} y x)

(LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
(LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
(LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)

// reverse ordering of compare instruction
(SETL (InvertFlags x)) -> (SETG x)
(SETG (InvertFlags x)) -> (SETL x)
(SETB (InvertFlags x)) -> (SETA x)
(SETA (InvertFlags x)) -> (SETB x)
(SETLE (InvertFlags x)) -> (SETGE x)
(SETGE (InvertFlags x)) -> (SETLE x)
(SETBE (InvertFlags x)) -> (SETAE x)
(SETAE (InvertFlags x)) -> (SETBE x)
(SETEQ (InvertFlags x)) -> (SETEQ x)
(SETNE (InvertFlags x)) -> (SETNE x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
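// A minimal illustration (hypothetical Go source, not part of this file):
//
//	func f(p *int8) int64 { return int64(*p) }
//
// compiles to a single MOVBQSXload instead of a MOVBload followed by a
// separate MOVBQSX, provided the load has no other uses.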
(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)

(MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
(MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)

// replace load from same location as preceding store with copy
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

// Fold extensions and ANDs together.
(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
(MOVLQZX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x)
(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x)
(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)

// Don't extend before storing
(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)

// fold constants into memory operations
// Note that this is not always a good idea because if not all the uses of
// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
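// For example, (MOVQload [8] {sym} (ADDQconst [16] ptr) mem) becomes
// (MOVQload [24] {sym} ptr mem): the add is absorbed into the load's
// displacement.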
(MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
(MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
(MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
(MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload [off1+off2] {sym} ptr mem)

(MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
(MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
(MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore [off1+off2] {sym} ptr val mem)

// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
	(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) && validOff(off) ->
	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) && validOff(off) ->
	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)

// Fold address offsets into constant stores.
(MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)

// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
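// For example, (MOVQload [8] {sym1} (LEAQ [16] {sym2} base) mem) becomes
// (MOVQload [24] {mergeSym(sym1,sym2)} base mem), so the symbol actually
// being read appears on the load itself.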
(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)

// generating indexed loads and stores
(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)

(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)

(MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
(MOVWload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
(MOVLload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
(MOVQload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVQloadidx1 [off] {sym} ptr idx mem)
(MOVSSload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
(MOVSDload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
(MOVWstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
(MOVLstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
(MOVQstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVQstoreidx1 [off] {sym} ptr idx val mem)
(MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
(MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)

(MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
	(MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)

(MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
(MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
(MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
(MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)

// combine SHLQ into indexed loads and stores
(MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
(MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem)
(MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
(MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem)
(MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
(MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
(MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)

// combine ADDQ into indexed loads and stores
(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)

(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)

(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)

(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
	(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)

(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
	(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)

// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)

// LEAQ into LEAQ1
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ1 into LEAQ
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ into LEAQ[248]
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
	(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
	(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ[248] into LEAQ
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)

// lower Zero instructions with word sizes
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)

(Zero [3] destptr mem) ->
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [5] destptr mem) ->
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [6] destptr mem) ->
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [7] destptr mem) ->
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [0] destptr mem))

// Strip off any fractional word zeroing.
(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
	(Zero [size-size%8] (ADDQconst destptr [size%8])
		(MOVQstoreconst [0] destptr mem))
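// Worked example of the fractional strip above: Zero [13] zeroes bytes
// [0,8) with one MOVQstoreconst, advances destptr by 13%8 = 5, and recurses
// as Zero [8] covering bytes [5,13); bytes [5,8) are written twice, which
// is harmless.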
// Strip off any fractional word zeroing.
(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
    (Zero [size-size%8] (ADDQconst destptr [size%8])
        (MOVQstoreconst [0] destptr mem))

// Zero small numbers of words directly.
(Zero [16] destptr mem) ->
    (MOVQstoreconst [makeValAndOff(0,8)] destptr
        (MOVQstoreconst [0] destptr mem))
(Zero [24] destptr mem) ->
    (MOVQstoreconst [makeValAndOff(0,16)] destptr
        (MOVQstoreconst [makeValAndOff(0,8)] destptr
            (MOVQstoreconst [0] destptr mem)))
(Zero [32] destptr mem) ->
    (MOVQstoreconst [makeValAndOff(0,24)] destptr
        (MOVQstoreconst [makeValAndOff(0,16)] destptr
            (MOVQstoreconst [makeValAndOff(0,8)] destptr
                (MOVQstoreconst [0] destptr mem))))

// Medium zeroing uses Duff's device.
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
    (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
    (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)

// Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
    (REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
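// Taken together, the Zero rules pick a strategy by size class. A rough Go
// paraphrase of the dispatch, assuming size is already a multiple of 8 after
// the fractional-word strip (zeroStrategy is illustrative, not a compiler API):
//
//	func zeroStrategy(size int64, noDuffDevice bool) string {
//		switch {
//		case size <= 8:
//			return "one constant store"
//		case size <= 32:
//			return "unrolled MOVQstoreconst stores"
//		case size <= 1024 && !noDuffDevice:
//			return "DUFFZERO"
//		default:
//			return "REP STOSQ"
//		}
//	}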
// Constant comparisons.
(CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
(CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
(CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
(CMPWconst (MOVWconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
(CMPWconst (MOVWconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
(CMPWconst (MOVWconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
(CMPBconst (MOVBconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
(CMPBconst (MOVBconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
(CMPBconst (MOVBconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)

// Other known comparisons.
(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
(CMPBconst (ANDBconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
// TODO: DIVxU also.

// Absorb flag constants into SBB ops.
(SBBQcarrymask (FlagEQ)) -> (MOVQconst [0])
(SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1])
(SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0])
(SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1])
(SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0])
(SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
(SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
(SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
(SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
(SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
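// Every flag constant records both orderings of the original operands, signed
// and unsigned, so one classification feeds the SBB rules above and the
// branch/SETxx rules below. A Go sketch of the 64-bit case (constFlags is a
// made-up name; the rule guards above are the authoritative version):
//
//	func constFlags(x, y int64) string {
//		switch {
//		case x == y:
//			return "FlagEQ"
//		case x < y && uint64(x) < uint64(y):
//			return "FlagLT_ULT"
//		case x < y && uint64(x) > uint64(y):
//			return "FlagLT_UGT"
//		case x > y && uint64(x) < uint64(y):
//			return "FlagGT_ULT"
//		default: // x > y && uint64(x) > uint64(y)
//			return "FlagGT_UGT"
//		}
//	}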
// Absorb flag constants into branches.
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT_ULT) yes no) -> (First nil no yes)
(EQ (FlagLT_UGT) yes no) -> (First nil no yes)
(EQ (FlagGT_ULT) yes no) -> (First nil no yes)
(EQ (FlagGT_UGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT_ULT) yes no) -> (First nil yes no)
(NE (FlagLT_UGT) yes no) -> (First nil yes no)
(NE (FlagGT_ULT) yes no) -> (First nil yes no)
(NE (FlagGT_UGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT_ULT) yes no) -> (First nil yes no)
(LT (FlagLT_UGT) yes no) -> (First nil yes no)
(LT (FlagGT_ULT) yes no) -> (First nil no yes)
(LT (FlagGT_UGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT_ULT) yes no) -> (First nil yes no)
(LE (FlagLT_UGT) yes no) -> (First nil yes no)
(LE (FlagGT_ULT) yes no) -> (First nil no yes)
(LE (FlagGT_UGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT_ULT) yes no) -> (First nil no yes)
(GT (FlagLT_UGT) yes no) -> (First nil no yes)
(GT (FlagGT_ULT) yes no) -> (First nil yes no)
(GT (FlagGT_UGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT_ULT) yes no) -> (First nil no yes)
(GE (FlagLT_UGT) yes no) -> (First nil no yes)
(GE (FlagGT_ULT) yes no) -> (First nil yes no)
(GE (FlagGT_UGT) yes no) -> (First nil yes no)

(ULT (FlagEQ) yes no) -> (First nil no yes)
(ULT (FlagLT_ULT) yes no) -> (First nil yes no)
(ULT (FlagLT_UGT) yes no) -> (First nil no yes)
(ULT (FlagGT_ULT) yes no) -> (First nil yes no)
(ULT (FlagGT_UGT) yes no) -> (First nil no yes)

(ULE (FlagEQ) yes no) -> (First nil yes no)
(ULE (FlagLT_ULT) yes no) -> (First nil yes no)
(ULE (FlagLT_UGT) yes no) -> (First nil no yes)
(ULE (FlagGT_ULT) yes no) -> (First nil yes no)
(ULE (FlagGT_UGT) yes no) -> (First nil no yes)

(UGT (FlagEQ) yes no) -> (First nil no yes)
(UGT (FlagLT_ULT) yes no) -> (First nil no yes)
(UGT (FlagLT_UGT) yes no) -> (First nil yes no)
(UGT (FlagGT_ULT) yes no) -> (First nil no yes)
(UGT (FlagGT_UGT) yes no) -> (First nil yes no)

(UGE (FlagEQ) yes no) -> (First nil yes no)
(UGE (FlagLT_ULT) yes no) -> (First nil no yes)
(UGE (FlagLT_UGT) yes no) -> (First nil yes no)
(UGE (FlagGT_ULT) yes no) -> (First nil no yes)
(UGE (FlagGT_UGT) yes no) -> (First nil yes no)
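// (First nil yes no) always transfers to its first successor, so a branch on
// a known flag becomes an unconditional edge, with yes/no swapped when the
// branch is not taken. A Go sketch of three rows of the tables above
// (branchTaken is a made-up helper):
//
//	func branchTaken(pred, flag string) bool {
//		switch pred {
//		case "EQ":
//			return flag == "FlagEQ"
//		case "LT": // signed: both FlagLT_* rows are taken
//			return flag == "FlagLT_ULT" || flag == "FlagLT_UGT"
//		case "ULT": // unsigned: both *_ULT rows are taken
//			return flag == "FlagLT_ULT" || flag == "FlagGT_ULT"
//		}
//		return false
//	}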
// Absorb flag constants into SETxx ops.
(SETEQ (FlagEQ)) -> (MOVBconst [1])
(SETEQ (FlagLT_ULT)) -> (MOVBconst [0])
(SETEQ (FlagLT_UGT)) -> (MOVBconst [0])
(SETEQ (FlagGT_ULT)) -> (MOVBconst [0])
(SETEQ (FlagGT_UGT)) -> (MOVBconst [0])

(SETNE (FlagEQ)) -> (MOVBconst [0])
(SETNE (FlagLT_ULT)) -> (MOVBconst [1])
(SETNE (FlagLT_UGT)) -> (MOVBconst [1])
(SETNE (FlagGT_ULT)) -> (MOVBconst [1])
(SETNE (FlagGT_UGT)) -> (MOVBconst [1])

(SETL (FlagEQ)) -> (MOVBconst [0])
(SETL (FlagLT_ULT)) -> (MOVBconst [1])
(SETL (FlagLT_UGT)) -> (MOVBconst [1])
(SETL (FlagGT_ULT)) -> (MOVBconst [0])
(SETL (FlagGT_UGT)) -> (MOVBconst [0])

(SETLE (FlagEQ)) -> (MOVBconst [1])
(SETLE (FlagLT_ULT)) -> (MOVBconst [1])
(SETLE (FlagLT_UGT)) -> (MOVBconst [1])
(SETLE (FlagGT_ULT)) -> (MOVBconst [0])
(SETLE (FlagGT_UGT)) -> (MOVBconst [0])

(SETG (FlagEQ)) -> (MOVBconst [0])
(SETG (FlagLT_ULT)) -> (MOVBconst [0])
(SETG (FlagLT_UGT)) -> (MOVBconst [0])
(SETG (FlagGT_ULT)) -> (MOVBconst [1])
(SETG (FlagGT_UGT)) -> (MOVBconst [1])

(SETGE (FlagEQ)) -> (MOVBconst [1])
(SETGE (FlagLT_ULT)) -> (MOVBconst [0])
(SETGE (FlagLT_UGT)) -> (MOVBconst [0])
(SETGE (FlagGT_ULT)) -> (MOVBconst [1])
(SETGE (FlagGT_UGT)) -> (MOVBconst [1])

(SETB (FlagEQ)) -> (MOVBconst [0])
(SETB (FlagLT_ULT)) -> (MOVBconst [1])
(SETB (FlagLT_UGT)) -> (MOVBconst [0])
(SETB (FlagGT_ULT)) -> (MOVBconst [1])
(SETB (FlagGT_UGT)) -> (MOVBconst [0])

(SETBE (FlagEQ)) -> (MOVBconst [1])
(SETBE (FlagLT_ULT)) -> (MOVBconst [1])
(SETBE (FlagLT_UGT)) -> (MOVBconst [0])
(SETBE (FlagGT_ULT)) -> (MOVBconst [1])
(SETBE (FlagGT_UGT)) -> (MOVBconst [0])

(SETA (FlagEQ)) -> (MOVBconst [0])
(SETA (FlagLT_ULT)) -> (MOVBconst [0])
(SETA (FlagLT_UGT)) -> (MOVBconst [1])
(SETA (FlagGT_ULT)) -> (MOVBconst [0])
(SETA (FlagGT_UGT)) -> (MOVBconst [1])

(SETAE (FlagEQ)) -> (MOVBconst [1])
(SETAE (FlagLT_ULT)) -> (MOVBconst [0])
(SETAE (FlagLT_UGT)) -> (MOVBconst [1])
(SETAE (FlagGT_ULT)) -> (MOVBconst [0])
(SETAE (FlagGT_UGT)) -> (MOVBconst [1])

// Remove redundant *const ops
(ADDQconst [0] x) -> x
(ADDLconst [c] x) && int32(c)==0 -> x
(ADDWconst [c] x) && int16(c)==0 -> x
(ADDBconst [c] x) && int8(c)==0 -> x
(SUBQconst [0] x) -> x
(SUBLconst [c] x) && int32(c) == 0 -> x
(SUBWconst [c] x) && int16(c) == 0 -> x
(SUBBconst [c] x) && int8(c) == 0 -> x
(ANDQconst [0] _) -> (MOVQconst [0])
(ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0])
(ANDWconst [c] _) && int16(c)==0 -> (MOVWconst [0])
(ANDBconst [c] _) && int8(c)==0 -> (MOVBconst [0])
(ANDQconst [-1] x) -> x
(ANDLconst [c] x) && int32(c)==-1 -> x
(ANDWconst [c] x) && int16(c)==-1 -> x
(ANDBconst [c] x) && int8(c)==-1 -> x
(ORQconst [0] x) -> x
(ORLconst [c] x) && int32(c)==0 -> x
(ORWconst [c] x) && int16(c)==0 -> x
(ORBconst [c] x) && int8(c)==0 -> x
(ORQconst [-1] _) -> (MOVQconst [-1])
(ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1])
(ORWconst [c] _) && int16(c)==-1 -> (MOVWconst [-1])
(ORBconst [c] _) && int8(c)==-1 -> (MOVBconst [-1])
(XORQconst [0] x) -> x
(XORLconst [c] x) && int32(c)==0 -> x
(XORWconst [c] x) && int16(c)==0 -> x
(XORBconst [c] x) && int8(c)==0 -> x
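// The width-specific guards exist because an AuxInt is carried as an int64;
// "is this 16-bit constant zero" must be asked as int16(c) == 0, not c == 0.
// A Go sketch of the idea (isZeroAtWidth is a made-up helper; it assumes only
// the low-order bits of a narrow constant are meaningful):
//
//	func isZeroAtWidth(c int64, bits uint) bool {
//		switch bits {
//		case 8:
//			return int8(c) == 0
//		case 16:
//			return int16(c) == 0
//		case 32:
//			return int32(c) == 0
//		}
//		return c == 0
//	}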
// generic constant folding
// TODO: more of this
(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c+d))])
(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c+d))])
(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int16(c+d))] x)
(ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [int64(int8(c+d))] x)
(SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
(SUBLconst (MOVLconst [d]) [c]) -> (MOVLconst [int64(int32(d-c))])
(SUBWconst (MOVWconst [d]) [c]) -> (MOVWconst [int64(int16(d-c))])
(SUBBconst (MOVBconst [d]) [c]) -> (MOVBconst [int64(int8(d-c))])
(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
(SUBLconst (SUBLconst x [d]) [c]) -> (ADDLconst [int64(int32(-c-d))] x)
(SUBWconst (SUBWconst x [d]) [c]) -> (ADDWconst [int64(int16(-c-d))] x)
(SUBBconst (SUBBconst x [d]) [c]) -> (ADDBconst [int64(int8(-c-d))] x)
(SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
(NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
(NEGW (MOVWconst [c])) -> (MOVWconst [int64(int16(-c))])
(NEGB (MOVBconst [c])) -> (MOVBconst [int64(int8(-c))])
(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
(MULWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c*d))])
(MULBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c*d))])
(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
(ANDWconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
(ANDBconst [c] (MOVBconst [d])) -> (MOVBconst [c&d])
(ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
(ORWconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
(ORBconst [c] (MOVBconst [d])) -> (MOVBconst [c|d])
(XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
(XORWconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
(XORBconst [c] (MOVBconst [d])) -> (MOVBconst [c^d])
(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
(NOTW (MOVWconst [c])) -> (MOVWconst [^c])
(NOTB (MOVBconst [c])) -> (MOVBconst [^c])

// generic simplifications
// TODO: more of this
(ADDQ x (NEGQ y)) -> (SUBQ x y)
(ADDL x (NEGL y)) -> (SUBL x y)
(ADDW x (NEGW y)) -> (SUBW x y)
(ADDB x (NEGB y)) -> (SUBB x y)
(SUBQ x x) -> (MOVQconst [0])
(SUBL x x) -> (MOVLconst [0])
(SUBW x x) -> (MOVWconst [0])
(SUBB x x) -> (MOVBconst [0])
(ANDQ x x) -> x
(ANDL x x) -> x
(ANDW x x) -> x
(ANDB x x) -> x
(ORQ x x) -> x
(ORL x x) -> x
(ORW x x) -> x
(ORB x x) -> x
(XORQ x x) -> (MOVQconst [0])
(XORL x x) -> (MOVLconst [0])
(XORW x x) -> (MOVWconst [0])
(XORB x x) -> (MOVBconst [0])
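// Narrow folds re-canonicalize their results: the arithmetic happens in
// int64, then the value is truncated to the op's width and sign-extended
// back, as in int64(int16(c+d)). A Go sketch of that round trip (fold16 is a
// made-up name mirroring the ADDWconst/MOVWconst rule):
//
//	func fold16(c, d int64) int64 {
//		return int64(int16(c + d))
//	}
//
// For example, fold16(0x7fff, 1) == -0x8000: 16-bit wraparound, stored
// sign-extended.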
// checking AND against 0.
(CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
(CMPLconst (ANDL x y) [0]) -> (TESTL x y)
(CMPWconst (ANDW x y) [0]) -> (TESTW x y)
(CMPBconst (ANDB x y) [0]) -> (TESTB x y)
(CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
(CMPWconst (ANDWconst [c] x) [0]) -> (TESTWconst [c] x)
(CMPBconst (ANDBconst [c] x) [0]) -> (TESTBconst [c] x)

// TEST %reg,%reg is shorter than CMP
(CMPQconst x [0]) -> (TESTQ x x)
(CMPLconst x [0]) -> (TESTL x x)
(CMPWconst x [0]) -> (TESTW x x)
(CMPBconst x [0]) -> (TESTB x x)

// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary.LittleEndian does it.
(ORW x0:(MOVBload [i] {s} p mem)
     (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)

(ORL (ORL (ORL
     x0:(MOVBload [i] {s} p mem)
     (SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
     (SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))
     (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)

(ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ
     x0:(MOVBload [i] {s} p mem)
     (SHLQconst [8] x1:(MOVBload [i+1] {s} p mem)))
     (SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))
     (SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))
     (SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))
     (SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))
     (SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))
     (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)

(ORW x0:(MOVBloadidx1 [i] {s} p idx mem)
     (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)

(ORL (ORL (ORL
     x0:(MOVBloadidx1 [i] {s} p idx mem)
     (SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
     (SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
     (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)

(ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ
     x0:(MOVBloadidx1 [i] {s} p idx mem)
     (SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
     (SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
     (SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
     (SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))
     (SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))
     (SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))
     (SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
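// The shift-or shape these merges look for is exactly what
// encoding/binary.LittleEndian produces. A Go sketch of the 4-byte pattern
// the MOVLload combine recognizes (load32 is a made-up name):
//
//	func load32(p []byte) uint32 {
//		return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
//	}
//
// After the rewrite, the four byte loads and three ORs collapse into a single
// (possibly unaligned) 32-bit load.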