// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64 x y) -> (ADD x y)
(AddPtr x y) -> (ADD x y)
(Add32 x y) -> (ADD x y)
(Add16 x y) -> (ADD x y)
(Add8 x y) -> (ADD x y)
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)

(Sub64 x y) -> (SUB x y)
(SubPtr x y) -> (SUB x y)
(Sub32 x y) -> (SUB x y)
(Sub16 x y) -> (SUB x y)
(Sub8 x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)

(Mul64 x y) -> (MULLD x y)
(Mul32 x y) -> (MULLW x y)
(Mul16 x y) -> (MULLW x y)
(Mul8 x y) -> (MULLW x y)

(Div64 x y) -> (DIVD x y)
(Div64u x y) -> (DIVDU x y)
(Div32 x y) -> (DIVW x y)
(Div32u x y) -> (DIVWU x y)
(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64 x y) -> (MULHD x y)
(Hmul64u x y) -> (MULHDU x y)
(Hmul32 x y) -> (MULHW x y)
(Hmul32u x y) -> (MULHWU x y)

(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

// Lowering float <-> int
(Cvt32to32F x) -> (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
(Cvt32to64F x) -> (FCFID (Xi2f64 (SignExt32to64 x)))
(Cvt64to32F x) -> (FRSP (FCFID (Xi2f64 x)))
(Cvt64to64F x) -> (FCFID (Xi2f64 x))

(Cvt32Fto32 x) -> (Xf2i64 (FCTIWZ x))
(Cvt32Fto64 x) -> (Xf2i64 (FCTIDZ x))
(Cvt64Fto32 x) -> (Xf2i64 (FCTIWZ x))
(Cvt64Fto64 x) -> (Xf2i64 (FCTIDZ x))

(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F x) -> (FRSP x)

(Round32F x) -> (LoweredRound32F x)
(Round64F x) -> (LoweredRound64F x)

(Sqrt x) -> (FSQRT x)

// Lowering constants
(Const8 [val]) -> (MOVDconst [val])
(Const16 [val]) -> (MOVDconst [val])
(Const32 [val]) -> (MOVDconst [val])
(Const64 [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

// Rotate generation
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
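// Illustrative sketch (not an exhaustive list of matches): a Go rotation
// written with shifts, such as
//	func rotl7(x uint64) uint64 { return x<<7 | x>>57 }
// reaches this point as (OR (SLDconst x [7]) (SRDconst x [57])) once the
// constant-shift rules below have fired; since 57 == 64-7, it is rewritten
// to a single (ROTLconst [7] x).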
(Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

(Lsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (Const64 [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

// large constant shifts
(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])

// large constant signed right shift: only the sign bit remains
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
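// Illustrative sketch of the two groups above: Go defines oversized shift
// counts, so for var x uint64 and var s int64,
//	x >> 70   // (Rsh64Ux64 x (Const64 [70])) -> (MOVDconst [0])
//	s >> 70   // (Rsh64x64 s (Const64 [70])) -> (SRADconst s [63]), all sign bits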
// constant shifts
(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

// non-constant shifts: mask the shift amount so that oversized shifts
// produce the result Go requires (see the sketch after the last of these rules)
(Rsh64x64 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))

(Rsh32x64 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))

(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))

(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))

(Rsh64x32 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))

(Rsh32x32 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))

(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))

(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))

(Rsh64x16 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))

(Rsh32x16 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))

(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))

(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))

(Rsh64x8 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))

(Rsh32x8 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))

(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))

(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
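// How the shift-amount mask above works (a sketch of the intent; the exact
// flag behavior is defined by the PPC64 ops):
//   (ADDconstForCarry [-64] y)  computes y-64 and sets carry iff uint64(y) >= 64
//   (MaskIfNotCarry c)          is -1 if carry is clear, 0 if carry is set
//   (ORN y m) = y | ^m          so the shift amount is y itself when y < 64,
//                               and all ones when y >= 64
// An all-ones amount shifts everything out, so SLD/SRD produce 0 and SRAD
// produces all copies of the sign bit, matching Go's semantics for oversized
// shift counts.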
// Cleaning up shift ops when the shift amount is provably masked small
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x

// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarryClear) -> -1

(Addr {sym} base) -> (MOVDaddr {sym} base)
// (Addr {sym} base) -> (ADDconst {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <types.Int64> [off]) ptr)

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8 x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8 x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8 x y) -> (XOR x y)

(Neg64F x) -> (FNEG x)
(Neg32F x) -> (FNEG x)
(Neg64 x) -> (NEG x)
(Neg32 x) -> (NEG x)
(Neg16 x) -> (NEG x)
(Neg8 x) -> (NEG x)

(Com64 x) -> (NOR x x)
(Com32 x) -> (NOR x x)
(Com16 x) -> (NOR x x)
(Com8 x) -> (NOR x x)

// Lowering boolean ops
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(Not x) -> (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (NOR y y)) -> (ANDN x y)

// Lowering comparisons
(EqB x y) -> (ANDconst [1] (EQV x y))
// Choosing sign or zero extension based on the operands' signedness sets up
// later sign/zero-extension elision.
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPU x y))
(Eq64F x y) -> (Equal (FCMPU x y))
(EqPtr x y) -> (Equal (CMP x y))
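// Sketch of why the signed Eq variants help: (Eq8 x y) with two signed
// operands becomes (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y))), and
// SignExt8to32 lowers to MOVBreg. A value produced by the signed-byte load
// pattern is already a MOVBreg, so the redundant-extension rules further
// down collapse (MOVBreg (MOVBreg _)) and the extra extension disappears.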
(NeqB x y) -> (XOR x y)
// As with Eq8 and Eq16, prefer the sign extension that is likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
(NeqPtr x y) -> (NotEqual (CMP x y))

(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (FLessThan (FCMPU x y))
(Less64F x y) -> (FLessThan (FCMPU x y))

(Less8U x y) -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))

(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (FLessEqual (FCMPU x y))
(Leq64F x y) -> (FLessEqual (FCMPU x y))

(Leq8U x y) -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))

(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater32F x y) -> (FGreaterThan (FCMPU x y))
(Greater64F x y) -> (FGreaterThan (FCMPU x y))

(Greater8U x y) -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))

(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq32F x y) -> (FGreaterEqual (FCMPU x y))
(Geq64F x y) -> (FGreaterEqual (FCMPU x y))

(Geq8U x y) -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPWU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))
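// Worked example of the block-absorption rules below (illustrative only):
// for int64 a, b, the statement `if a < b { ... }` produces
//   (If (LessThan (CMP a b)) yes no)
// which the first block rule below rewrites to
//   (LT (CMP a b) yes no)
// so the boolean result is never materialized in a register.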
// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (FLessThan cc) yes no) -> (FLT cc yes no)
(If (FLessEqual cc) yes no) -> (FLE cc yes no)
(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)

(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)

// Elide compares of bit tests. TODO: need to make both the CC and the result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)

// absorb flag constants into branches
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y) -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y) -> (FlagGT)
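// Example of how these folds compose (a sketch): once constants propagate,
//   (CMPWconst (MOVDconst [7]) [10])
// becomes (FlagLT) by the rules above, and a conditional block such as
//   (LT (FlagLT) yes no)
// collapses to the unconditional (First nil yes no), deleting the dead arm.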
(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) -> (MOVDconst [1])
(Equal (FlagLT)) -> (MOVDconst [0])
(Equal (FlagGT)) -> (MOVDconst [0])

(NotEqual (FlagEQ)) -> (MOVDconst [0])
(NotEqual (FlagLT)) -> (MOVDconst [1])
(NotEqual (FlagGT)) -> (MOVDconst [1])

(LessThan (FlagEQ)) -> (MOVDconst [0])
(LessThan (FlagLT)) -> (MOVDconst [1])
(LessThan (FlagGT)) -> (MOVDconst [0])

(LessEqual (FlagEQ)) -> (MOVDconst [1])
(LessEqual (FlagLT)) -> (MOVDconst [1])
(LessEqual (FlagGT)) -> (MOVDconst [0])

(GreaterThan (FlagEQ)) -> (MOVDconst [0])
(GreaterThan (FlagLT)) -> (MOVDconst [0])
(GreaterThan (FlagGT)) -> (MOVDconst [1])

(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
(GreaterEqual (FlagLT)) -> (MOVDconst [0])
(GreaterEqual (FlagGT)) -> (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) -> (Equal x)
(NotEqual (InvertFlags x)) -> (NotEqual x)
(LessThan (InvertFlags x)) -> (GreaterThan x)
(GreaterThan (InvertFlags x)) -> (LessThan x)
(LessEqual (InvertFlags x)) -> (GreaterEqual x)
(GreaterEqual (InvertFlags x)) -> (LessEqual x)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

// Lowering stores
(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x: the type is wrong
(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
(Zero [2] destptr mem) ->
	(MOVHstorezero destptr mem)
(Zero [3] destptr mem) ->
	(MOVBstorezero [2] destptr
		(MOVHstorezero destptr mem))
(Zero [4] destptr mem) ->
	(MOVWstorezero destptr mem)
(Zero [5] destptr mem) ->
	(MOVBstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [6] destptr mem) ->
	(MOVHstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [7] destptr mem) ->
	(MOVBstorezero [6] destptr
		(MOVHstorezero [4] destptr
			(MOVWstorezero destptr mem)))
(Zero [8] destptr mem) ->
	(MOVDstorezero destptr mem)

// Zero small numbers of words directly.
(Zero [12] destptr mem) ->
	(MOVWstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [16] destptr mem) ->
	(MOVDstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [24] destptr mem) ->
	(MOVDstorezero [16] destptr
		(MOVDstorezero [8] destptr
			(MOVDstorezero [0] destptr mem)))
(Zero [32] destptr mem) ->
	(MOVDstorezero [24] destptr
		(MOVDstorezero [16] destptr
			(MOVDstorezero [8] destptr
				(MOVDstorezero [0] destptr mem))))

(Zero [40] destptr mem) ->
	(MOVDstorezero [32] destptr
		(MOVDstorezero [24] destptr
			(MOVDstorezero [16] destptr
				(MOVDstorezero [8] destptr
					(MOVDstorezero [0] destptr mem)))))

(Zero [48] destptr mem) ->
	(MOVDstorezero [40] destptr
		(MOVDstorezero [32] destptr
			(MOVDstorezero [24] destptr
				(MOVDstorezero [16] destptr
					(MOVDstorezero [8] destptr
						(MOVDstorezero [0] destptr mem))))))

(Zero [56] destptr mem) ->
	(MOVDstorezero [48] destptr
		(MOVDstorezero [40] destptr
			(MOVDstorezero [32] destptr
				(MOVDstorezero [24] destptr
					(MOVDstorezero [16] destptr
						(MOVDstorezero [8] destptr
							(MOVDstorezero [0] destptr mem)))))))

// Handle cases not handled above
(Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)

// moves
// Only the MOVD and MOVW instructions require 4-byte
// alignment in the offset field. The other MOVx instructions
// allow any alignment.
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) ->
	(MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) ->
	(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are multiples of 4
(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
	(MOVWstore [4] dst (MOVWZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBZload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) ->
	(MOVHstore [4] dst (MOVHZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) ->
	(MOVBstore [6] dst (MOVBZload [6] src mem)
		(MOVHstore [4] dst (MOVHZload [4] src mem)
			(MOVWstore dst (MOVWZload src mem) mem)))
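// Why Move [8] checks alignment (a sketch): the DS-form MOVD load/store can
// only encode displacements that are multiples of 4. If the copied type is
// at least 4-byte aligned, any offset later folded into the MOVD stays a
// multiple of 4; otherwise (e.g. an [8]byte field at an odd offset) the copy
// is split into two word pairs whose offset fields have no such restriction.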
// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
(Move [s] dst src mem) && s > 8 ->
	(LoweredMove [s] dst src mem)

// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst and XORconst easily expand into a pair.

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(ANDconst [-1] x) -> x
(ANDconst [0] _) -> (MOVDconst [0])
(XORconst [0] x) -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORconst [0] x) -> x

// zero-extend of small and -> small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y

// sign extend of small-positive and -> small-positive and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest possible ANDconst immediate; regarded as a 32-bit value it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y

// small and of zero-extend -> either zero-extend or small and
// degenerate-and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF -> y
(ANDconst [c] y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
// normal case
(ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)
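// Example of the and/extension interplay above (a sketch): for uint64 x,
// uint8(x & 0xF) arrives as (MOVBZreg (ANDconst [0xF] x)); since 0xF <= 0xFF
// the zero-extension is dropped, leaving just (ANDconst [0xF] x).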
// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y // repeat
(MOVBreg y:(MOVBreg _)) -> y // repeat
(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) -> y // repeat
(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHreg _)) -> y // repeat
(MOVHreg y:(MOVBreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) -> y // repeat
(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWreg _)) -> y // repeat
(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
(MOVWreg y:(MOVBreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)

// Arithmetic constant ops

(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
// TODO: deal with subtract-from-const

(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

// Fold symbols into stores and loads.
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)

// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)

// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVWstorezero [off1+off2] {sym} x mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVHstorezero [off1+off2] {sym} x mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVBstorezero [off1+off2] {sym} x mem)

// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
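// Example of the folding above (illustrative; the field and offset are
// hypothetical): a load of p.f, with f at offset 16, may reach here as
//   (MOVDload [0] (ADDconst [16] p) mem)
// and folds to (MOVDload [16] p mem), since 16 fits in the 16-bit
// displacement field.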
// atomic intrinsics
(AtomicLoad32 ptr mem) -> (LoweredAtomicLoad32 ptr mem)
(AtomicLoad64 ptr mem) -> (LoweredAtomicLoad64 ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoadPtr ptr mem)

(AtomicStore32 ptr val mem) -> (LoweredAtomicStore32 ptr val mem)
(AtomicStore64 ptr val mem) -> (LoweredAtomicStore64 ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)

(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem)
(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt8to64 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to16 x) -> (MOVBZreg x)
(ZeroExt8to32 x) -> (MOVBZreg x)
(ZeroExt8to64 x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

(Trunc16to8 x) -> (MOVBreg x)
(Trunc32to8 x) -> (MOVBreg x)
(Trunc32to16 x) -> (MOVHreg x)
(Trunc64to8 x) -> (MOVBreg x)
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)

(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
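// Why Slicemask works (a sketch): for a nonnegative length x, NEG x has its
// sign bit set exactly when x > 0, so an arithmetic right shift by 63 yields
// all ones for x > 0 and zero for x == 0.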
// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
// This may interact with other patterns in the future. (Compare with arm64.)
(MOVBZreg x:(MOVBZload _ _)) -> x
(MOVHZreg x:(MOVHZload _ _)) -> x
(MOVHreg x:(MOVHload _ _)) -> x

(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])

// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// Lose W-widening ops fed to compare-W
(CMPW x (MOVWreg y)) -> (CMPW x y)
(CMPW (MOVWreg x) y) -> (CMPW x y)
(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
(CMPWU (MOVWZreg x) y) -> (CMPWU x y)

(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))

(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))

// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)

// floating-point fused multiply-add/sub
(FADD (FMUL x y) z) -> (FMADD x y z)
(FSUB (FMUL x y) z) -> (FMSUB x y z)
(FADDS (FMULS x y) z) -> (FMADDS x y z)
(FSUBS (FMULS x y) z) -> (FMSUBS x y z)
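// Example (a sketch): for float64 x, y, z, the expression x*y + z arrives as
// (FADD (FMUL x y) z) and becomes a single (FMADD x y z). The fused
// instruction rounds once rather than twice, which the Go spec permits an
// implementation to do.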