// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(AddPtr x y) -> (ADD x y)
(Add64 x y) -> (ADD x y)
(Add32 x y) -> (ADD x y)
(Add16 x y) -> (ADD x y)
(Add8 x y) -> (ADD x y)
(Add32F x y) -> (FADDS x y)
(Add64F x y) -> (FADDD x y)

(SubPtr x y) -> (SUB x y)
(Sub64 x y) -> (SUB x y)
(Sub32 x y) -> (SUB x y)
(Sub16 x y) -> (SUB x y)
(Sub8 x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUBD x y)

(Mul64 x y) -> (MUL x y)
(Mul32 x y) -> (MULW x y)
(Mul16 x y) -> (MULW x y)
(Mul8 x y) -> (MULW x y)
(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMULD x y)

(Hmul64 x y) -> (MULH x y)
(Hmul64u x y) -> (UMULH x y)
(Hmul32 x y) -> (SRAconst (MULL <typ.Int64> x y) [32])
(Hmul32u x y) -> (SRAconst (UMULL <typ.UInt64> x y) [32])

(Div64 x y) -> (DIV x y)
(Div64u x y) -> (UDIV x y)
(Div32 x y) -> (DIVW x y)
(Div32u x y) -> (UDIVW x y)
(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) -> (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIVD x y)

(Mod64 x y) -> (MOD x y)
(Mod64u x y) -> (UMOD x y)
(Mod32 x y) -> (MODW x y)
(Mod32u x y) -> (UMODW x y)
(Mod16 x y) -> (MODW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (MODW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRLconst <t> (SUB <t> x y) [1]) y)

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8 x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8 x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8 x y) -> (XOR x y)

// unary ops
(Neg64 x) -> (NEG x)
(Neg32 x) -> (NEG x)
(Neg16 x) -> (NEG x)
(Neg8 x) -> (NEG x)
(Neg32F x) -> (FNEGS x)
(Neg64F x) -> (FNEGD x)

(Com64 x) -> (MVN x)
(Com32 x) -> (MVN x)
(Com16 x) -> (MVN x)
(Com8 x) -> (MVN x)

// math package intrinsics
(Sqrt x) -> (FSQRTD x)
(Ceil x) -> (FRINTPD x)
(Floor x) -> (FRINTMD x)
(Round x) -> (FRINTAD x)
(Trunc x) -> (FRINTZD x)

(Ctz64 <t> x) -> (CLZ (RBIT <t> x))
(Ctz32 <t> x) -> (CLZW (RBITW <t> x))

(PopCount64 <t> x) -> (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
(PopCount32 <t> x) -> (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
(PopCount16 <t> x) -> (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))

// Load args directly into the register class where they will be used.
(FMOVDgpfp <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
// Similarly for stores: if we see a store after an FPR <-> GPR move, redirect the store to use the other register set.
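// For example, a value fed through FMOVDfpgp originally lives in a floating-point register,
// so storing it with FMOVDstore skips the FPR -> GPR move; the mirror rule handles FMOVDgpfp.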
(MOVDstore ptr (FMOVDfpgp val) mem) -> (FMOVDstore ptr val mem)
(FMOVDstore ptr (FMOVDgpfp val) mem) -> (MOVDstore ptr val mem)

(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <typ.Int> x))

(Bswap64 x) -> (REV x)
(Bswap32 x) -> (REVW x)

(BitRev64 x) -> (RBIT x)
(BitRev32 x) -> (RBITW x)
(BitRev16 x) -> (SRLconst [48] (RBIT <typ.UInt64> x))
(BitRev8 x) -> (SRLconst [56] (RBIT <typ.UInt64> x))

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XOR (MOVDconst [1]) x)

// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
(Lsh64x64 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x32 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Lsh32x64 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh32x32 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Lsh16x64 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh16x32 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Lsh8x64 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh8x32 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Rsh64Ux64 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh64Ux32 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Rsh32Ux64 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh32Ux32 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Rsh16Ux64 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh16Ux32 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Rsh8Ux64 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh8Ux32 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) -> (CSEL {OpARM64LessThanU} (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))

(Rsh64x64 x y) -> (SRA x (CSEL {OpARM64LessThanU} <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh64x32 x y) -> (SRA x (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh64x16 x y) -> (SRA x (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh64x8 x y) -> (SRA x (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))

(Rsh32x64 x y) -> (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh32x32 x y) -> (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh32x16 x y) -> (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh32x8 x y) -> (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))

(Rsh16x64 x y) -> (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh16x32 x y) -> (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh16x16 x y) -> (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh16x8 x y) -> (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))

(Rsh8x64 x y) -> (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh8x32 x y) -> (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh8x16 x y) -> (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh8x8 x y) -> (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))

// constants
(Const64 [val]) -> (MOVDconst [val])
(Const32 [val]) -> (MOVDconst [val])
(Const16 [val]) -> (MOVDconst [val])
(Const8 [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

(Slicemask <t> x) -> (SRAconst (NEG <t> x) [63])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Zero-/Sign-extensions
(ZeroExt8to16 x) -> (MOVBUreg x)
(ZeroExt8to32 x) -> (MOVBUreg x)
(ZeroExt16to32 x) -> (MOVHUreg x)
(ZeroExt8to64 x) -> (MOVBUreg x)
(ZeroExt16to64 x) -> (MOVHUreg x)
(ZeroExt32to64 x) -> (MOVWUreg x)

(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt8to64 x) -> (MOVBreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

// float <-> int conversion
(Cvt32to32F x) -> (SCVTFWS x)
(Cvt32to64F x) -> (SCVTFWD x)
(Cvt64to32F x) -> (SCVTFS x)
(Cvt64to64F x) -> (SCVTFD x)
(Cvt32Uto32F x) -> (UCVTFWS x)
(Cvt32Uto64F x) -> (UCVTFWD x)
(Cvt64Uto32F x) -> (UCVTFS x)
(Cvt64Uto64F x) -> (UCVTFD x)
(Cvt32Fto32 x) -> (FCVTZSSW x)
(Cvt64Fto32 x) -> (FCVTZSDW x)
(Cvt32Fto64 x) -> (FCVTZSS x)
(Cvt64Fto64 x) -> (FCVTZSD x)
(Cvt32Fto32U x) -> (FCVTZUSW x)
(Cvt64Fto32U x) -> (FCVTZUDW x)
(Cvt32Fto64U x) -> (FCVTZUS x)
(Cvt64Fto64U x) -> (FCVTZUD x)
(Cvt32Fto64F x) -> (FCVTSD x)
(Cvt64Fto32F x) -> (FCVTDS x)

(Round32F x) -> (LoweredRound32F x)
(Round64F x) -> (LoweredRound64F x)

// comparisons
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(EqPtr x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPS x y))
(Eq64F x y) -> (Equal (FCMPD x y))

(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(NeqPtr x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPS x y))
(Neq64F x y) -> (NotEqual (FCMPD x y))

(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (GreaterThan (FCMPS y x)) // reverse operands to work around NaN
(Less64F x y) -> (GreaterThan (FCMPD y x)) // reverse operands to work around NaN

(Less8U x y) -> (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThanU (CMPW x y))
(Less64U x y) -> (LessThanU (CMP x y))

(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (GreaterEqual (FCMPS y x)) // reverse operands to work around NaN
(Leq64F x y) -> (GreaterEqual (FCMPD y x)) // reverse operands to work around NaN

(Leq8U x y) -> (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqualU (CMPW x y))
(Leq64U x y) -> (LessEqualU (CMP x y))

(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater32F x y) -> (GreaterThan (FCMPS x y))
(Greater64F x y) -> (GreaterThan (FCMPD x y))

(Greater8U x y) -> (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThanU (CMPW x y))
(Greater64U x y) -> (GreaterThanU (CMP x y))

(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq32F x y) -> (GreaterEqual (FCMPS x y))
(Geq64F x y) -> (GreaterEqual (FCMPD x y))

(Geq8U x y) -> (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqualU (CMPW x y))
(Geq64U x y) -> (GreaterEqualU (CMP x y))

// CSEL needs a flag-generating argument. Synthesize a CMPW if necessary.
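// If the boolean was itself produced by a flag-setting comparison, flagArg finds those flags
// and the CSEL uses them directly; otherwise the materialized 0/1 value is compared against zero.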
(CondSelect x y bool) && flagArg(bool) != nil -> (CSEL {bool.Op} x y flagArg(bool))
(CondSelect x y bool) && flagArg(bool) == nil -> (CSEL {OpARM64NotEqual} x y (CMPWconst [0] bool))

(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)

(Addr {sym} base) -> (MOVDaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) -> (MOVHstore ptr (MOVDconst [0]) mem)
(Zero [4] ptr mem) -> (MOVWstore ptr (MOVDconst [0]) mem)
(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst [0]) mem)

(Zero [3] ptr mem) ->
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [5] ptr mem) ->
	(MOVBstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [6] ptr mem) ->
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [7] ptr mem) ->
	(MOVBstore [6] ptr (MOVDconst [0])
		(MOVHstore [4] ptr (MOVDconst [0])
			(MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [9] ptr mem) ->
	(MOVBstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [10] ptr mem) ->
	(MOVHstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [11] ptr mem) ->
	(MOVBstore [10] ptr (MOVDconst [0])
		(MOVHstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [12] ptr mem) ->
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [13] ptr mem) ->
	(MOVBstore [12] ptr (MOVDconst [0])
		(MOVWstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [14] ptr mem) ->
	(MOVHstore [12] ptr (MOVDconst [0])
		(MOVWstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [15] ptr mem) ->
	(MOVBstore [14] ptr (MOVDconst [0])
		(MOVHstore [12] ptr (MOVDconst [0])
			(MOVWstore [8] ptr (MOVDconst [0])
				(MOVDstore ptr (MOVDconst [0]) mem))))
(Zero [16] ptr mem) ->
	(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
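
// Larger power-of-two sizes are unrolled into chains of STP, each of which clears 16 bytes.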
(Zero [32] ptr mem) ->
	(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
		(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))

(Zero [48] ptr mem) ->
	(STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
		(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
			(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))

(Zero [64] ptr mem) ->
	(STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
		(STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
			(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
				(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))

// strip off fractional word zeroing
(Zero [s] ptr mem) && s%16 != 0 && s > 16 ->
	(Zero [s-s%16]
		(OffPtr <ptr.Type> ptr [s%16])
		(Zero [s%16] ptr mem))

// medium zeroing uses a duff device
// 4, 16, and 64 are magic constants, see runtime/mkduff.go
(Zero [s] ptr mem)
	&& s%16 == 0 && s > 64 && s <= 16*64
	&& !config.noDuffDevice ->
	(DUFFZERO [4 * (64 - int64(s/16))] ptr mem)

// large zeroing uses a loop
(Zero [s] ptr mem)
	&& s%16 == 0 && (s > 16*64 || config.noDuffDevice) ->
	(LoweredZero
		ptr
		(ADDconst <ptr.Type> [s-16] ptr)
		mem)

// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) -> (MOVHstore dst (MOVHUload src mem) mem)
(Move [4] dst src mem) -> (MOVWstore dst (MOVWUload src mem) mem)
(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem)

(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) ->
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) ->
	(MOVBstore [6] dst (MOVBUload [6] src mem)
		(MOVHstore [4] dst (MOVHUload [4] src mem)
			(MOVWstore dst (MOVWUload src mem) mem)))
(Move [12] dst src mem) ->
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [16] dst src mem) ->
	(MOVDstore [8] dst (MOVDload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [24] dst src mem) ->
	(MOVDstore [16] dst (MOVDload [16] src mem)
		(MOVDstore [8] dst (MOVDload [8] src mem)
			(MOVDstore dst (MOVDload src mem) mem)))

// strip off fractional word move
(Move [s] dst src mem) && s%8 != 0 && s > 8 ->
	(Move [s%8]
		(OffPtr <dst.Type> dst [s-s%8])
		(OffPtr <src.Type> src [s-s%8])
		(Move [s-s%8] dst src mem))

// medium move uses a duff device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Move [s] dst src mem)
	&& s%8 == 0 && s > 24 && s <= 8*128
	&& !config.noDuffDevice ->
	(DUFFCOPY [8 * (128 - int64(s/8))] dst src mem)

// large move uses a loop
(Move [s] dst src mem)
	&& s > 24 && s%8 == 0 ->
	(LoweredMove
		dst
		src
		(ADDconst <src.Type> src [s-8])
		mem)

// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThanU (CMP idx len))
(IsSliceInBounds idx len) -> (LessEqualU (CMP idx len))
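// Bounds checks use unsigned comparisons, so a negative index or length also fails the check.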

// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerSP) -> (LoweredGetCallerSP)
(Convert x mem) -> (MOVDconvert x mem)

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessThanU cc) yes no) -> (ULT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (LessEqualU cc) yes no) -> (ULE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterThanU cc) yes no) -> (UGT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (GreaterEqualU cc) yes no) -> (UGE cc yes no)

(If cond yes no) -> (NZ cond yes no)

// atomic intrinsics
// Note: these ops do not accept offset.
(AtomicLoad32 ptr mem) -> (LDARW ptr mem)
(AtomicLoad64 ptr mem) -> (LDAR ptr mem)
(AtomicLoadPtr ptr mem) -> (LDAR ptr mem)

(AtomicStore32 ptr val mem) -> (STLRW ptr val mem)
(AtomicStore64 ptr val mem) -> (STLR ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)

(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem)
(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem)

// Write barrier.
(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)

// Optimizations

// Absorb boolean tests into block
(NZ (Equal cc) yes no) -> (EQ cc yes no)
(NZ (NotEqual cc) yes no) -> (NE cc yes no)
(NZ (LessThan cc) yes no) -> (LT cc yes no)
(NZ (LessThanU cc) yes no) -> (ULT cc yes no)
(NZ (LessEqual cc) yes no) -> (LE cc yes no)
(NZ (LessEqualU cc) yes no) -> (ULE cc yes no)
(NZ (GreaterThan cc) yes no) -> (GT cc yes no)
(NZ (GreaterThanU cc) yes no) -> (UGT cc yes no)
(NZ (GreaterEqual cc) yes no) -> (GE cc yes no)
(NZ (GreaterEqualU cc) yes no) -> (UGE cc yes no)

(EQ (CMPconst [0] x) yes no) -> (Z x yes no)
(NE (CMPconst [0] x) yes no) -> (NZ x yes no)
(EQ (CMPWconst [0] x) yes no) -> (ZW x yes no)
(NE (CMPWconst [0] x) yes no) -> (NZW x yes no)

// Absorb bit-tests into block
(Z (ANDconst [c] x) yes no) && oneBit(c) -> (TBZ {ntz(c)} x yes no)
(NZ (ANDconst [c] x) yes no) && oneBit(c) -> (TBNZ {ntz(c)} x yes no)
(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) -> (TBZ {ntz(int64(uint32(c)))} x yes no)
(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) -> (TBNZ {ntz(int64(uint32(c)))} x yes no)

// Test sign-bit for signed comparisons against zero
(GE (CMPWconst [0] x) yes no) -> (TBZ {int64(31)} x yes no)
(GE (CMPconst [0] x) yes no) -> (TBZ {int64(63)} x yes no)
(LT (CMPWconst [0] x) yes no) -> (TBNZ {int64(31)} x yes no)
(LT (CMPconst [0] x) yes no) -> (TBNZ {int64(63)} x yes no)

// fold offset into address
(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)

// fold address into load/store
(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBload [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHload [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWload [off1+off2] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWUload [off1+off2] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVDload [off1+off2] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVDload [off1+off2] {sym} ptr mem)

(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVDstore [off1+off2] {sym} ptr val mem)
(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(STP [off1+off2] {sym} ptr val1 val2 mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVDstore [off1+off2] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWstorezero [off1+off2] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVDstorezero [off1+off2] {sym} ptr mem)
(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVQstorezero [off1+off2] {sym} ptr mem)
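
// The same folding applies when the address comes from a MOVDaddr: the offsets are added
// and the symbols are merged with mergeSym.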
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(off1+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
	(MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// store zero
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) -> (MOVQstorezero [off] {sym} ptr mem)

// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
// these seem to have bad interaction with other rules, resulting in slower code
//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) -> (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) -> (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) -> (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) -> (MOVDreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVWreg _)) -> (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) -> (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) -> (MOVDreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// if a register move has only 1 use, just use the same register without emitting an instruction
// MOVDnop doesn't emit an instruction; it is only for ensuring the type.
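// x.Uses == 1 means this move is the only user of x, so x's register can simply be reused.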
794 (MOVDreg x) && x.Uses == 1 -> (MOVDnop x) 795 796 // fold constant into arithmatic ops 797 (ADD x (MOVDconst [c])) -> (ADDconst [c] x) 798 (SUB x (MOVDconst [c])) -> (SUBconst [c] x) 799 (AND x (MOVDconst [c])) -> (ANDconst [c] x) 800 (OR x (MOVDconst [c])) -> (ORconst [c] x) 801 (XOR x (MOVDconst [c])) -> (XORconst [c] x) 802 (BIC x (MOVDconst [c])) -> (BICconst [c] x) 803 804 (SLL x (MOVDconst [c])) -> (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64) 805 (SRL x (MOVDconst [c])) -> (SRLconst x [c&63]) 806 (SRA x (MOVDconst [c])) -> (SRAconst x [c&63]) 807 808 (CMP x (MOVDconst [c])) -> (CMPconst [c] x) 809 (CMP (MOVDconst [c]) x) -> (InvertFlags (CMPconst [c] x)) 810 (CMPW x (MOVDconst [c])) -> (CMPWconst [int64(int32(c))] x) 811 (CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst [int64(int32(c))] x)) 812 813 // mul-neg -> mneg 814 (NEG (MUL x y)) -> (MNEG x y) 815 (NEG (MULW x y)) -> (MNEGW x y) 816 (MUL (NEG x) y) -> (MNEG x y) 817 (MULW (NEG x) y) -> (MNEGW x y) 818 819 // mul by constant 820 (MUL x (MOVDconst [-1])) -> (NEG x) 821 (MUL _ (MOVDconst [0])) -> (MOVDconst [0]) 822 (MUL x (MOVDconst [1])) -> x 823 (MUL x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) 824 (MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)]) 825 (MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)]) 826 (MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) 827 (MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) 828 (MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])) 829 (MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) 830 831 (MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x) 832 (MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0]) 833 (MULW x (MOVDconst [c])) && int32(c)==1 -> x 834 (MULW x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) 835 (MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)]) 836 (MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)]) 837 (MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) 838 (MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) 839 (MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])) 840 (MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) 841 842 // mneg by constant 843 (MNEG x (MOVDconst [-1])) -> x 844 (MNEG _ (MOVDconst [0])) -> (MOVDconst [0]) 845 (MNEG x (MOVDconst [1])) -> (NEG x) 846 (MNEG x (MOVDconst [c])) && isPowerOfTwo(c) -> (NEG (SLLconst <x.Type> [log2(c)] x)) 847 (MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 -> (NEG (ADDshiftLL <x.Type> x x [log2(c-1)])) 848 (MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 -> (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)])) 849 (MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (NEG (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))) 850 (MNEG x 
(MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))) 851 (MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (NEG (SLLconst <x.Type> [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))) 852 (MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))) 853 854 (MNEGW x (MOVDconst [c])) && int32(c)==-1 -> x 855 (MNEGW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0]) 856 (MNEGW x (MOVDconst [c])) && int32(c)==1 -> (NEG x) 857 (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) -> (NEG (SLLconst <x.Type> [log2(c)] x)) 858 (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (NEG (ADDshiftLL <x.Type> x x [log2(c-1)])) 859 (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)])) 860 (MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (NEG (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))) 861 (MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))) 862 (MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (NEG (SLLconst <x.Type> [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))) 863 (MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))) 864 865 // div by constant 866 (UDIV x (MOVDconst [1])) -> x 867 (UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x) 868 (UDIVW x (MOVDconst [c])) && uint32(c)==1 -> x 869 (UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (SRLconst [log2(c)] x) 870 (UMOD _ (MOVDconst [1])) -> (MOVDconst [0]) 871 (UMOD x (MOVDconst [c])) && isPowerOfTwo(c) -> (ANDconst [c-1] x) 872 (UMODW _ (MOVDconst [c])) && uint32(c)==1 -> (MOVDconst [0]) 873 (UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (ANDconst [c-1] x) 874 875 // generic simplifications 876 (ADD x (NEG y)) -> (SUB x y) 877 (SUB x x) -> (MOVDconst [0]) 878 (AND x x) -> x 879 (OR x x) -> x 880 (XOR x x) -> (MOVDconst [0]) 881 (BIC x x) -> (MOVDconst [0]) 882 (AND x (MVN y)) -> (BIC x y) 883 (CSEL {cc} x (MOVDconst [0]) flag) -> (CSEL0 {cc} x flag) 884 (CSEL {cc} (MOVDconst [0]) y flag) -> (CSEL0 {arm64Negate(cc.(Op))} y flag) 885 (SUB x (SUB y z)) -> (SUB (ADD <v.Type> x z) y) 886 (SUB (SUB x y) z) -> (SUB x (ADD <y.Type> y z)) 887 888 // remove redundant *const ops 889 (ADDconst [0] x) -> x 890 (SUBconst [0] x) -> x 891 (ANDconst [0] _) -> (MOVDconst [0]) 892 (ANDconst [-1] x) -> x 893 (ORconst [0] x) -> x 894 (ORconst [-1] _) -> (MOVDconst [-1]) 895 (XORconst [0] x) -> x 896 (XORconst [-1] x) -> (MVN x) 897 (BICconst [0] x) -> x 898 (BICconst [-1] _) -> (MOVDconst [0]) 899 900 // generic constant folding 901 (ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d]) 902 (ADDconst [c] (ADDconst [d] x)) -> (ADDconst [c+d] x) 903 (ADDconst [c] (SUBconst [d] x)) -> (ADDconst [c-d] x) 904 (SUBconst [c] (MOVDconst [d])) -> (MOVDconst [d-c]) 905 (SUBconst [c] (SUBconst [d] x)) -> (ADDconst [-c-d] x) 906 (SUBconst [c] (ADDconst [d] x)) -> (ADDconst [-c+d] x) 907 (SLLconst [c] (MOVDconst [d])) -> (MOVDconst [int64(d)<<uint64(c)]) 908 (SRLconst [c] (MOVDconst [d])) -> (MOVDconst [int64(uint64(d)>>uint64(c))]) 909 (SRAconst [c] (MOVDconst [d])) -> (MOVDconst [int64(d)>>uint64(c)]) 910 (MUL (MOVDconst [c]) (MOVDconst [d])) -> 
(MOVDconst [c*d]) 911 (MULW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))]) 912 (MNEG (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-c*d]) 913 (MNEGW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-int64(int32(c)*int32(d))]) 914 (DIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)/int64(d)]) 915 (UDIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))]) 916 (DIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))]) 917 (UDIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)/uint32(d))]) 918 (MOD (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)%int64(d)]) 919 (UMOD (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)%uint64(d))]) 920 (MODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)%int32(d))]) 921 (UMODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)%uint32(d))]) 922 (ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d]) 923 (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x) 924 (ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d]) 925 (ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x) 926 (XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d]) 927 (XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x) 928 (BICconst [c] (MOVDconst [d])) -> (MOVDconst [d&^c]) 929 (MVN (MOVDconst [c])) -> (MOVDconst [^c]) 930 (NEG (MOVDconst [c])) -> (MOVDconst [-c]) 931 (MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))]) 932 (MOVBUreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))]) 933 (MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))]) 934 (MOVHUreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))]) 935 (MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))]) 936 (MOVWUreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))]) 937 (MOVDreg (MOVDconst [c])) -> (MOVDconst [c]) 938 939 // constant comparisons 940 (CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ) 941 (CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)<uint64(y) -> (FlagLT_ULT) 942 (CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)>uint64(y) -> (FlagLT_UGT) 943 (CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)<uint64(y) -> (FlagGT_ULT) 944 (CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)>uint64(y) -> (FlagGT_UGT) 945 (CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ) 946 (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT) 947 (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT) 948 (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT) 949 (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT) 950 951 // other known comparisons 952 (CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT) 953 (CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT_ULT) 954 (CMPconst (MOVWUreg _) [c]) && 0xffffffff < c -> (FlagLT_ULT) 955 (CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT) 956 (CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT) 957 (CMPWconst (MOVBUreg _) [c]) && 0xff < int32(c) -> (FlagLT_ULT) 958 (CMPWconst (MOVHUreg _) [c]) && 0xffff < int32(c) -> (FlagLT_ULT) 959 960 // absorb flag constants into branches 961 (EQ (FlagEQ) yes no) -> (First nil yes no) 962 (EQ (FlagLT_ULT) yes no) -> (First nil no yes) 963 (EQ (FlagLT_UGT) yes no) -> (First nil no yes) 964 (EQ (FlagGT_ULT) yes no) -> (First nil 
no yes) 965 (EQ (FlagGT_UGT) yes no) -> (First nil no yes) 966 967 (NE (FlagEQ) yes no) -> (First nil no yes) 968 (NE (FlagLT_ULT) yes no) -> (First nil yes no) 969 (NE (FlagLT_UGT) yes no) -> (First nil yes no) 970 (NE (FlagGT_ULT) yes no) -> (First nil yes no) 971 (NE (FlagGT_UGT) yes no) -> (First nil yes no) 972 973 (LT (FlagEQ) yes no) -> (First nil no yes) 974 (LT (FlagLT_ULT) yes no) -> (First nil yes no) 975 (LT (FlagLT_UGT) yes no) -> (First nil yes no) 976 (LT (FlagGT_ULT) yes no) -> (First nil no yes) 977 (LT (FlagGT_UGT) yes no) -> (First nil no yes) 978 979 (LE (FlagEQ) yes no) -> (First nil yes no) 980 (LE (FlagLT_ULT) yes no) -> (First nil yes no) 981 (LE (FlagLT_UGT) yes no) -> (First nil yes no) 982 (LE (FlagGT_ULT) yes no) -> (First nil no yes) 983 (LE (FlagGT_UGT) yes no) -> (First nil no yes) 984 985 (GT (FlagEQ) yes no) -> (First nil no yes) 986 (GT (FlagLT_ULT) yes no) -> (First nil no yes) 987 (GT (FlagLT_UGT) yes no) -> (First nil no yes) 988 (GT (FlagGT_ULT) yes no) -> (First nil yes no) 989 (GT (FlagGT_UGT) yes no) -> (First nil yes no) 990 991 (GE (FlagEQ) yes no) -> (First nil yes no) 992 (GE (FlagLT_ULT) yes no) -> (First nil no yes) 993 (GE (FlagLT_UGT) yes no) -> (First nil no yes) 994 (GE (FlagGT_ULT) yes no) -> (First nil yes no) 995 (GE (FlagGT_UGT) yes no) -> (First nil yes no) 996 997 (ULT (FlagEQ) yes no) -> (First nil no yes) 998 (ULT (FlagLT_ULT) yes no) -> (First nil yes no) 999 (ULT (FlagLT_UGT) yes no) -> (First nil no yes) 1000 (ULT (FlagGT_ULT) yes no) -> (First nil yes no) 1001 (ULT (FlagGT_UGT) yes no) -> (First nil no yes) 1002 1003 (ULE (FlagEQ) yes no) -> (First nil yes no) 1004 (ULE (FlagLT_ULT) yes no) -> (First nil yes no) 1005 (ULE (FlagLT_UGT) yes no) -> (First nil no yes) 1006 (ULE (FlagGT_ULT) yes no) -> (First nil yes no) 1007 (ULE (FlagGT_UGT) yes no) -> (First nil no yes) 1008 1009 (UGT (FlagEQ) yes no) -> (First nil no yes) 1010 (UGT (FlagLT_ULT) yes no) -> (First nil no yes) 1011 (UGT (FlagLT_UGT) yes no) -> (First nil yes no) 1012 (UGT (FlagGT_ULT) yes no) -> (First nil no yes) 1013 (UGT (FlagGT_UGT) yes no) -> (First nil yes no) 1014 1015 (UGE (FlagEQ) yes no) -> (First nil yes no) 1016 (UGE (FlagLT_ULT) yes no) -> (First nil no yes) 1017 (UGE (FlagLT_UGT) yes no) -> (First nil yes no) 1018 (UGE (FlagGT_ULT) yes no) -> (First nil no yes) 1019 (UGE (FlagGT_UGT) yes no) -> (First nil yes no) 1020 1021 (Z (MOVDconst [0]) yes no) -> (First nil yes no) 1022 (Z (MOVDconst [c]) yes no) && c != 0 -> (First nil no yes) 1023 (NZ (MOVDconst [0]) yes no) -> (First nil no yes) 1024 (NZ (MOVDconst [c]) yes no) && c != 0 -> (First nil yes no) 1025 (ZW (MOVDconst [c]) yes no) && int32(c) == 0 -> (First nil yes no) 1026 (ZW (MOVDconst [c]) yes no) && int32(c) != 0 -> (First nil no yes) 1027 (NZW (MOVDconst [c]) yes no) && int32(c) == 0 -> (First nil no yes) 1028 (NZW (MOVDconst [c]) yes no) && int32(c) != 0 -> (First nil yes no) 1029 1030 // absorb InvertFlags into branches 1031 (LT (InvertFlags cmp) yes no) -> (GT cmp yes no) 1032 (GT (InvertFlags cmp) yes no) -> (LT cmp yes no) 1033 (LE (InvertFlags cmp) yes no) -> (GE cmp yes no) 1034 (GE (InvertFlags cmp) yes no) -> (LE cmp yes no) 1035 (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no) 1036 (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no) 1037 (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no) 1038 (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no) 1039 (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no) 1040 (NE (InvertFlags cmp) yes no) -> (NE cmp yes no) 1041 1042 // absorb InvertFlags 
into CSEL(0) 1043 (CSEL {cc} x y (InvertFlags cmp)) -> (CSEL {arm64Invert(cc.(Op))} x y cmp) 1044 (CSEL0 {cc} x (InvertFlags cmp)) -> (CSEL0 {arm64Invert(cc.(Op))} x cmp) 1045 1046 // absorb flag constants into boolean values 1047 (Equal (FlagEQ)) -> (MOVDconst [1]) 1048 (Equal (FlagLT_ULT)) -> (MOVDconst [0]) 1049 (Equal (FlagLT_UGT)) -> (MOVDconst [0]) 1050 (Equal (FlagGT_ULT)) -> (MOVDconst [0]) 1051 (Equal (FlagGT_UGT)) -> (MOVDconst [0]) 1052 1053 (NotEqual (FlagEQ)) -> (MOVDconst [0]) 1054 (NotEqual (FlagLT_ULT)) -> (MOVDconst [1]) 1055 (NotEqual (FlagLT_UGT)) -> (MOVDconst [1]) 1056 (NotEqual (FlagGT_ULT)) -> (MOVDconst [1]) 1057 (NotEqual (FlagGT_UGT)) -> (MOVDconst [1]) 1058 1059 (LessThan (FlagEQ)) -> (MOVDconst [0]) 1060 (LessThan (FlagLT_ULT)) -> (MOVDconst [1]) 1061 (LessThan (FlagLT_UGT)) -> (MOVDconst [1]) 1062 (LessThan (FlagGT_ULT)) -> (MOVDconst [0]) 1063 (LessThan (FlagGT_UGT)) -> (MOVDconst [0]) 1064 1065 (LessThanU (FlagEQ)) -> (MOVDconst [0]) 1066 (LessThanU (FlagLT_ULT)) -> (MOVDconst [1]) 1067 (LessThanU (FlagLT_UGT)) -> (MOVDconst [0]) 1068 (LessThanU (FlagGT_ULT)) -> (MOVDconst [1]) 1069 (LessThanU (FlagGT_UGT)) -> (MOVDconst [0]) 1070 1071 (LessEqual (FlagEQ)) -> (MOVDconst [1]) 1072 (LessEqual (FlagLT_ULT)) -> (MOVDconst [1]) 1073 (LessEqual (FlagLT_UGT)) -> (MOVDconst [1]) 1074 (LessEqual (FlagGT_ULT)) -> (MOVDconst [0]) 1075 (LessEqual (FlagGT_UGT)) -> (MOVDconst [0]) 1076 1077 (LessEqualU (FlagEQ)) -> (MOVDconst [1]) 1078 (LessEqualU (FlagLT_ULT)) -> (MOVDconst [1]) 1079 (LessEqualU (FlagLT_UGT)) -> (MOVDconst [0]) 1080 (LessEqualU (FlagGT_ULT)) -> (MOVDconst [1]) 1081 (LessEqualU (FlagGT_UGT)) -> (MOVDconst [0]) 1082 1083 (GreaterThan (FlagEQ)) -> (MOVDconst [0]) 1084 (GreaterThan (FlagLT_ULT)) -> (MOVDconst [0]) 1085 (GreaterThan (FlagLT_UGT)) -> (MOVDconst [0]) 1086 (GreaterThan (FlagGT_ULT)) -> (MOVDconst [1]) 1087 (GreaterThan (FlagGT_UGT)) -> (MOVDconst [1]) 1088 1089 (GreaterThanU (FlagEQ)) -> (MOVDconst [0]) 1090 (GreaterThanU (FlagLT_ULT)) -> (MOVDconst [0]) 1091 (GreaterThanU (FlagLT_UGT)) -> (MOVDconst [1]) 1092 (GreaterThanU (FlagGT_ULT)) -> (MOVDconst [0]) 1093 (GreaterThanU (FlagGT_UGT)) -> (MOVDconst [1]) 1094 1095 (GreaterEqual (FlagEQ)) -> (MOVDconst [1]) 1096 (GreaterEqual (FlagLT_ULT)) -> (MOVDconst [0]) 1097 (GreaterEqual (FlagLT_UGT)) -> (MOVDconst [0]) 1098 (GreaterEqual (FlagGT_ULT)) -> (MOVDconst [1]) 1099 (GreaterEqual (FlagGT_UGT)) -> (MOVDconst [1]) 1100 1101 (GreaterEqualU (FlagEQ)) -> (MOVDconst [1]) 1102 (GreaterEqualU (FlagLT_ULT)) -> (MOVDconst [0]) 1103 (GreaterEqualU (FlagLT_UGT)) -> (MOVDconst [1]) 1104 (GreaterEqualU (FlagGT_ULT)) -> (MOVDconst [0]) 1105 (GreaterEqualU (FlagGT_UGT)) -> (MOVDconst [1]) 1106 1107 // absorb InvertFlags into boolean values 1108 (Equal (InvertFlags x)) -> (Equal x) 1109 (NotEqual (InvertFlags x)) -> (NotEqual x) 1110 (LessThan (InvertFlags x)) -> (GreaterThan x) 1111 (LessThanU (InvertFlags x)) -> (GreaterThanU x) 1112 (GreaterThan (InvertFlags x)) -> (LessThan x) 1113 (GreaterThanU (InvertFlags x)) -> (LessThanU x) 1114 (LessEqual (InvertFlags x)) -> (GreaterEqual x) 1115 (LessEqualU (InvertFlags x)) -> (GreaterEqualU x) 1116 (GreaterEqual (InvertFlags x)) -> (LessEqual x) 1117 (GreaterEqualU (InvertFlags x)) -> (LessEqualU x) 1118 1119 // Boolean-generating instructions always 1120 // zero upper bit of the register; no need to zero-extend 1121 (MOVBUreg x) && x.Type.IsBoolean() -> (MOVDreg x) 1122 1123 // absorb flag constants into conditional instructions 1124 (CSEL {cc} x _ flag) && 
(CSEL {cc} x _ flag) && ccARM64Eval(cc, flag) > 0 -> x
(CSEL {cc} _ y flag) && ccARM64Eval(cc, flag) < 0 -> y
(CSEL0 {cc} x flag) && ccARM64Eval(cc, flag) > 0 -> x
(CSEL0 {cc} _ flag) && ccARM64Eval(cc, flag) < 0 -> (MOVDconst [0])

// absorb flags back into boolean CSEL
(CSEL {cc} x y (CMPWconst [0] bool)) && cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil ->
    (CSEL {bool.Op} x y flagArg(bool))
(CSEL {cc} x y (CMPWconst [0] bool)) && cc.(Op) == OpARM64Equal && flagArg(bool) != nil ->
    (CSEL {arm64Negate(bool.Op)} x y flagArg(bool))
(CSEL0 {cc} x (CMPWconst [0] bool)) && cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil ->
    (CSEL0 {bool.Op} x flagArg(bool))
(CSEL0 {cc} x (CMPWconst [0] bool)) && cc.(Op) == OpARM64Equal && flagArg(bool) != nil ->
    (CSEL0 {arm64Negate(bool.Op)} x flagArg(bool))

// absorb shifts into ops
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRA x0 y [c])
(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (SUBshiftLL x0 y [c])
(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (SUBshiftRL x0 y [c])
(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (SUBshiftRA x0 y [c])
(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ANDshiftLL x0 y [c])
(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ANDshiftRL x0 y [c])
(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ANDshiftRA x0 y [c])
(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ORshiftLL x0 y [c]) // useful for combined load
(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ORshiftRL x0 y [c])
(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ORshiftRA x0 y [c])
(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (XORshiftLL x0 y [c])
(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (XORshiftRL x0 y [c])
(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (XORshiftRA x0 y [c])
(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (BICshiftLL x0 y [c])
(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (BICshiftRL x0 y [c])
(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (BICshiftRA x0 y [c])
(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMPshiftLL x0 y [c])
(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftLL x1 y [c]))
(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRL x0 y [c])
(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRL x1 y [c]))
(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRA x0 y [c])
(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRA x1 y [c]))
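// For CMP, when the shifted operand sits on the left, the comparison is
// rewritten with its operands swapped and the result wrapped in InvertFlags,
// preserving the original ordering. clobberIfDead always allows the match and
// only marks the shift value dead when the matched use was its sole use.
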
// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
(ADDshiftRL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
(ADDshiftRA (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
(ANDshiftLL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
(ANDshiftRL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
(ANDshiftRA (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
(ORshiftLL (MOVDconst [c]) x [d]) -> (ORconst [c] (SLLconst <x.Type> x [d]))
(ORshiftRL (MOVDconst [c]) x [d]) -> (ORconst [c] (SRLconst <x.Type> x [d]))
(ORshiftRA (MOVDconst [c]) x [d]) -> (ORconst [c] (SRAconst <x.Type> x [d]))
(XORshiftLL (MOVDconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
(XORshiftRL (MOVDconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
(XORshiftRA (MOVDconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))

// constant folding in *shift ops
(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [int64(int64(c)>>uint64(d))])
(SUBshiftLL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)<<uint64(d))])
(SUBshiftRL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)>>uint64(d))])
(SUBshiftRA x (MOVDconst [c]) [d]) -> (SUBconst x [int64(int64(c)>>uint64(d))])
(ANDshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)<<uint64(d))])
(ANDshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)>>uint64(d))])
(ANDshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [int64(int64(c)>>uint64(d))])
(ORshiftLL x (MOVDconst [c]) [d]) -> (ORconst x [int64(uint64(c)<<uint64(d))])
(ORshiftRL x (MOVDconst [c]) [d]) -> (ORconst x [int64(uint64(c)>>uint64(d))])
(ORshiftRA x (MOVDconst [c]) [d]) -> (ORconst x [int64(int64(c)>>uint64(d))])
(XORshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)<<uint64(d))])
(XORshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)>>uint64(d))])
(XORshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [int64(int64(c)>>uint64(d))])
(BICshiftLL x (MOVDconst [c]) [d]) -> (BICconst x [int64(uint64(c)<<uint64(d))])
(BICshiftRL x (MOVDconst [c]) [d]) -> (BICconst x [int64(uint64(c)>>uint64(d))])
(BICshiftRA x (MOVDconst [c]) [d]) -> (BICconst x [int64(int64(c)>>uint64(d))])
(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [int64(int64(c)>>uint64(d))])

// simplification with *shift ops
(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])

// Generate rotates
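// A shift pair that reassembles all 64 bits is a rotate: schematically, the Go
// expression x<<c | x>>(64-c) is a left rotate by c, i.e. a right rotate by
// 64-c (RORconst [64-c]). ADD and XOR behave like OR here because the two
// shifted halves have no overlapping bits. The forms guarded by t.Size() == 4
// below do the same for 32-bit values (RORWconst).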
(ADDshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
( ORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
(XORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
(ADDshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
(XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)

(ADDshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
( ORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
(XORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [ c] x)
( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [ c] x)
(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [ c] x)

// Generic rules rewrite certain AND to a pair of shifts.
// However, on ARM64 the bitmask can fit into an instruction.
// Rewrite it back to AND.
(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits

// do combined loads
// little endian loads
// b[0] | b[1]<<8 -> load 16-bit
(ORshiftLL <t> [8]
    y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
    y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
    && i1 == i0+1
    && x0.Uses == 1 && x1.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1
    && mergePoint(b,x0,x1) != nil
    && clobber(x0) && clobber(x1)
    && clobber(y0) && clobber(y1)
    -> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
    x0:(MOVHUload [i0] {s} p mem)
    y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
    && i2 == i0+2
    && i3 == i0+3
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && y1.Uses == 1 && y2.Uses == 1
    && o0.Uses == 1
    && mergePoint(b,x0,x1,x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(y1) && clobber(y2)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
    x0:(MOVWUload [i0] {s} p mem)
    y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
    y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem)))
    y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
    && i4 == i0+4
    && i5 == i0+5
    && i6 == i0+6
    && i7 == i0+7
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
    && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
    && mergePoint(b,x0,x1,x2,x3,x4) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
    && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4)
    && clobber(o0) && clobber(o1) && clobber(o2)
    -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)

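// The patterns below cover the same little-endian combines when the OR tree is
// built from the most-significant byte down. Schematically, a Go expression such
// as (hypothetical example)
//     uint32(b[3])<<24 | uint32(b[2])<<16 | uint32(b[1])<<8 | uint32(b[0])
// produces this shape, and it still becomes a single plain load (MOVWUload or
// MOVDload) with no byte reversal.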
// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 32-bit
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
    y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
    y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))
    y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
    && i1 == i0+1
    && i2 == i0+2
    && i3 == i0+3
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
    && mergePoint(b,x0,x1,x2,x3) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
    && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
    && clobber(o0) && clobber(o1) && clobber(s0)
    -> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)

// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 64-bit
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
    y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
    y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
    y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem)))
    y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem)))
    y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem)))
    y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))
    y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
    && i1 == i0+1
    && i2 == i0+2
    && i3 == i0+3
    && i4 == i0+4
    && i5 == i0+5
    && i6 == i0+6
    && i7 == i0+7
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
    && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
    && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
    && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
    && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
    && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
    && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7)
    && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3)
    && clobber(o4) && clobber(o5) && clobber(s0)
    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)

// big endian loads
// b[1] | b[0]<<8 -> load 16-bit, reverse
(ORshiftLL <t> [8]
    y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
    y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
    && i1 == i0+1
    && x0.Uses == 1 && x1.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1
    && mergePoint(b,x0,x1) != nil
    && clobber(x0) && clobber(x1)
    && clobber(y0) && clobber(y1)
    -> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))

// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit, reverse
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
    y0:(REV16W x0:(MOVHUload [i2] {s} p mem))
    y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
    && i1 == i0+1
    && i2 == i0+2
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
    && o0.Uses == 1
    && mergePoint(b,x0,x1,x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(y0) && clobber(y1) && clobber(y2)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit, reverse
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
    y0:(REVW x0:(MOVWUload [i4] {s} p mem))
    y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
    y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem)))
    y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
    && i1 == i0+1
    && i2 == i0+2
    && i3 == i0+3
    && i4 == i0+4
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
    && mergePoint(b,x0,x1,x2,x3,x4) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
    && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4)
    && clobber(o0) && clobber(o1) && clobber(o2)
    -> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
    y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
    y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
    y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
    && i1 == i0+1
    && i2 == i0+2
    && i3 == i0+3
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
    && mergePoint(b,x0,x1,x2,x3) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
    && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
    && clobber(o0) && clobber(o1) && clobber(s0)
    -> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
    y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
    y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
    y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
    y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
    y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem)))
    y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem)))
    y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))
    y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
    && i1 == i0+1
    && i2 == i0+2
    && i3 == i0+3
    && i4 == i0+4
    && i5 == i0+5
    && i6 == i0+6
    && i7 == i0+7
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
    && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
    && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
    && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
    && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
    && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
    && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
    && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7)
    && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3)
    && clobber(o4) && clobber(o5) && clobber(s0)
    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// FP simplification
(FNEGS (FMULS x y)) -> (FNMULS x y)
(FNEGD (FMULD x y)) -> (FNMULD x y)
(FMULS (FNEGS x) y) -> (FNMULS x y)
(FMULD (FNEGD x) y) -> (FNMULD x y)
(FNEGS (FNMULS x y)) -> (FMULS x y)
(FNEGD (FNMULD x y)) -> (FMULD x y)
(FNMULS (FNEGS x) y) -> (FMULS x y)
(FNMULD (FNEGD x) y) -> (FMULD x y)
(FADDS a (FMULS x y)) -> (FMADDS a x y)
(FADDD a (FMULD x y)) -> (FMADDD a x y)
(FSUBS a (FMULS x y)) -> (FMSUBS a x y)
(FSUBD a (FMULD x y)) -> (FMSUBD a x y)
(FSUBS (FMULS x y) a) -> (FNMSUBS a x y)
(FSUBD (FMULD x y) a) -> (FNMSUBD a x y)
(FADDS a (FNMULS x y)) -> (FMSUBS a x y)
(FADDD a (FNMULD x y)) -> (FMSUBD a x y)
(FSUBS a (FNMULS x y)) -> (FMADDS a x y)
(FSUBD a (FNMULD x y)) -> (FMADDD a x y)
(FSUBS (FNMULS x y) a) -> (FNMADDS a x y)
(FSUBD (FNMULD x y) a) -> (FNMADDD a x y)
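// As used in the rules above, FNMULS/FNMULD compute -(x*y), and the fused forms
// combine the multiply with the add or subtract in a single instruction: for
// example, a + x*y becomes (FMADDD a x y) and a - x*y becomes (FMSUBD a x y).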