// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) x y) -> (ADD x y)
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)

(Sub(Ptr|64|32|16|8) x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)

(Mul64 x y) -> (MULLD x y)
(Mul(32|16|8) x y) -> (MULLW x y)
(Mul64uhilo x y) -> (LoweredMuluhilo x y)

(Div64 x y) -> (DIVD x y)
(Div64u x y) -> (DIVDU x y)
(Div32 x y) -> (DIVW x y)
(Div32u x y) -> (DIVWU x y)
(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul(64|64u|32|32u) x y) -> (MULH(D|DU|W|WU) x y)

(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

// Lowering float <-> int
(Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
(Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
(Cvt64to32F x) -> (FCFIDS (MTVSRD x))
(Cvt64to64F x) -> (FCFID (MTVSRD x))

(Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
(Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))

(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F x) -> (FRSP x)

(Round(32|64)F x) -> (LoweredRound(32|64)F x)

(Sqrt x) -> (FSQRT x)
(Floor x) -> (FFLOOR x)
(Ceil x) -> (FCEIL x)
(Trunc x) -> (FTRUNC x)
(Round x) -> (FROUND x)
(Copysign x y) -> (FCPSGN y x)
(Abs x) -> (FABS x)

// Lowering constants
(Const(64|32|16|8) [val]) -> (MOVDconst [val])
(Const(32|64)F [val]) -> (FMOV(S|D)const [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

// Constant folding
(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])

// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)

// Rotate generation with non-const shift
// these match patterns from math/bits/RotateLeft[32|64], but there could be others
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)

(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
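
// Illustration (not a rewrite rule): the shapes above are what
// math/bits.RotateLeft64 produces, since it is written as x<<s | x>>(64-s)
// with s = k&63. A minimal Go sketch of code these rules turn into a
// single ROTL:
//
//	import "math/bits"
//
//	func rotl(x uint64, k int) uint64 {
//		return bits.RotateLeft64(x, k) // SLD/SRD pair above, lowered to ROTL
//	}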

(Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

(Lsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (Const64 [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

// large constant shifts
(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
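
// Worked example (illustration only): Go defines shifts by any non-negative
// count, so an unsigned shift by a constant at or beyond the operand width
// must produce 0, which is all these rules emit:
//
//	func f(x uint32) uint32 { return x >> 40 } // always 0 -> (MOVDconst [0])
//
// Signed right shifts of the same size keep only the sign, handled next.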

// large constant signed right shift, we leave the sign bit
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])

// constant shifts
(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

// Lower bounded shifts first. No need to check shift value.
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLD x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRD x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVHZreg x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVBZreg x) y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAD x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVHreg x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVBreg x) y)
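
// Illustration (a sketch; exactly when shiftIsBounded(v) holds is decided by
// earlier compiler passes, so treat the trigger as an assumption): when the
// source guarantees the count is in range, e.g.
//
//	func shr(x uint64, s uint) uint64 {
//		return x >> (s & 63) // count provably < 64
//	}
//
// the shift can lower to a bare SRD with none of the mask-and-carry
// sequence used by the general rules below.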

// non-constant rotates
// These are subexpressions found in statements that can become rotates
// In these cases the shift count is known to be < 64 so the more complicated expressions
// with Mask & Carry are not needed
(Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))

(Rsh64x64 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
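
// Semantics sketch for the generic lowering above (illustration in Go-level
// pseudocode, not emitted code): ADDconstForCarry [-64] y sets the carry
// exactly when y >= 64 unsigned, and MaskIfNotCarry is -1 when the carry is
// clear. ORN computes y | ^mask, so the effective shift amount is
//
//	func shiftAmount(y uint64) uint64 {
//		if y < 64 {
//			return y // mask == -1, y | ^mask == y
//		}
//		return ^uint64(0) // mask == 0, all-ones count shifts everything out
//	}
//
// which gives Go's required 0 (or sign) result for oversized counts.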

(Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))

(Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))

(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))

(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))

(Rsh64x32 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))

(Rsh32x32 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))

(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))

(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))

(Rsh64x16 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))

(Rsh32x16 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))

(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))

(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))

(Rsh64x8 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))

(Rsh32x8 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))

(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))

(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))

// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x

// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarryClear) -> -1
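
// Worked example for the cleanup rules above (illustration): with c = -64
// and d = 63, the masked count y&63 is at most 63, so the carry from
// ADDconstForCarry [-64] is never set and the mask is always -1; ORN with
// -1 then returns the count unchanged. The net effect is that
//
//	x >> (y & 63) // x, y uint64
//
// collapses to a single SRD once these rewrites have run.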

(Addr {sym} base) -> (MOVDaddr {sym} base)
(LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)

// TODO: optimize these cases?
(Ctz32NonZero x) -> (Ctz32 x)
(Ctz64NonZero x) -> (Ctz64 x)

(Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
(Ctz16 x) -> (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x) -> (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
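
// Identity behind the Ctz rules above (illustration): (x-1) &^ x has ones
// exactly in the positions below the lowest set bit of x, so its population
// count is the trailing-zero count; for x == 0 it is all ones, giving the
// defined result of 64 (or 32/16/8). Equivalent Go:
//
//	import "math/bits"
//
//	func ctz64(x uint64) int {
//		return bits.OnesCount64((x - 1) &^ x) // == bits.TrailingZeros64(x)
//	}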

(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))

(PopCount64 x) -> (POPCNTD x)
(PopCount32 x) -> (POPCNTW (MOVWZreg x))
(PopCount16 x) -> (POPCNTW (MOVHZreg x))
(PopCount8 x) -> (POPCNTB (MOVBZreg x))

(And(64|32|16|8) x y) -> (AND x y)
(Or(64|32|16|8) x y) -> (OR x y)
(Xor(64|32|16|8) x y) -> (XOR x y)

(Neg(64|32|16|8) x) -> (NEG x)
(Neg64F x) -> (FNEG x)
(Neg32F x) -> (FNEG x)

(Com(64|32|16|8) x) -> (NOR x x)

// Lowering boolean ops
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(Not x) -> (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (NOR y y)) -> (ANDN x y)

// Lowering comparisons
(EqB x y) -> (ANDconst [1] (EQV x y))
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPU x y))
(Eq64F x y) -> (Equal (FCMPU x y))
(EqPtr x y) -> (Equal (CMP x y))

(NeqB x y) -> (XOR x y)
// Like Eq8 and Eq16, prefer sign extension, since it is likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
(NeqPtr x y) -> (NotEqual (CMP x y))
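
// Illustration of the extension choice above (sketch): both operands are
// widened the same way before CMPW; when both inputs are signed, the
// SignExt8to32 here can later fold into the sign-extending load the operand
// already came from. E.g. comparing two int8 loads:
//
//	func eq(p, q *int8) bool { return *p == *q }
//
// loads each byte sign-extended (MOVBreg), so the explicit extension
// disappears and only the CMPW remains.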

(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (FLessThan (FCMPU x y))
(Less64F x y) -> (FLessThan (FCMPU x y))

(Less8U x y) -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))

(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (FLessEqual (FCMPU x y))
(Leq64F x y) -> (FLessEqual (FCMPU x y))

(Leq8U x y) -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))

(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater(32|64)F x y) -> (FGreaterThan (FCMPU x y))

(Greater8U x y) -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))

(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq(32|64)F x y) -> (FGreaterEqual (FCMPU x y))

(Geq8U x y) -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPWU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (FLessThan cc) yes no) -> (FLT cc yes no)
(If (FLessEqual cc) yes no) -> (FLE cc yes no)
(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)

(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)
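
// Illustration (sketch): a boolean that is not itself a comparison is
// materialized as 0 or 1, so the fallback rule above branches on a
// compare-with-zero:
//
//	func f(b bool) int {
//		if b { // (NE (CMPWconst [0] b) yes no)
//			return 1
//		}
//		return 0
//	}
//
// When the condition is (Equal cc) etc., the rules above drop the extra
// compare and branch on cc directly.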

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)

// absorb flag constants into branches
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)

(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) -> (MOVDconst [1])
(Equal (FlagLT)) -> (MOVDconst [0])
(Equal (FlagGT)) -> (MOVDconst [0])

(NotEqual (FlagEQ)) -> (MOVDconst [0])
(NotEqual (FlagLT)) -> (MOVDconst [1])
(NotEqual (FlagGT)) -> (MOVDconst [1])

(LessThan (FlagEQ)) -> (MOVDconst [0])
(LessThan (FlagLT)) -> (MOVDconst [1])
(LessThan (FlagGT)) -> (MOVDconst [0])

(LessEqual (FlagEQ)) -> (MOVDconst [1])
(LessEqual (FlagLT)) -> (MOVDconst [1])
(LessEqual (FlagGT)) -> (MOVDconst [0])

(GreaterThan (FlagEQ)) -> (MOVDconst [0])
(GreaterThan (FlagLT)) -> (MOVDconst [0])
(GreaterThan (FlagGT)) -> (MOVDconst [1])

(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
(GreaterEqual (FlagLT)) -> (MOVDconst [0])
(GreaterEqual (FlagGT)) -> (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) -> (Equal x)
(NotEqual (InvertFlags x)) -> (NotEqual x)
(LessThan (InvertFlags x)) -> (GreaterThan x)
(GreaterThan (InvertFlags x)) -> (LessThan x)
(LessEqual (InvertFlags x)) -> (GreaterEqual x)
(GreaterEqual (InvertFlags x)) -> (LessEqual x)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
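
// Illustration of the signed-byte case above (sketch): PPC64 has no
// sign-extending byte load, so an int8 load is a zero-extending MOVBZload
// followed by a sign-extending MOVBreg register move:
//
//	func load(p *int8) int64 {
//		return int64(*p) // MOVBZload, then MOVBreg
//	}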

// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
(Zero [2] destptr mem) ->
    (MOVHstorezero destptr mem)
(Zero [3] destptr mem) ->
    (MOVBstorezero [2] destptr
        (MOVHstorezero destptr mem))
(Zero [4] destptr mem) ->
    (MOVWstorezero destptr mem)
(Zero [5] destptr mem) ->
    (MOVBstorezero [4] destptr
        (MOVWstorezero destptr mem))
(Zero [6] destptr mem) ->
    (MOVHstorezero [4] destptr
        (MOVWstorezero destptr mem))
(Zero [7] destptr mem) ->
    (MOVBstorezero [6] destptr
        (MOVHstorezero [4] destptr
            (MOVWstorezero destptr mem)))

// MOVD for store with DS must have offsets that are a multiple of 4
(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero destptr mem)
(Zero [8] destptr mem) ->
    (MOVWstorezero [4] destptr
        (MOVWstorezero [0] destptr mem))
// Handle these cases only if aligned properly, otherwise use general case below
(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVWstorezero [8] destptr
        (MOVDstorezero [0] destptr mem))
(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero [8] destptr
        (MOVDstorezero [0] destptr mem))
(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero [16] destptr
        (MOVDstorezero [8] destptr
            (MOVDstorezero [0] destptr mem)))
(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero [24] destptr
        (MOVDstorezero [16] destptr
            (MOVDstorezero [8] destptr
                (MOVDstorezero [0] destptr mem))))

// Handle cases not handled above
(Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)

// moves
// Only the MOVD and MOVW instructions require 4 byte
// alignment in the offset field. The other MOVx instructions
// allow any alignment.
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) ->
    (MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) ->
    (MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are a multiple of 4
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
    (MOVWstore [4] dst (MOVWZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [3] dst src mem) ->
    (MOVBstore [2] dst (MOVBZload [2] src mem)
        (MOVHstore dst (MOVHload src mem) mem))
(Move [5] dst src mem) ->
    (MOVBstore [4] dst (MOVBZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) ->
    (MOVHstore [4] dst (MOVHZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) ->
    (MOVBstore [6] dst (MOVBZload [6] src mem)
        (MOVHstore [4] dst (MOVHZload [4] src mem)
            (MOVWstore dst (MOVWZload src mem) mem)))

// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
(Move [s] dst src mem) && s > 8 ->
    (LoweredMove [s] dst src mem)
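
// Worked example (illustration): the Zero rules above split an odd-sized
// clear into one store of each needed size, e.g.
//
//	var a [7]byte
//	a = [7]byte{} // MOVWstorezero [0], MOVHstorezero [4], MOVBstorezero [6]
//
// Moves decompose the same way, and anything larger than the listed sizes
// (or insufficiently aligned for MOVD) falls through to the Lowered loop.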

// Calls
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerSP) -> (LoweredGetCallerSP)
(GetCallerPC) -> (LoweredGetCallerPC)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

// Write barrier.
(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(ANDconst [-1] x) -> x
(ANDconst [0] _) -> (MOVDconst [0])
(XORconst [0] x) -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORconst [0] x) -> x

// zero-extend of small and -> small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y

// sign extend of small-positive and -> small-positive-and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y

// small and of zero-extend -> either zero-extend or small and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [0xFF] y:(MOVBreg _)) -> y
(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF -> y
(ANDconst [0xFFFF] y:(MOVHreg _)) -> y

(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) -> (MOVWZreg x)
// normal case
(ANDconst [c] (MOV(B|BZ)reg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOV(H|HZ)reg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOV(W|WZ)reg x)) -> (ANDconst [c&0xFFFFFFFF] x)

// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) -> (SRWconst [c] (MOVBZreg x))
(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) -> (SRWconst [c] (MOVHZreg x))
(MOVWZreg (SRWconst [c] (MOVWZreg x))) -> (SRWconst [c] (MOVWZreg x))
(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) -> (SRAWconst [c] (MOVBreg x))
(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) -> (SRAWconst [c] (MOVHreg x))
(MOVWreg (SRAWconst [c] (MOVWreg x))) -> (SRAWconst [c] (MOVWreg x))

(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRWconst [c] x)
(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRWconst [c] x)
(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 -> (SRWconst [c] x)
(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRAWconst [c] x)
(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRAWconst [c] x)
(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 -> (SRAWconst [c] x)

// initial right shift will handle sign/zero extend
(MOVBZreg (SRDconst [c] x)) && c>=56 -> (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c>56 -> (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c==56 -> (SRADconst [c] x)
(MOVBZreg (SRWconst [c] x)) && c>=24 -> (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c>24 -> (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c==24 -> (SRAWconst [c] x)

(MOVHZreg (SRDconst [c] x)) && c>=48 -> (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c>48 -> (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c==48 -> (SRADconst [c] x)
(MOVHZreg (SRWconst [c] x)) && c>=16 -> (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c>16 -> (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c==16 -> (SRAWconst [c] x)

(MOVWZreg (SRDconst [c] x)) && c>=32 -> (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c>32 -> (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c==32 -> (SRADconst [c] x)

// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y // repeat
(MOVBreg y:(MOVBreg _)) -> y // repeat
(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) -> y // repeat
(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow
(MOVHZreg y:(MOVHBRload _ _)) -> y

(MOVHreg y:(MOVHreg _)) -> y // repeat
(MOVHreg y:(MOVBreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) -> y // repeat
(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVHBRload _ _)) -> y
(MOVWZreg y:(MOVWBRload _ _)) -> y

(MOVWreg y:(MOVWreg _)) -> y // repeat
(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
(MOVWreg y:(MOVBreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)

// Arithmetic constant ops

(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
// TODO deal with subtract-from-const

(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)

// Use register moves instead of stores and loads to move int<->float values
// Common with math Float64bits, Float64frombits
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)

(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
(MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)

(MTVSRD (MOVDconst [c])) -> (FMOVDconst [c])
(MFVSRD (FMOVDconst [c])) -> (MOVDconst [c])

(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem)
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)
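
// Illustration for the register-move rules above (sketch): math.Float64bits
// is an int<->float reinterpretation that would otherwise round-trip
// through memory; when the load can see the matching store, these rules
// turn it into a direct MFVSRD/MTVSRD register move:
//
//	import "math"
//
//	func toBits(f float64) uint64    { return math.Float64bits(f) }     // MFVSRD
//	func toFloat(u uint64) float64   { return math.Float64frombits(u) } // MTVSRD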

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

// Fold address into load/store.
// The assembler needs to generate several instructions and use a
// temp register for accessing a global, and each time it will reload
// the temp register. So don't fold the address of a global unless there
// is only one use.
(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)

// Determine load + addressing that can be done as a register indexed load
(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)

// Determine indexed loads with constant values that can be done without index
(MOV(D|W|WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
(MOV(D|W|WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)

// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)

// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVWstorezero [off1+off2] {sym} x mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVHstorezero [off1+off2] {sym} x mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVBstorezero [off1+off2] {sym} x mem)

// Stores with addressing that can be done as indexed stores
(MOV(D|W|H|B)store [off] {sym} p:(ADD ptr idx) val mem) && off == 0 && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)

// Stores with constant index values can be done without indexed instructions
(MOV(D|W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
(MOV(D|W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)

// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)

// atomic intrinsics
(AtomicLoad(32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) [1] ptr mem)
(AtomicLoadAcq32 ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)

(AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) [1] ptr val mem)
(AtomicStoreRel32 ptr val mem) -> (LoweredAtomicStore32 [0] ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)

(AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem)

(AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem)

(AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
(AtomicCompareAndSwapRel32 ptr old new_ mem) -> (LoweredAtomicCas32 [0] ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) x) -> (MOVBreg x)
(SignExt16to(32|64) x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to(16|32|64) x) -> (MOVBZreg x)
(ZeroExt16to(32|64) x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

(Trunc(16|32|64)to8 x) && isSigned(x.Type) -> (MOVBreg x)
(Trunc(16|32|64)to8 x) -> (MOVBZreg x)
(Trunc(32|64)to16 x) && isSigned(x.Type) -> (MOVHreg x)
(Trunc(32|64)to16 x) -> (MOVHZreg x)
(Trunc64to32 x) && isSigned(x.Type) -> (MOVWreg x)
(Trunc64to32 x) -> (MOVWZreg x)

(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
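
// Identity behind Slicemask above (illustration): the result must be all
// ones when x > 0 and zero when x == 0 (x is a length or capacity, so it is
// never negative). Negating makes the sign bit record "x != 0", and an
// arithmetic shift by 63 smears it across the word:
//
//	func slicemask(x int64) int64 {
//		return -x >> 63 // 0 if x == 0, -1 if x > 0
//	}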

// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
(MOV(B|H|W)Zreg x:(MOVBZload _ _)) -> x
(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) -> x
(MOV(H|W)Zreg x:(MOVHZload _ _)) -> x
(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) -> x
(MOV(H|W)reg x:(MOVHload _ _)) -> x
(MOV(H|W)reg x:(MOVHloadidx _ _ _)) -> x
(MOVWZreg x:(MOVWZload _ _)) -> x
(MOVWZreg x:(MOVWZloadidx _ _ _)) -> x
(MOVWreg x:(MOVWload _ _)) -> x
(MOVWreg x:(MOVWloadidx _ _ _)) -> x

// don't extend if argument is already extended
(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x

(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])

// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstoreidx [off] {sym} ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstoreidx [off] {sym} ptr idx x mem)
(MOVHstoreidx [off] {sym} ptr idx (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstoreidx [off] {sym} ptr idx x mem)
(MOVWstoreidx [off] {sym} ptr idx (MOV(W|WZ)reg x) mem) -> (MOVWstoreidx [off] {sym} ptr idx x mem)
(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHBRstore {sym} ptr x mem)
(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWBRstore {sym} ptr x mem)

// Lose W-widening ops fed to compare-W
(CMPW x (MOVWreg y)) -> (CMPW x y)
(CMPW (MOVWreg x) y) -> (CMPW x y)
(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
(CMPWU (MOVWZreg x) y) -> (CMPWU x y)

(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))

(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))
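
// Illustration for the constant-compare rules above (sketch): the immediate
// must be the second operand of the hardware compare, so a constant on the
// left swaps sides and records the swap as InvertFlags; the InvertFlags
// rules earlier in this file then flip the condition instead. E.g.
//
//	func f(x int64) bool { return 10 < x }
//
// becomes (GreaterThan (CMPconst x [10])), i.e. x > 10.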

// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)

// floating point negative abs
(FNEG (FABS x)) -> (FNABS x)
(FNEG (FNABS x)) -> (FABS x)

// floating-point fused multiply-add/sub
(FADD (FMUL x y) z) -> (FMADD x y z)
(FSUB (FMUL x y) z) -> (FMSUB x y z)
(FADDS (FMULS x y) z) -> (FMADDS x y z)
(FSUBS (FMULS x y) z) -> (FMSUBS x y z)

// The following rules match code found in the encoding/binary functions UintXX (load)
// and PutUintXX (store), and convert the multiple single-byte loads or stores in those
// functions into the single largest possible load or store.
// Some are marked big or little endian based on the order in which the bytes are
// loaded or stored, not on the ordering of the machine. These are intended for
// little endian machines. To implement them for big endian machines, most rules
// would have to be duplicated with the result reversed, i.e., MOVHZload on little
// endian would be MOVHBRload on big endian, and vice versa.
// b[0] | b[1]<<8 -> load 16-bit Little endian
(OR <t> x0:(MOVBZload [i0] {s} p mem)
    o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
    && !config.BigEndian
    && i1 == i0+1
    && x0.Uses == 1 && x1.Uses == 1
    && o1.Uses == 1
    && mergePoint(b, x0, x1) != nil
    && clobber(x0) && clobber(x1) && clobber(o1)
    -> @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)

// b[0]<<8 | b[1] -> load 16-bit Big endian on Little endian arch.
// Use byte-reverse indexed load for 2 bytes.
(OR <t> x0:(MOVBZload [i1] {s} p mem)
    o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
    && !config.BigEndian
    && i1 == i0+1
    && x0.Uses == 1 && x1.Uses == 1
    && o1.Uses == 1
    && mergePoint(b, x0, x1) != nil
    && clobber(x0) && clobber(x1) && clobber(o1)
    -> @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)

// b[0]<<n+8 | b[1]<<n -> load 16-bit Big endian (where n%8 == 0)
// Use byte-reverse indexed load for 2 bytes,
// then shift left to the correct position. Used to match subrules
// from longer rules.
(OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
    s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
    && !config.BigEndian
    && i1 == i0+1
    && n1%8 == 0
    && n2 == n1+8
    && x0.Uses == 1 && x1.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1
    && mergePoint(b, x0, x1) != nil
    && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)
    -> @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
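
// Illustration (sketch): the Go source shapes the 16-bit rules above match,
// as in encoding/binary's Uint16 accessors:
//
//	func le16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } // -> MOVHZload
//	func be16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 } // -> MOVHBRload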

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian
(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
    o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
    x0:(MOVHZload [i0] {s} p mem)))
    && !config.BigEndian
    && i2 == i0+2
    && i3 == i0+3
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && o0.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1
    && mergePoint(b, x0, x1, x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(s0) && clobber(s1)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)

// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Could be used to match subrules of a longer rule.
(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
    o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
    x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
    && !config.BigEndian
    && i1 == i0+1
    && i2 == i0+2
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && o0.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1
    && mergePoint(b, x0, x1, x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(s0) && clobber(s1)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)

// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Could be used to match subrules of a longer rule.
(OR <t> x0:(MOVBZload [i3] {s} p mem)
    o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
    s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
    && !config.BigEndian
    && i2 == i0+2
    && i3 == i0+3
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && o0.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1
    && mergePoint(b, x0, x1, x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(s0) && clobber(s1)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)

// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Used to match longer rules.
(OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
    o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
    s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
    && !config.BigEndian
    && i2 == i0+2
    && i3 == i0+3
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && o0.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
    && mergePoint(b, x0, x1, x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(s0) && clobber(s1) && clobber(s2)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
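
// Illustrative sketch (not from this file; helper names are hypothetical):
// the corresponding 32-bit Go shapes, as in encoding/binary's Uint32:
//     func load32LE(b []byte) uint32 { // one MOVWZload
//         return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//     }
//     func load32BE(b []byte) uint32 { // one MOVWBRload on little endian
//         return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
//     }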

// b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with constant address.
// Used to match longer rules.
(OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
    o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
    s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
    && !config.BigEndian
    && i1 == i0+1
    && i2 == i0+2
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && o0.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
    && mergePoint(b, x0, x1, x2) != nil
    && clobber(x0) && clobber(x1) && clobber(x2)
    && clobber(s0) && clobber(s1) && clobber(s2)
    && clobber(o0)
    -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian
// Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
// Offset must be a multiple of 4 for MOVD.
(OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
    o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
    o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
    o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
    x0:(MOVWZload {s} [i0] p mem)))))
    && !config.BigEndian
    && i0%4 == 0
    && i4 == i0+4
    && i5 == i0+5
    && i6 == i0+6
    && i7 == i0+7
    && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
    && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
    && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
    && mergePoint(b, x0, x4, x5, x6, x7) != nil
    && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
    && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6)
    && clobber(o3) && clobber(o4) && clobber(o5)
    -> @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)

// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit Big endian ordered bytes on Little endian arch
// Use byte-reverse indexed load of 8 bytes.
// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
(OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
    o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
    o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
    o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
    x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
    && !config.BigEndian
    && i1 == i0+1
    && i2 == i0+2
    && i3 == i0+3
    && i4 == i0+4
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
    && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
    && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
    && mergePoint(b, x0, x1, x2, x3, x4) != nil
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
    && clobber(o0) && clobber(o1) && clobber(o2)
    && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)
    -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
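
// Illustrative sketch (not from this file): the 64-bit Go shapes, as in
// encoding/binary's Uint64:
//     le := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
//         uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 // one MOVDload
//     be := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
//         uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 // one MOVDBRload
// For the little-endian form the offset must be a multiple of 4, per the
// MOVD constraint noted above.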

// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit Big endian ordered bytes on Little endian arch
// Use byte-reverse indexed load of 8 bytes.
// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
(OR <t> x7:(MOVBZload [i7] {s} p mem)
    o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
    o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
    o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
    s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
    && !config.BigEndian
    && i4 == i0+4
    && i5 == i0+5
    && i6 == i0+6
    && i7 == i0+7
    && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
    && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
    && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
    && mergePoint(b, x3, x4, x5, x6, x7) != nil
    && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
    && clobber(o3) && clobber(o4) && clobber(o5)
    && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)
    -> @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)

// 2 byte store Little endian as in:
// b[0] = byte(v >> 16)
// b[1] = byte(v >> 24)
// Added for use in matching longer rules.
(MOVBstore [i1] {s} p (SR(W|D)const w [24])
    x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
    && !config.BigEndian
    && x0.Uses == 1
    && i1 == i0+1
    && clobber(x0)
    -> (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)

// 2 byte store Little endian as in:
// b[0] = byte(v)
// b[1] = byte(v >> 8)
(MOVBstore [i1] {s} p (SR(W|D)const w [8])
    x0:(MOVBstore [i0] {s} p w mem))
    && !config.BigEndian
    && x0.Uses == 1
    && i1 == i0+1
    && clobber(x0)
    -> (MOVHstore [i0] {s} p w mem)

// 4 byte store Little endian as in:
// b[0:1] = uint16(v)
// b[2:3] = uint16(v >> 16)
(MOVHstore [i1] {s} p (SR(W|D)const w [16])
    x0:(MOVHstore [i0] {s} p w mem))
    && !config.BigEndian
    && x0.Uses == 1
    && i1 == i0+2
    && clobber(x0)
    -> (MOVWstore [i0] {s} p w mem)

// 4 byte store Big endian as in:
// b[0] = byte(v >> 24)
// b[1] = byte(v >> 16)
// b[2] = byte(v >> 8)
// b[3] = byte(v)
// Use byte-reverse indexed 4 byte store.
(MOVBstore [i3] {s} p w
    x0:(MOVBstore [i2] {s} p (SRWconst w [8])
    x1:(MOVBstore [i1] {s} p (SRWconst w [16])
    x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
    && !config.BigEndian
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
    && i1 == i0+1 && i2 == i0+2 && i3 == i0+3
    && clobber(x0) && clobber(x1) && clobber(x2)
    -> (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)

// The 2 byte store appears after the 4 byte store so that the
// match for the 2 byte store is not done first.
// If the 4 byte store were based on the 2 byte store, there would be
// variations on the MOVDaddr subrule that would require additional
// rules to be written.
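
// Illustrative sketch (not from this file; function name is hypothetical):
// a big-endian PutUint32-style sequence
//     func put32BE(b []byte, v uint32) {
//         b[0], b[1], b[2], b[3] = byte(v>>24), byte(v>>16), byte(v>>8), byte(v)
//     }
// collapses via the 4 byte store rule above into a single byte-reversed
// store, (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem).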

// 2 byte store Big endian as in:
// b[0] = byte(v >> 8)
// b[1] = byte(v)
(MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
    && !config.BigEndian
    && x0.Uses == 1
    && i1 == i0+1
    && clobber(x0)
    -> (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)

// 8 byte store Little endian as in:
// b[0] = byte(v)
// b[1] = byte(v >> 8)
// b[2] = byte(v >> 16)
// b[3] = byte(v >> 24)
// b[4] = byte(v >> 32)
// b[5] = byte(v >> 40)
// b[6] = byte(v >> 48)
// b[7] = byte(v >> 56)
// Built on previously defined rules.
// Offset must be a multiple of 4 for MOVDstore.
(MOVBstore [i7] {s} p (SRDconst w [56])
    x0:(MOVBstore [i6] {s} p (SRDconst w [48])
    x1:(MOVBstore [i5] {s} p (SRDconst w [40])
    x2:(MOVBstore [i4] {s} p (SRDconst w [32])
    x3:(MOVWstore [i0] {s} p w mem)))))
    && !config.BigEndian
    && i0%4 == 0
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
    && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
    -> (MOVDstore [i0] {s} p w mem)

// 8 byte store Big endian as in:
// b[0] = byte(v >> 56)
// b[1] = byte(v >> 48)
// b[2] = byte(v >> 40)
// b[3] = byte(v >> 32)
// b[4] = byte(v >> 24)
// b[5] = byte(v >> 16)
// b[6] = byte(v >> 8)
// b[7] = byte(v)
// Use byte-reverse indexed 8 byte store.
(MOVBstore [i7] {s} p w
    x0:(MOVBstore [i6] {s} p (SRDconst w [8])
    x1:(MOVBstore [i5] {s} p (SRDconst w [16])
    x2:(MOVBstore [i4] {s} p (SRDconst w [24])
    x3:(MOVBstore [i3] {s} p (SRDconst w [32])
    x4:(MOVBstore [i2] {s} p (SRDconst w [40])
    x5:(MOVBstore [i1] {s} p (SRDconst w [48])
    x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
    && !config.BigEndian
    && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
    && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
    && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
    -> (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
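
// Illustrative sketch (not from this file): with the store rules above, the
// fixed-endian 64-bit stores in encoding/binary each reduce to one store:
//     binary.LittleEndian.PutUint64(b, v) // one MOVDstore (offset must be a multiple of 4)
//     binary.BigEndian.PutUint64(b, v)    // one MOVDBRstore on little endian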