github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/cmd/compile/internal/ssa/gen/PPC64.rules

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add(Ptr|64|32|16|8)  x y) -> (ADD  x y)
     7  (Add64F x y) -> (FADD x y)
     8  (Add32F x y) -> (FADDS x y)
     9  
    10  (Sub(Ptr|64|32|16|8)  x y) -> (SUB  x y)
    11  (Sub32F x y) -> (FSUBS x y)
    12  (Sub64F x y) -> (FSUB x y)
    13  
    14  (Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
    15  (Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
    16  (Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
    17  (Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
    18  (Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
    19  (Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
    20  (Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
    21  (Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
    22  
    23  // (x + y) / 2 with x>=y -> (x - y) / 2 + y
    24  (Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
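        // Rewriting the average this way avoids overflow of x+y: with x >= y
        // (as noted above), x-y cannot wrap, and adding y back cannot overflow;
        // e.g. x=10, y=4: (10-4)>>1 + 4 == 7 == (10+4)/2.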
    25  
    26  (Mul64  x y) -> (MULLD  x y)
    27  (Mul(32|16|8)  x y) -> (MULLW  x y)
    28  
    29  (Div64  x y) -> (DIVD  x y)
    30  (Div64u x y) -> (DIVDU x y)
    31  (Div32  x y) -> (DIVW  x y)
    32  (Div32u x y) -> (DIVWU x y)
    33  (Div16  x y) -> (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    34  (Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
    35  (Div8   x y) -> (DIVW  (SignExt8to32 x) (SignExt8to32 y))
    36  (Div8u  x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
    37  
    38  (Hmul(64|64u|32|32u)  x y) -> (MULH(D|DU|W|WU)  x y)
    39  
    40  (Mul32F x y) -> (FMULS x y)
    41  (Mul64F x y) -> (FMUL x y)
    42  
    43  (Div32F x y) -> (FDIVS x y)
    44  (Div64F x y) -> (FDIV x y)
    45  
    46  // Lowering float <-> int
    47  (Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
    48  (Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
    49  (Cvt64to32F x) -> (FCFIDS (MTVSRD x))
    50  (Cvt64to64F x) -> (FCFID (MTVSRD x))
    51  
    52  (Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
    53  (Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
    54  (Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
    55  (Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))
    56  
    57  (Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
    58  (Cvt64Fto32F x) -> (FRSP x)
    59  
    60  (Round(32|64)F x) -> (LoweredRound(32|64)F x)
    61  
    62  (Sqrt x) -> (FSQRT x)
    63  (Floor x) -> (FFLOOR x)
    64  (Ceil x) -> (FCEIL x)
    65  (Trunc x) -> (FTRUNC x)
    66  (Copysign x y) -> (FCPSGN y x)
    67  (Abs x) -> (FABS x)
    68  
    69  // Lowering constants
    70  (Const(64|32|16|8)  [val]) -> (MOVDconst [val])
    71  (Const(32|64)F [val]) -> (FMOV(S|D)const [val])
    72  (ConstNil) -> (MOVDconst [0])
    73  (ConstBool [b]) -> (MOVDconst [b])
    74  
    75  // Constant folding
    76  (FABS (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Abs(i2f(x)))])
    77  (FSQRT (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Sqrt(i2f(x)))])
    78  (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Floor(i2f(x)))])
    79  (FCEIL (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Ceil(i2f(x)))])
    80  (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Trunc(i2f(x)))])
    81  
    82  // Rotate generation with const shift
    83  (ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
    84  ( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
    85  (XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
    86  
    87  (ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
    88  ( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
    89  (XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
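        // e.g. a constant rotate written in Go source has this shape (a sketch):
        //
        //	func rotl10(x uint64) uint64 { return x<<10 | x>>54 }
        //
        // The shifts lower to SLDconst [10] and SRDconst [54], and since 54 == 64-10
        // the OR above becomes (ROTLconst [10] x).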
    90  
    91  // Rotate generation with non-const shift
    92  // these match patterns produced by math/bits.RotateLeft(32|64), but there could be others
    93  (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
    94  ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
    95  (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
    96  
    97  (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
    98  ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
    99  (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
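        // For reference, math/bits.RotateLeft64 is written with this shape (a sketch):
        //
        //	func rotl(x uint64, k int) uint64 {
        //		s := uint(k) & 63
        //		return x<<s | x>>(64-s)
        //	}
        //
        // The masked shift amount and the (64 - masked amount) subtraction are what
        // the SLD/SRD operands above match.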
   100  
   101  (Lsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
   102  (Rsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
   103  (Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
   104  (Lsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
   105  (Rsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
   106  (Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
   107  (Lsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
   108  (Rsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   109  (Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   110  (Lsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SLWconst x [c])
   111  (Rsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   112  (Rsh8Ux64  x (Const64 [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   113  
   114  (Lsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
   115  (Rsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
   116  (Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
   117  (Lsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
   118  (Rsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
   119  (Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
   120  (Lsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
   121  (Rsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   122  (Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   123  (Lsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SLWconst x [c])
   124  (Rsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   125  (Rsh8Ux32  x (Const64 [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   126  
   127  // large constant shifts
   128  (Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
   129  (Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
   130  (Lsh32x64  _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
   131  (Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
   132  (Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
   133  (Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
   134  (Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
   135  (Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
   136  
   137  // large constant signed right shift: only copies of the sign bit remain
   138  (Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
   139  (Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
   140  (Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
   141  (Rsh8x64  x (Const64 [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])
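        // A shift of at least the operand width leaves only copies of the sign bit,
        // matching Go's arithmetic-shift semantics; e.g. int64(-8) >> 100 == -1 and
        // int64(8) >> 100 == 0, the same results as shifting by 63.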
   142  
   143  // constant shifts
   144  (Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
   145  (Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
   146  (Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
   147  (Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
   148  (Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
   149  (Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
   150  (Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
   151  (Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   152  (Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   153  (Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SLWconst x [c])
   154  (Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   155  (Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   156  
   157  (Lsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
   158  (Rsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
   159  (Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
   160  (Lsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
   161  (Rsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
   162  (Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
   163  (Lsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
   164  (Rsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   165  (Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   166  (Lsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SLWconst x [c])
   167  (Rsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   168  (Rsh8Ux32  x (MOVDconst [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   169  
   170  // non-constant rotates
   171  // These are subexpressions found in statements that can become rotates
   172  // In these cases the shift count is known to be < 64, so the more complicated
   173  // expressions with Mask & Carry are not needed
   174  (Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
   175  (Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
   176  (Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
   177  (Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
   178  (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   179  (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   180  (Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
   181  (Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
   182  (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   183  (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   184  
   185  (Rsh64x64 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
   186  (Rsh64Ux64 x y) -> (SRD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
   187  (Lsh64x64 x y)  -> (SLD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
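        // How the mask-and-carry trick works: ADDconstForCarry [-64] y computes
        // y-64 purely for its carry-out. For y < 64 there is no carry, MaskIfNotCarry
        // produces -1, and ORN y (-1) == y | ^(-1) leaves y unchanged. For y >= 64
        // the carry is set, the mask is 0, and ORN forces the shift amount to all
        // ones; the doubleword shifts take a 7-bit amount, so that yields 0 for
        // SLD/SRD and all sign bits for SRAD, matching Go's shift semantics.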
   188  
   189  (Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
   190  (Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))
   191  
   192  (Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
   193  (Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
   194  (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   195  (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   196  
   197  (Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
   198  (Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
   199  (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   200  (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   201  
   202  (Rsh32x64 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
   203  (Rsh32Ux64 x y) -> (SRW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
   204  (Lsh32x64 x y)  -> (SLW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
   205  
   206  (Rsh16x64 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
   207  (Rsh16Ux64 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
   208  (Lsh16x64 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
   209  
   210  (Rsh8x64 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
   211  (Rsh8Ux64 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
   212  (Lsh8x64 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
   213  
   214  (Rsh64x32 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
   215  (Rsh64Ux32 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
   216  (Lsh64x32 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
   217  
   218  (Rsh32x32 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
   219  (Rsh32Ux32 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
   220  (Lsh32x32 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
   221  
   222  (Rsh16x32 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
   223  (Rsh16Ux32 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
   224  (Lsh16x32 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
   225  
   226  (Rsh8x32 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
   227  (Rsh8Ux32 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
   228  (Lsh8x32 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
   229  
   230  
   231  (Rsh64x16 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
   232  (Rsh64Ux16 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
   233  (Lsh64x16 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
   234  
   235  (Rsh32x16 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
   236  (Rsh32Ux16 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
   237  (Lsh32x16 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
   238  
   239  (Rsh16x16 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
   240  (Rsh16Ux16 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
   241  (Lsh16x16 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
   242  
   243  (Rsh8x16 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
   244  (Rsh8Ux16 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
   245  (Lsh8x16 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
   246  
   247  
   248  (Rsh64x8 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
   249  (Rsh64Ux8 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
   250  (Lsh64x8 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
   251  
   252  (Rsh32x8 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
   253  (Rsh32Ux8 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
   254  (Lsh32x8 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
   255  
   256  (Rsh16x8 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
   257  (Rsh16Ux8 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
   258  (Lsh16x8 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
   259  
   260  (Rsh8x8 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
   261  (Rsh8Ux8 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
   262  (Lsh8x8 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
   263  
   264  // Cleaning up shift ops when input is masked
   265  (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
   266  (ORN x (MOVDconst [-1])) -> x
   267  
   268  // Potentially useful optimizing rewrites.
   269  // (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
   270  // (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
   271  // (MaskIfNotCarry CarrySet) -> 0
   272  // (MaskIfNotCarry CarryClear) -> -1
   273  
   274  (Addr {sym} base) -> (MOVDaddr {sym} base)
   275  (OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)
   276  
   277  (Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
   278  (Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
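        // (x-1) &^ x is all ones in exactly the trailing-zero positions of x (and
        // everywhere for x == 0), so its population count equals the number of
        // trailing zeros; e.g. x = 0b1000: (x-1) &^ x = 0b0111 -> Ctz = 3.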
   279  
   280  (BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
   281  (BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
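        // BitLen(x) = width - CountLeadingZeros(x); e.g. for x = 0b1000, CNTLZD
        // returns 60, so BitLen64 = 64 - 60 = 4; for x = 0 it is 64 - 64 = 0.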
   282  
   283  (PopCount64 x) -> (POPCNTD x)
   284  (PopCount32 x) -> (POPCNTW (MOVWZreg x))
   285  (PopCount16 x) -> (POPCNTW (MOVHZreg x))
   286  (PopCount8 x) -> (POPCNTB (MOVBZreg x))
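        // POPCNTB counts set bits within each byte of the register independently;
        // zero-extending the byte first makes every upper byte contribute 0, so the
        // full result register holds just the low byte's count.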
   287  
   288  (And(64|32|16|8) x y) -> (AND x y)
   289  (Or(64|32|16|8) x y) -> (OR x y)
   290  (Xor(64|32|16|8) x y) -> (XOR x y)
   291  
   292  (Neg(64|32|16|8)  x) -> (NEG x)
   293  (Neg64F x) -> (FNEG x)
   294  (Neg32F x) -> (FNEG x)
   295  
   296  (Com(64|32|16|8) x) -> (NOR x x)
   297  
   298  // Lowering boolean ops
   299  (AndB x y) -> (AND x y)
   300  (OrB x y) -> (OR x y)
   301  (Not x) -> (XORconst [1] x)
   302  
   303  // Use ANDN for AND x NOT y
   304  (AND x (NOR y y)) -> (ANDN x y)
   305  
   306  // Lowering comparisons
   307  (EqB x y)  -> (ANDconst [1] (EQV x y))
   308  // Choosing sign or zero extension based on the operands' signedness sets up for sign/zero-extension elision later
   309  (Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   310  (Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   311  (Eq8 x y)  -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
   312  (Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   313  (Eq32 x y) -> (Equal (CMPW x y))
   314  (Eq64 x y) -> (Equal (CMP x y))
   315  (Eq32F x y) -> (Equal (FCMPU x y))
   316  (Eq64F x y) -> (Equal (FCMPU x y))
   317  (EqPtr x y) -> (Equal (CMP x y))
   318  
   319  (NeqB x y)  -> (XOR x y)
   320  // Like Eq8 and Eq16, prefer sign extension when it is likely to enable later elision.
   321  (Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   322  (Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   323  (Neq8 x y)  -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
   324  (Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   325  (Neq32 x y) -> (NotEqual (CMPW x y))
   326  (Neq64 x y) -> (NotEqual (CMP x y))
   327  (Neq32F x y) -> (NotEqual (FCMPU x y))
   328  (Neq64F x y) -> (NotEqual (FCMPU x y))
   329  (NeqPtr x y) -> (NotEqual (CMP x y))
   330  
   331  (Less8 x y)  -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   332  (Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   333  (Less32 x y) -> (LessThan (CMPW x y))
   334  (Less64 x y) -> (LessThan (CMP x y))
   335  (Less32F x y) -> (FLessThan (FCMPU x y))
   336  (Less64F x y) -> (FLessThan (FCMPU x y))
   337  
   338  (Less8U x y)  -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   339  (Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   340  (Less32U x y) -> (LessThan (CMPWU x y))
   341  (Less64U x y) -> (LessThan (CMPU x y))
   342  
   343  (Leq8 x y)  -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   344  (Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   345  (Leq32 x y) -> (LessEqual (CMPW x y))
   346  (Leq64 x y) -> (LessEqual (CMP x y))
   347  (Leq32F x y) -> (FLessEqual (FCMPU x y))
   348  (Leq64F x y) -> (FLessEqual (FCMPU x y))
   349  
   350  (Leq8U x y)  -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   351  (Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   352  (Leq32U x y) -> (LessEqual (CMPWU x y))
   353  (Leq64U x y) -> (LessEqual (CMPU x y))
   354  
   355  (Greater8 x y)  -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   356  (Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   357  (Greater32 x y) -> (GreaterThan (CMPW x y))
   358  (Greater64 x y) -> (GreaterThan (CMP x y))
   359  (Greater(32|64)F x y) -> (FGreaterThan (FCMPU x y))
   360  
   361  (Greater8U x y)  -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   362  (Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   363  (Greater32U x y) -> (GreaterThan (CMPWU x y))
   364  (Greater64U x y) -> (GreaterThan (CMPU x y))
   365  
   366  (Geq8 x y)  -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   367  (Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   368  (Geq32 x y) -> (GreaterEqual (CMPW x y))
   369  (Geq64 x y) -> (GreaterEqual (CMP x y))
   370  (Geq(32|64)F x y) -> (FGreaterEqual (FCMPU x y))
   371  
   372  (Geq8U x y)  -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   373  (Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   374  (Geq32U x y) -> (GreaterEqual (CMPWU x y))
   375  (Geq64U x y) -> (GreaterEqual (CMPU x y))
   376  
   377  // Absorb pseudo-ops into blocks.
   378  (If (Equal cc) yes no) -> (EQ cc yes no)
   379  (If (NotEqual cc) yes no) -> (NE cc yes no)
   380  (If (LessThan cc) yes no) -> (LT cc yes no)
   381  (If (LessEqual cc) yes no) -> (LE cc yes no)
   382  (If (GreaterThan cc) yes no) -> (GT cc yes no)
   383  (If (GreaterEqual cc) yes no) -> (GE cc yes no)
   384  (If (FLessThan cc) yes no) -> (FLT cc yes no)
   385  (If (FLessEqual cc) yes no) -> (FLE cc yes no)
   386  (If (FGreaterThan cc) yes no) -> (FGT cc yes no)
   387  (If (FGreaterEqual cc) yes no) -> (FGE cc yes no)
   388  
   389  (If cond yes no) -> (NE (CMPWconst [0] cond) yes no)
   390  
   391  // Absorb boolean tests into block
   392  (NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
   393  (NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
   394  (NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
   395  (NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
   396  (NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
   397  (NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
   398  (NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
   399  (NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
   400  (NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
   401  (NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)
   402  
   403  // Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
   404  (EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
   405  (NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
   406  (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
   407  (NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
   408  
   409  // absorb flag constants into branches
   410  (EQ (FlagEQ) yes no) -> (First nil yes no)
   411  (EQ (FlagLT) yes no) -> (First nil no yes)
   412  (EQ (FlagGT) yes no) -> (First nil no yes)
   413  
   414  (NE (FlagEQ) yes no) -> (First nil no yes)
   415  (NE (FlagLT) yes no) -> (First nil yes no)
   416  (NE (FlagGT) yes no) -> (First nil yes no)
   417  
   418  (LT (FlagEQ) yes no) -> (First nil no yes)
   419  (LT (FlagLT) yes no) -> (First nil yes no)
   420  (LT (FlagGT) yes no) -> (First nil no yes)
   421  
   422  (LE (FlagEQ) yes no) -> (First nil yes no)
   423  (LE (FlagLT) yes no) -> (First nil yes no)
   424  (LE (FlagGT) yes no) -> (First nil no yes)
   425  
   426  (GT (FlagEQ) yes no) -> (First nil no yes)
   427  (GT (FlagLT) yes no) -> (First nil no yes)
   428  (GT (FlagGT) yes no) -> (First nil yes no)
   429  
   430  (GE (FlagEQ) yes no) -> (First nil yes no)
   431  (GE (FlagLT) yes no) -> (First nil no yes)
   432  (GE (FlagGT) yes no) -> (First nil yes no)
   433  
   434  // absorb InvertFlags into branches
   435  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   436  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   437  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   438  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   439  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   440  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   441  
   442  // constant comparisons
   443  (CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
   444  (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  -> (FlagLT)
   445  (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  -> (FlagGT)
   446  
   447  (CMPconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
   448  (CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y)  -> (FlagLT)
   449  (CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y)  -> (FlagGT)
   450  
   451  (CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  -> (FlagEQ)
   452  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
   453  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
   454  
   455  (CMPUconst (MOVDconst [x]) [y]) && int64(x)==int64(y)  -> (FlagEQ)
   456  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
   457  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
   458  
   459  // other known comparisons
   460  //(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
   461  //(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
   462  //(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
   463  //(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)
   464  
   465  // absorb flag constants into boolean values
   466  (Equal (FlagEQ)) -> (MOVDconst [1])
   467  (Equal (FlagLT)) -> (MOVDconst [0])
   468  (Equal (FlagGT)) -> (MOVDconst [0])
   469  
   470  (NotEqual (FlagEQ)) -> (MOVDconst [0])
   471  (NotEqual (FlagLT)) -> (MOVDconst [1])
   472  (NotEqual (FlagGT)) -> (MOVDconst [1])
   473  
   474  (LessThan (FlagEQ)) -> (MOVDconst [0])
   475  (LessThan (FlagLT)) -> (MOVDconst [1])
   476  (LessThan (FlagGT)) -> (MOVDconst [0])
   477  
   478  (LessEqual (FlagEQ)) -> (MOVDconst [1])
   479  (LessEqual (FlagLT)) -> (MOVDconst [1])
   480  (LessEqual (FlagGT)) -> (MOVDconst [0])
   481  
   482  (GreaterThan (FlagEQ)) -> (MOVDconst [0])
   483  (GreaterThan (FlagLT)) -> (MOVDconst [0])
   484  (GreaterThan (FlagGT)) -> (MOVDconst [1])
   485  
   486  (GreaterEqual (FlagEQ)) -> (MOVDconst [1])
   487  (GreaterEqual (FlagLT)) -> (MOVDconst [0])
   488  (GreaterEqual (FlagGT)) -> (MOVDconst [1])
   489  
   490  // absorb InvertFlags into boolean values
   491  (Equal (InvertFlags x)) -> (Equal x)
   492  (NotEqual (InvertFlags x)) -> (NotEqual x)
   493  (LessThan (InvertFlags x)) -> (GreaterThan x)
   494  (GreaterThan (InvertFlags x)) -> (LessThan x)
   495  (LessEqual (InvertFlags x)) -> (GreaterEqual x)
   496  (GreaterEqual (InvertFlags x)) -> (LessEqual x)
   497  
   498  // Lowering loads
   499  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
   500  (Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
   501  (Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
   502  (Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
   503  (Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
   504  (Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
   505  (Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
   506  (Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)
   507  
   508  (Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
   509  (Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
   510  
   511  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
   512  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
   513  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
   514  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
   515  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
   516  (Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
   517  (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
   518  
   519  // Using Zero instead of LoweredZero allows the
   520  // target address to be folded where possible.
   521  (Zero [0] _ mem) -> mem
   522  (Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
   523  (Zero [2] destptr mem) ->
   524  	(MOVHstorezero destptr mem)
   525  (Zero [3] destptr mem) ->
   526  	(MOVBstorezero [2] destptr
   527  		(MOVHstorezero destptr mem))
   528  (Zero [4] destptr mem) ->
   529  	(MOVWstorezero destptr mem)
   530  (Zero [5] destptr mem) ->
   531  	(MOVBstorezero [4] destptr
   532          	(MOVWstorezero destptr mem))
   533  (Zero [6] destptr mem) ->
   534  	(MOVHstorezero [4] destptr
   535  		(MOVWstorezero destptr mem))
   536  (Zero [7] destptr mem) ->
   537  	(MOVBstorezero [6] destptr
   538  		(MOVHstorezero [4] destptr
   539  			(MOVWstorezero destptr mem)))
   540  
   541  // MOVD for store uses the DS instruction form and must have an offset that is a multiple of 4
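        // (The DS form encodes a 14-bit displacement that is concatenated with two
        // zero bits, so only offsets that are a multiple of 4 can be encoded.)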
   542  (Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   543          (MOVDstorezero destptr mem)
   544  (Zero [8] destptr mem) ->
   545          (MOVWstorezero [4] destptr
   546                  (MOVWstorezero [0] destptr mem))
   547  // Handle these cases only if aligned properly, otherwise use general case below
   548  (Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   549          (MOVWstorezero [8] destptr
   550                  (MOVDstorezero [0] destptr mem))
   551  (Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   552         (MOVDstorezero [8] destptr
   553                  (MOVDstorezero [0] destptr mem))
   554  (Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   555         (MOVDstorezero [16] destptr
   556                 (MOVDstorezero [8] destptr
   557                         (MOVDstorezero [0] destptr mem)))
   558  (Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   559         (MOVDstorezero [24] destptr
   560                 (MOVDstorezero [16] destptr
   561                         (MOVDstorezero [8] destptr
   562                                 (MOVDstorezero [0] destptr mem))))
   563  
   564  // Handle cases not handled above
   565  (Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)
   566  
   567  // moves
   568  // Only the MOVD and MOVW instructions require 4 byte
   569  // alignment in the offset field.  The other MOVx instructions
   570  // allow any alignment.
   571  (Move [0] _ _ mem) -> mem
   572  (Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
   573  (Move [2] dst src mem) ->
   574          (MOVHstore dst (MOVHZload src mem) mem)
   575  (Move [4] dst src mem) ->
   576  	(MOVWstore dst (MOVWZload src mem) mem)
   577  // MOVD for load and store must have an offset that is a multiple of 4
   578  (Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
   579  	(MOVDstore dst (MOVDload src mem) mem)
   580  (Move [8] dst src mem) ->
   581  	(MOVWstore [4] dst (MOVWZload [4] src mem)
   582  		(MOVWstore dst (MOVWZload src mem) mem))
   583  (Move [3] dst src mem) ->
   584          (MOVBstore [2] dst (MOVBZload [2] src mem)
   585                  (MOVHstore dst (MOVHload src mem) mem))
   586  (Move [5] dst src mem) ->
   587          (MOVBstore [4] dst (MOVBZload [4] src mem)
   588                  (MOVWstore dst (MOVWZload src mem) mem))
   589  (Move [6] dst src mem) ->
   590          (MOVHstore [4] dst (MOVHZload [4] src mem)
   591                  (MOVWstore dst (MOVWZload src mem) mem))
   592  (Move [7] dst src mem) ->
   593          (MOVBstore [6] dst (MOVBZload [6] src mem)
   594                  (MOVHstore [4] dst (MOVHZload [4] src mem)
   595                          (MOVWstore dst (MOVWZload src mem) mem)))
   596  
   597  // Large move uses a loop. Since the address is computed and the
   598  // offset is zero, any alignment can be used.
   599  (Move [s] dst src mem) && s > 8 ->
   600          (LoweredMove [s] dst src mem)
   601  
   602  // Calls
   603  // Lowering calls
   604  (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
   605  (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
   606  (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
   607  
   608  // Miscellaneous
   609  (Convert <t> x mem) -> (MOVDconvert <t> x mem)
   610  (GetClosurePtr) -> (LoweredGetClosurePtr)
   611  (GetCallerSP) -> (LoweredGetCallerSP)
   612  (IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
   613  (IsInBounds idx len) -> (LessThan (CMPU idx len))
   614  (IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
   615  (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
   616  
   617  // Write barrier.
   618  (WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
   619  
   620  // Optimizations
   621  // Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
   622  // so ORconst, XORconst easily expand into a pair.
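        // e.g. ORconst [0x12345678] is assembled as an ori with 0x5678 followed by
        // an oris with 0x1234 (an illustration, assuming the usual ori/oris expansion).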
   623  
   624  // Include very-large constants in the const-const case.
   625  (AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
   626  (OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
   627  (XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
   628  
   629  // Discover consts
   630  (AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
   631  (XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
   632  (OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
   633  
   634  // Simplify consts
   635  (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
   636  (ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
   637  (XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
   638  (ANDconst [-1] x) -> x
   639  (ANDconst [0] _) -> (MOVDconst [0])
   640  (XORconst [0] x) -> x
   641  (ORconst [-1] _) -> (MOVDconst [-1])
   642  (ORconst [0] x) -> x
   643  
   644  // zero-extend of small and -> small and
   645  (MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
   646  (MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
   647  (MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
   648  (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y
   649  
   650  // sign extend of small-positive and -> small-positive-and
   651  (MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
   652  (MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
   653  (MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest ANDconst immediate; regarded as a 32-bit value it is still positive
   654  (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y
   655  
   656  // small and of zero-extend -> either zero-extend or small and
   657    // degenerate-and
   658  (ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
   659  (ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF -> y
   660  (ANDconst [c] y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF -> y
   661    // normal case
   662  (ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
   663  (ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
   664  (ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)
   665  
   666  // Various redundant zero/sign extension combinations.
   667  (MOVBZreg y:(MOVBZreg _)) -> y  // repeat
   668  (MOVBreg y:(MOVBreg _)) -> y // repeat
   669  (MOVBreg (MOVBZreg x)) -> (MOVBreg x)
   670  (MOVBZreg (MOVBreg x)) -> (MOVBZreg x)
   671  
   672  // H - there are more combinations than these
   673  
   674  (MOVHZreg y:(MOVHZreg _)) -> y // repeat
   675  (MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow
   676  
   677  (MOVHreg y:(MOVHreg _)) -> y // repeat
   678  (MOVHreg y:(MOVBreg _)) -> y // wide of narrow
   679  
   680  (MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
   681  (MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)
   682  
   683  // W - there are more combinations than these
   684  
   685  (MOVWZreg y:(MOVWZreg _)) -> y // repeat
   686  (MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
   687  (MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow
   688  
   689  (MOVWreg y:(MOVWreg _)) -> y // repeat
   690  (MOVWreg y:(MOVHreg _)) -> y // wide of narrow
   691  (MOVWreg y:(MOVBreg _)) -> y // wide of narrow
   692  
   693  (MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
   694  (MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)
   695  
   696  // Arithmetic constant ops
   697  
   698  (ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
   699  (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
   700  (ADDconst [0] x) -> x
   701  (SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
   702  // TODO deal with subtract-from-const
   703  
   704  (ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)
   705  
   706  // Use register moves instead of stores and loads to move int<->float values
   707  // Common with math.Float64bits and math.Float64frombits
   708  (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
   709  (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)
   710  
   711  (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
   712  (MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)
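        // e.g. math.Float64bits is (a sketch of the math package source):
        //
        //	func Float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) }
        //
        // which lowers to an FMOVDstore followed by a MOVDload at the same offset;
        // the rules above collapse the pair into a single MFVSRD register move.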
   713  
   714  (MTVSRD (MOVDconst [c])) -> (FMOVDconst [c])
   715  (MFVSRD (FMOVDconst [c])) -> (MOVDconst [c])
   716  
   717  (MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem)
   718  (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)
   719  
   720  // Fold offsets for stores.
   721  (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
   722  (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
   723  (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
   724  (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)
   725  
   726  (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
   727  (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
   728  
   729  // Fold address into load/store.
   730  // The assembler needs to generate several instructions and use a
   731  // temp register for accessing a global, and each time it will reload
   732  // the temp register. So don't fold the address of a global unless
   733  // there is only one use.
   734  (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   735  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   736          (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   737  (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   738  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   739          (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   740  (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   741  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   742          (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   743  (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   744  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   745          (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   746  
   747  (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   748  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   749          (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   750  (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   751  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   752          (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   753  
   754  (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   755  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   756          (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   757  (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   758  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   759          (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   760  (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   761  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   762          (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   763  (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   764  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   765          (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   766  (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   767  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   768          (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   769  (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   770  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   771          (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   772  (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   773  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   774          (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   775  (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   776  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   777          (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   778  
   779  // Fold offsets for loads.
   780  (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
   781  (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)
   782  
   783  (MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
   784  (MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
   785  (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
   786  (MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
   787  (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
   788  (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)
   789  
   790  // Store of zero -> storezero
   791  (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
   792  (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
   793  (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
   794  (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)
   795  
   796  // Fold offsets for storezero
   797  (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   798      (MOVDstorezero [off1+off2] {sym} x mem)
   799  (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   800      (MOVWstorezero [off1+off2] {sym} x mem)
   801  (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   802      (MOVHstorezero [off1+off2] {sym} x mem)
   803  (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   804      (MOVBstorezero [off1+off2] {sym} x mem)
   805  
   806  // Fold symbols into storezero
   807  (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   808  	&& (x.Op != OpSB || p.Uses == 1) ->
   809      (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   810  (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   811  	&& (x.Op != OpSB || p.Uses == 1) ->
   812      (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   813  (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   814  	&& (x.Op != OpSB || p.Uses == 1) ->
   815      (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   816  (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   817  	&& (x.Op != OpSB || p.Uses == 1) ->
   818      (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   819  
   820  // atomic intrinsics
   821  (AtomicLoad(32|64|Ptr)  ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) ptr mem)
   822  
   823  (AtomicStore(32|64)      ptr val mem) -> (LoweredAtomicStore(32|64) ptr val mem)
   825  
   826  (AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem)
   827  
   828  (AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem)
   829  
   830  (AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) ptr old new_ mem)
   831  
   832  (AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
   833  (AtomicOr8  ptr val mem) -> (LoweredAtomicOr8  ptr val mem)
   834  
   835  // Lowering extension
   836  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
   837  (SignExt8to(16|32|64)  x) -> (MOVBreg x)
   838  (SignExt16to(32|64) x) -> (MOVHreg x)
   839  (SignExt32to64 x) -> (MOVWreg x)
   840  
   841  (ZeroExt8to(16|32|64)  x) -> (MOVBZreg x)
   842  (ZeroExt16to(32|64) x) -> (MOVHZreg x)
   843  (ZeroExt32to64 x) -> (MOVWZreg x)
   844  
   845  (Trunc(16|32|64)to8  x) -> (MOVBreg x)
   846  (Trunc(32|64)to16 x) -> (MOVHreg x)
   847  (Trunc64to32 x) -> (MOVWreg x)
   848  
   849  (Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
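        // Slicemask(x) must be 0 for x == 0 and all ones for x > 0: NEG makes any
        // positive x negative, and the arithmetic shift by 63 smears the resulting
        // sign bit across the whole register.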
   850  
   851  // Note that MOV??reg returns a 64-bit int; x is not necessarily that wide
   852  // This may interact with other patterns in the future. (Compare with arm64)
   853  (MOVBZreg x:(MOVBZload _ _))  -> x
   854  (MOVHZreg x:(MOVHZload _ _))  -> x
   855  (MOVHreg x:(MOVHload _ _))  -> x
   856  
   857  (MOVBZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint8(c))])
   858  (MOVBreg (MOVDconst [c]))  -> (MOVDconst [int64(int8(c))])
   859  (MOVHZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint16(c))])
   860  (MOVHreg (MOVDconst [c]))  -> (MOVDconst [int64(int16(c))])
   861  
   862  // Lose widening ops fed to stores
   863  (MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   864  (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   865  (MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
   866  (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
   867  (MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   868  (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   869  
   870  // Lose W-widening ops fed to compare-W
   871  (CMPW x (MOVWreg y)) -> (CMPW x y)
   872  (CMPW (MOVWreg x) y) -> (CMPW x y)
   873  (CMPWU x (MOVWZreg y)) -> (CMPWU x y)
   874  (CMPWU (MOVWZreg x) y) -> (CMPWU x y)
   875  
   876  (CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
   877  (CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
   878  (CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
   879  (CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))
   880  
   881  (CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
   882  (CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
   883  (CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
   884  (CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))
   885  
   886  // A particular pattern seen in cgo code:
   887  (AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
   888  (AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)
   889  
   890  // floating point negative abs
   891  (FNEG (FABS x)) -> (FNABS x)
   892  (FNEG (FNABS x)) -> (FABS x)
   893  
   894  // floating-point fused multiply-add/sub
   895  (FADD (FMUL x y) z) -> (FMADD x y z)
   896  (FSUB (FMUL x y) z) -> (FMSUB x y z)
   897  (FADDS (FMULS x y) z) -> (FMADDS x y z)
   898  (FSUBS (FMULS x y) z) -> (FMSUBS x y z)
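        // FMADD and friends compute x*y +/- z with a single rounding; Go permits
        // this fusion of floating-point operations.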
   899  
   900  
   901  // The following rules match the multiple single-byte loads or stores emitted
   902  // by the encoding/binary functions UintXX (load) and PutUintXX (store) and
   903  // combine them into the single largest possible load or store. For now only
   904  // little-endian loads and stores on little-endian machines are implemented.
   905  // Longer rules build on the matches of shorter rules where possible.
   906  // TODO implement big endian loads and stores for little endian machines (using byte reverse
   907  // loads and stores).
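        // e.g. the 16-bit little-endian load in encoding/binary has this shape:
        //
        //	func Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 }
        //
        // which the first rule below merges into a single MOVHZload.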
   908  // b[0] | b[1]<<8 -> load 16-bit Little endian
   909  (OR <t> x0:(MOVBZload [i0] {s} p mem)
   910  	o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8]))
   911  	&& !config.BigEndian
   912  	&& i1 == i0+1
   913  	&& x0.Uses == 1 && x1.Uses == 1
   914  	&& o1.Uses == 1
   915  	&& mergePoint(b, x0, x1) != nil
   916  	&& clobber(x0) && clobber(x1) && clobber(o1)
   917  	 -> @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
   918  
   919  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian
   920  (OR <t> s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24])
   921  	o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)))
   922  	&& !config.BigEndian
   923  	&& i2 == i0+2
   924  	&& i3 == i0+3
   925  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
   926  	&& o0.Uses == 1
   927  	&& s0.Uses == 1 && s1.Uses == 1
   928  	&& mergePoint(b, x0, x1, x2) != nil
   929  	&& clobber(x0) && clobber(x1) && clobber(x2)
   930  	&& clobber(s0) && clobber(s1)
   931  	&& clobber(o0)
   932  	 -> @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
   933  
   934  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4] <<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian
   935  // Can't build on shorter rules because they use SLW instead of SLD
   936  // Offset must be a multiple of 4 for MOVD
   937  (OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
   938  	o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
   939  	o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
   940  	o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
   941  	o2:(OR <t> s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])
   942  	o1:(OR <t> s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])
   943  	o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))))))
   944  	&& !config.BigEndian
   945  	&& i0%4 == 0
   946  	&& i1 == i0+1
   947  	&& i2 == i0+2
   948  	&& i3 == i0+3
   949  	&& i4 == i0+4
   950  	&& i5 == i0+5
   951  	&& i6 == i0+6
   952  	&& i7 == i0+7
   953  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
   954  	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
   955  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
   956  	&& mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil
   957  	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
   958  	&& clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6)
   959  	&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
   960  	  -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
   961  
   962  // 2 byte store Little endian as in:
   963  //      b[0] = byte(v)
   964  //      b[1] = byte(v >> 8)
   965  (MOVBstore [i1] {s} p (SRWconst (MOVHZreg w) [8])
   966  	x0:(MOVBstore [i0] {s} p w mem))
   967  	&& !config.BigEndian
   968  	&& x0.Uses == 1
   969  	&& i1 == i0+1
   970  	&& clobber(x0)
   971  	  -> (MOVHstore [i0] {s} p w mem)
   972  
   973  // 4 byte store Little endian as in:
   974  //     b[0] = byte(v)
   975  //     b[1] = byte(v >> 8)
   976  //     b[2] = byte(v >> 16)
   977  //     b[3] = byte(v >> 24)
   978  (MOVBstore [i3] {s} p (SRWconst w [24])
   979  	x0:(MOVBstore [i2] {s} p (SRWconst w [16])
   980  	x1:(MOVBstore [i1] {s} p (SRWconst w [8])
   981  	x2:(MOVBstore [i0] {s} p w mem))))
   982  	&& !config.BigEndian
   983  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
   984  	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3
   985  	&& clobber(x0) && clobber(x1) && clobber(x2)
   986  	  -> (MOVWstore [i0] {s} p w mem)
   987  
   988  // 8 byte store Little endian as in:
   989  //	b[0] = byte(v)
   990  //	b[1] = byte(v >> 8)
   991  //	b[2] = byte(v >> 16)
   992  //	b[3] = byte(v >> 24)
   993  //	b[4] = byte(v >> 32)
   994  //	b[5] = byte(v >> 40)
   995  //	b[6] = byte(v >> 48)
   996  //	b[7] = byte(v >> 56)
   997  // Offset must be a multiple of 4 for MOVDstore
   998  // Can't build on previous rules for 2 or 4 bytes because they use SRW not SRD
   999  (MOVBstore [i7] {s} p (SRDconst w [56])
  1000  	x0:(MOVBstore [i6] {s} p (SRDconst w [48])
  1001  	x1:(MOVBstore [i5] {s} p (SRDconst w [40])
  1002  	x2:(MOVBstore [i4] {s} p (SRDconst w [32])
  1003  	x3:(MOVBstore [i3] {s} p (SRDconst w [24])
  1004  	x4:(MOVBstore [i2] {s} p (SRDconst w [16])
  1005  	x5:(MOVBstore [i1] {s} p (SRDconst w [8])
  1006  	x6:(MOVBstore [i0] {s} p w mem))))))))
  1007  	&& !config.BigEndian
  1008  	&& i0%4 == 0
  1009  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
  1010  	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1011  	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
  1012  	  -> (MOVDstore [i0] {s} p w mem)