github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/cmd/compile/internal/ssa/gen/PPC64.rules

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64  x y) -> (ADD  x y)
(AddPtr x y) -> (ADD  x y)
(Add32  x y) -> (ADD x y)
(Add16  x y) -> (ADD x y)
(Add8   x y) -> (ADD x y)
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)

(Sub64  x y) -> (SUB  x y)
(SubPtr x y) -> (SUB  x y)
(Sub32  x y) -> (SUB x y)
(Sub16  x y) -> (SUB x y)
(Sub8   x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
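
// Illustrative reading of the Mod lowerings above (a sketch, not an added
// rule): the remainder is recovered from the quotient as x - y*(x/y).
// For x=7, y=3: DIVD gives 2, MULLD gives 3*2 = 6, and SUB gives 7-6 = 1 = 7%3.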

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
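
// Worked example of the identity above (it requires x >= y so x-y cannot
// wrap): for x=10, y=4, (10-4)>>1 + 4 = 3+4 = 7 = (10+4)/2. Rewriting this
// way avoids losing the carry out of the 64-bit addition x+y.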

(Mul64  x y) -> (MULLD  x y)
(Mul32  x y) -> (MULLW  x y)
(Mul16  x y) -> (MULLW x y)
(Mul8   x y) -> (MULLW x y)

(Div64  x y) -> (DIVD  x y)
(Div64u x y) -> (DIVDU x y)
(Div32  x y) -> (DIVW  x y)
(Div32u x y) -> (DIVWU x y)
(Div16  x y) -> (DIVW  (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8   x y) -> (DIVW  (SignExt8to32 x) (SignExt8to32 y))
(Div8u  x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64  x y) -> (MULHD  x y)
(Hmul64u  x y) -> (MULHDU x y)
(Hmul32  x y) -> (MULHW  x y)
(Hmul32u  x y) -> (MULHWU x y)

(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

// Lowering float <-> int
(Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
(Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
(Cvt64to32F x) -> (FCFIDS (MTVSRD x))
(Cvt64to64F x) -> (FCFID (MTVSRD x))

(Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
(Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))

(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F x) -> (FRSP x)

(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)

(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
(MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)

(Round32F x) -> (LoweredRound32F x)
(Round64F x) -> (LoweredRound64F x)

(Sqrt x) -> (FSQRT x)
(Floor x) -> (FFLOOR x)
(Ceil x) -> (FCEIL x)
(Trunc x) -> (FTRUNC x)

// Lowering constants
(Const8   [val]) -> (MOVDconst [val])
(Const16  [val]) -> (MOVDconst [val])
(Const32  [val]) -> (MOVDconst [val])
(Const64  [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
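
// Illustrative instance (not an added rule): x<<10 | x>>54 satisfies the
// guard d == 64-c (54 == 64-10), so the OR form above rewrites to
// (ROTLconst [10] x).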

// Rotate generation with non-const shift
// These match the patterns generated for math/bits.RotateLeft[32|64], but there could be others.
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)

(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
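
// For reference, a sketch of the Go-level expression these patterns
// correspond to (an assumption about the typical shape; the frontend may
// generate variants):
//	r := x<<(y&63) | x>>(64-(y&63)) // 64-bit; the 32-bit form uses &31 and 32
// The ANDconst [63]/[31] terms are the masks on the rotate count.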

(Lsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SLWconst x [c])
(Rsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (Const64 [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SLWconst x [c])
(Rsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux32  x (Const64 [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

// large constant shifts
(Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64  _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
(Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])

// large constant signed right shift: only the sign bit is left
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64  x (Const64 [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])
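
// The two groups above follow Go's semantics for oversized shift counts:
//	var u uint64 = 5; _ = u >> 64 // 0: oversized unsigned shifts clear all bits
//	var s int64 = -5; _ = s >> 64 // -1: oversized signed shifts fill with the sign bit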

// constant shifts
(Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SLWconst x [c])
(Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SLWconst x [c])
(Rsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux32  x (MOVDconst [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

// non-constant rotates
// These are subexpressions found in statements that can become rotates.
// In these cases the shift count is known to be < 64, so the more complicated
// expressions with Mask & Carry are not needed.
(Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))

(Rsh64x64 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y)  -> (SLD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
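
// An informal reading of the three general rules above: ADDconstForCarry
// [-64] y sets the carry iff y >= 64 (unsigned). If the carry is clear
// (y < 64), MaskIfNotCarry is -1 and ORN computes y | ^(-1) = y, leaving the
// count alone; if y >= 64 the mask is 0 and ORN gives y | ^0 = -1, an
// oversized count that the machine shift turns into 0 (or into all sign bits
// for SRAD), matching Go's semantics for oversized shift counts.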

(Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))

(Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y)  -> (SLW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))

(Rsh16x64 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))

(Rsh8x64 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))

(Rsh64x32 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))

(Rsh32x32 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))

(Rsh16x32 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))

(Rsh8x32 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))


(Rsh64x16 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))

(Rsh32x16 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))

(Rsh16x16 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))

(Rsh8x16 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))


(Rsh64x8 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))

(Rsh32x8 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))

(Rsh16x8 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))

(Rsh8x8 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))

// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x
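
// Worked instance of the two cleanup rules: if the general lowering above
// fires on a count that is already masked, say y&63 for a 64-bit shift, then
// (ADDconstForCarry [-64] (ANDconst [63] y)) matches with c=-64, d=63, and
// c+d = -1 < 0, so the carry can never be set; the mask folds to
// (MOVDconst [-1]) and (ORN y (MOVDconst [-1])) -> y removes the carry
// machinery entirely.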

// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarryClear) -> -1

(Addr {sym} base) -> (MOVDaddr {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)

(Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
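
// Worked example of the (x-1) &^ x idiom: for x = 0b1000, x-1 = 0b0111 and
// ANDN gives 0b0111, whose population count is 3 = trailing zeros of x.
// For x = 0 the result is all ones, giving the full width, as Go requires.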

(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
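
// Worked example for BitLen64: for x = 1, CNTLZD counts 63 leading zeros and
// the result is 64 - 63 = 1 bit; for x = 0 it is 64 - 64 = 0.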

(PopCount64 x) -> (POPCNTD x)
(PopCount32 x) -> (POPCNTW (MOVWZreg x))
(PopCount16 x) -> (POPCNTW (MOVHZreg x))
(PopCount8 x) -> (POPCNTB (MOVBreg x))

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8  x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8  x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8  x y) -> (XOR x y)

(Neg64F x) -> (FNEG x)
(Neg32F x) -> (FNEG x)
(Neg64  x) -> (NEG x)
(Neg32  x) -> (NEG x)
(Neg16  x) -> (NEG x)
(Neg8   x) -> (NEG x)

(Com64 x) -> (NOR x x)
(Com32 x) -> (NOR x x)
(Com16 x) -> (NOR x x)
(Com8  x) -> (NOR x x)

// Lowering boolean ops
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(Not x) -> (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (NOR y y)) -> (ANDN x y)

// Lowering comparisons
(EqB x y)  -> (ANDconst [1] (EQV x y))
// Making the extension depend on the operands' signedness sets up for sign/zero-extension elision later.
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y)  -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPU x y))
(Eq64F x y) -> (Equal (FCMPU x y))
(EqPtr x y) -> (Equal (CMP x y))

(NeqB x y)  -> (XOR x y)
// As with Eq8 and Eq16, prefer sign extension when it is likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y)  -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
(NeqPtr x y) -> (NotEqual (CMP x y))

(Less8 x y)  -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (FLessThan (FCMPU x y))
(Less64F x y) -> (FLessThan (FCMPU x y))

(Less8U x y)  -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))

(Leq8 x y)  -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (FLessEqual (FCMPU x y))
(Leq64F x y) -> (FLessEqual (FCMPU x y))

(Leq8U x y)  -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))

(Greater8 x y)  -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater32F x y) -> (FGreaterThan (FCMPU x y))
(Greater64F x y) -> (FGreaterThan (FCMPU x y))

(Greater8U x y)  -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))

(Geq8 x y)  -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq32F x y) -> (FGreaterEqual (FCMPU x y))
(Geq64F x y) -> (FGreaterEqual (FCMPU x y))

(Geq8U x y)  -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPWU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (FLessThan cc) yes no) -> (FLT cc yes no)
(If (FLessEqual cc) yes no) -> (FLE cc yes no)
(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)

(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
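
// Illustrative instance: a branch on x&4 == 0 can reach here as
// (EQ (CMPconst [0] (ANDconst [4] x)) yes no); the rules above replace the
// separate compare with (ANDCCconst [4] x), which performs the AND and sets
// the condition register in a single instruction.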

// absorb flag constants into branches
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  -> (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y)  -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y)  -> (FlagGT)

(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && int64(x)==int64(y)  -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) -> (MOVDconst [1])
(Equal (FlagLT)) -> (MOVDconst [0])
(Equal (FlagGT)) -> (MOVDconst [0])

(NotEqual (FlagEQ)) -> (MOVDconst [0])
(NotEqual (FlagLT)) -> (MOVDconst [1])
(NotEqual (FlagGT)) -> (MOVDconst [1])

(LessThan (FlagEQ)) -> (MOVDconst [0])
(LessThan (FlagLT)) -> (MOVDconst [1])
(LessThan (FlagGT)) -> (MOVDconst [0])

(LessEqual (FlagEQ)) -> (MOVDconst [1])
(LessEqual (FlagLT)) -> (MOVDconst [1])
(LessEqual (FlagGT)) -> (MOVDconst [0])

(GreaterThan (FlagEQ)) -> (MOVDconst [0])
(GreaterThan (FlagLT)) -> (MOVDconst [0])
(GreaterThan (FlagGT)) -> (MOVDconst [1])

(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
(GreaterEqual (FlagLT)) -> (MOVDconst [0])
(GreaterEqual (FlagGT)) -> (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) -> (Equal x)
(NotEqual (InvertFlags x)) -> (NotEqual x)
(LessThan (InvertFlags x)) -> (GreaterThan x)
(GreaterThan (InvertFlags x)) -> (LessThan x)
(LessEqual (InvertFlags x)) -> (GreaterEqual x)
(GreaterEqual (InvertFlags x)) -> (LessEqual x)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
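
// For example, storing a Go int32 (size 4, 32-bit int) selects MOVWstore,
// while storing a float64 (size 8, 64-bit float) selects FMOVDstore.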

// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
(Zero [2] destptr mem) ->
	(MOVHstorezero destptr mem)
(Zero [3] destptr mem) ->
	(MOVBstorezero [2] destptr
		(MOVHstorezero destptr mem))
(Zero [4] destptr mem) ->
	(MOVWstorezero destptr mem)
(Zero [5] destptr mem) ->
	(MOVBstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [6] destptr mem) ->
	(MOVHstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [7] destptr mem) ->
	(MOVBstorezero [6] destptr
		(MOVHstorezero [4] destptr
			(MOVWstorezero destptr mem)))

// MOVD for store with DS must have offsets that are a multiple of 4
(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero destptr mem)
(Zero [8] destptr mem) ->
	(MOVWstorezero [4] destptr
		(MOVWstorezero [0] destptr mem))
// Handle these cases only if aligned properly, otherwise use the general case below
(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero [16] destptr
		(MOVDstorezero [8] destptr
			(MOVDstorezero [0] destptr mem)))
(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero [24] destptr
		(MOVDstorezero [16] destptr
			(MOVDstorezero [8] destptr
				(MOVDstorezero [0] destptr mem))))

// Handle cases not handled above
(Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)

// moves
// Only the MOVD and MOVW instructions require 4-byte
// alignment in the offset field.  The other MOVx instructions
// allow any alignment.
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) ->
	(MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) ->
	(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are a multiple of 4
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
	(MOVWstore [4] dst (MOVWZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBZload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) ->
	(MOVHstore [4] dst (MOVHZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) ->
	(MOVBstore [6] dst (MOVBZload [6] src mem)
		(MOVHstore [4] dst (MOVHZload [4] src mem)
			(MOVWstore dst (MOVWZload src mem) mem)))
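
// For example, a 7-byte copy decomposes into a word, a halfword and a byte
// move; the nesting threads the memory state so the word at offset 0 is
// copied first, then the halfword at 4, then the byte at 6.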

// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
(Move [s] dst src mem) && s > 8 ->
	(LoweredMove [s] dst src mem)

// Calls
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerSP) -> (LoweredGetCallerSP)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(ANDconst [-1] x) -> x
(ANDconst [0] _) -> (MOVDconst [0])
(XORconst [0] x) -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORconst [0] x) -> x

// zero-extend of small and -> small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y

// sign extend of small-positive and -> small-positive-and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y
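
// Illustrative instance: MOVBZreg of (ANDconst [0x1F] _) is redundant, since
// the masked value already fits in 0..0xFF, so the extension is dropped.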

// small and of zero-extend -> either zero-extend or small and
  // degenerate-and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF -> y
(ANDconst [c] y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF -> y
  // normal case
(ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)

// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y  // repeat
(MOVBreg y:(MOVBreg _)) -> y // repeat
(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) -> y // repeat
(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHreg _)) -> y // repeat
(MOVHreg y:(MOVBreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) -> y // repeat
(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWreg _)) -> y // repeat
(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
(MOVWreg y:(MOVBreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)

// Arithmetic constant ops

(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
// TODO deal with subtract-from-const

(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)

// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)

// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVWstorezero [off1+off2] {sym} x mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVHstorezero [off1+off2] {sym} x mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVBstorezero [off1+off2] {sym} x mem)

// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)

// atomic intrinsics
(AtomicLoad32  ptr mem) -> (LoweredAtomicLoad32 ptr mem)
(AtomicLoad64  ptr mem) -> (LoweredAtomicLoad64 ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoadPtr ptr mem)

(AtomicStore32      ptr val mem) -> (LoweredAtomicStore32 ptr val mem)
(AtomicStore64      ptr val mem) -> (LoweredAtomicStore64 ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR  ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)

(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem)
(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8  ptr val mem) -> (LoweredAtomicOr8  ptr val mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  x) -> (MOVBreg x)
(SignExt8to32  x) -> (MOVBreg x)
(SignExt8to64  x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to16  x) -> (MOVBZreg x)
(ZeroExt8to32  x) -> (MOVBZreg x)
(ZeroExt8to64  x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

(Trunc16to8  x) -> (MOVBreg x)
(Trunc32to8  x) -> (MOVBreg x)
(Trunc32to16 x) -> (MOVHreg x)
(Trunc64to8  x) -> (MOVBreg x)
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)

(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])

// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
// This may interact with other patterns in the future. (Compare with arm64.)
(MOVBZreg x:(MOVBZload _ _))  -> x
(MOVHZreg x:(MOVHZload _ _))  -> x
(MOVHreg x:(MOVHload _ _))  -> x

(MOVBZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c]))  -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c]))  -> (MOVDconst [int64(int16(c))])

// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// Lose W-widening ops fed to compare-W
(CMPW x (MOVWreg y)) -> (CMPW x y)
(CMPW (MOVWreg x) y) -> (CMPW x y)
(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
(CMPWU (MOVWZreg x) y) -> (CMPWU x y)

(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))

(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))
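
// The InvertFlags in the rules above compensates for swapping the operands:
// comparing c with y equals comparing y with c with the sense of the
// condition flags reversed (LT becomes GT and so on).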

// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)

// floating-point fused multiply-add/sub
(FADD (FMUL x y) z) -> (FMADD x y z)
(FSUB (FMUL x y) z) -> (FMSUB x y z)
(FADDS (FMULS x y) z) -> (FMADDS x y z)
(FSUBS (FMULS x y) z) -> (FMSUBS x y z)
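
// For example, x*y + z lowers to a single FMADD x y z, which performs the
// multiply and add with one rounding step; the Go spec permits fusing
// floating-point operations this way.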