github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/cmd/compile/internal/ssa/gen/PPC64.rules

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add(Ptr|64|32|16|8)  x y) -> (ADD  x y)
     7  (Add64F x y) -> (FADD x y)
     8  (Add32F x y) -> (FADDS x y)
     9  
    10  (Sub(Ptr|64|32|16|8)  x y) -> (SUB  x y)
    11  (Sub32F x y) -> (FSUBS x y)
    12  (Sub64F x y) -> (FSUB x y)
    13  
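        // 8- and 16-bit remainders are widened to 32 bits; the 32- and 64-bit
        // cases are lowered with divide and multiply-subtract via the identity
        // x % y = x - y*(x/y), e.g. 7 % 3 = 7 - 3*(7/3) = 1.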
    14  (Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
    15  (Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
    16  (Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
    17  (Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
    18  (Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
    19  (Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
    20  (Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
    21  (Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
    22  
    23  // (x + y) / 2 with x>=y -> (x - y) / 2 + y
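        // Computing (x-y)/2 + y instead of (x+y)/2 avoids overflow of the
        // intermediate sum, e.g. Avg64u(1<<63, 1<<63) stays representable.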
    24  (Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
    25  
    26  (Mul64  x y) -> (MULLD  x y)
    27  (Mul(32|16|8)  x y) -> (MULLW  x y)
    28  (Mul64uhilo x y) -> (LoweredMuluhilo x y)
    29  
    30  (Div64  x y) -> (DIVD  x y)
    31  (Div64u x y) -> (DIVDU x y)
    32  (Div32  x y) -> (DIVW  x y)
    33  (Div32u x y) -> (DIVWU x y)
    34  (Div16  x y) -> (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    35  (Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
    36  (Div8   x y) -> (DIVW  (SignExt8to32 x) (SignExt8to32 y))
    37  (Div8u  x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
    38  
    39  (Hmul(64|64u|32|32u)  x y) -> (MULH(D|DU|W|WU)  x y)
    40  
    41  (Mul32F x y) -> (FMULS x y)
    42  (Mul64F x y) -> (FMUL x y)
    43  
    44  (Div32F x y) -> (FDIVS x y)
    45  (Div64F x y) -> (FDIV x y)
    46  
    47  // Lowering float <-> int
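        // Values move between the integer and floating-point register files with
        // MTVSRD/MFVSRD, so these conversions never go through memory.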
    48  (Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
    49  (Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
    50  (Cvt64to32F x) -> (FCFIDS (MTVSRD x))
    51  (Cvt64to64F x) -> (FCFID (MTVSRD x))
    52  
    53  (Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
    54  (Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
    55  (Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
    56  (Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))
    57  
    58  (Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
    59  (Cvt64Fto32F x) -> (FRSP x)
    60  
    61  (Round(32|64)F x) -> (LoweredRound(32|64)F x)
    62  
    63  (Sqrt x) -> (FSQRT x)
    64  (Floor x) -> (FFLOOR x)
    65  (Ceil x) -> (FCEIL x)
    66  (Trunc x) -> (FTRUNC x)
    67  (Round x) -> (FROUND x)
    68  (Copysign x y) -> (FCPSGN y x)
    69  (Abs x) -> (FABS x)
    70  
    71  // Lowering constants
    72  (Const(64|32|16|8)  [val]) -> (MOVDconst [val])
    73  (Const(32|64)F [val]) -> (FMOV(S|D)const [val])
    74  (ConstNil) -> (MOVDconst [0])
    75  (ConstBool [b]) -> (MOVDconst [b])
    76  
    77  // Constant folding
    78  (FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
    79  (FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
    80  (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
    81  (FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
    82  (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
    83  
    84  // Rotate generation with const shift
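        // x<<c | x>>(64-c) (or the 32-bit analogue) is a rotate left by c.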
    85  (ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
    86  ( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
    87  (XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
    88  
    89  (ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
    90  ( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
    91  (XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
    92  
    93  // Rotate generation with non-const shift
    95  // these match the patterns from math/bits.RotateLeft[32|64], but there could be others
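        // e.g. bits.RotateLeft64(x, y) is x<<(y&63) | x>>(64-(y&63)), which is
        // exactly the SLD/SRD shape matched below.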
    95  (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
    96  ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
    97  (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
    98  
    99  (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
   100  ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
   101  (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
   102  
   103  (Lsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
   104  (Rsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
   105  (Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
   106  (Lsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
   107  (Rsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
   108  (Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
   109  (Lsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
   110  (Rsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   111  (Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   112  (Lsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SLWconst x [c])
   113  (Rsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   114  (Rsh8Ux64  x (Const64 [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   115  
   116  (Lsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
   117  (Rsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
   118  (Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
   119  (Lsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
   120  (Rsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
   121  (Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
   122  (Lsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
   123  (Rsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   124  (Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   125  (Lsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SLWconst x [c])
   126  (Rsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   127  (Rsh8Ux32  x (Const64 [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   128  
   129  // large constant shifts
   130  (Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
   131  (Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
   132  (Lsh32x64  _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
   133  (Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
   134  (Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
   135  (Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
   136  (Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
   137  (Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
   138  
   139  // large constant signed right shift: only the sign bit is left
   140  (Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
   141  (Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
   142  (Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
   143  (Rsh8x64  x (Const64 [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])
   144  
   145  // constant shifts
   146  (Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
   147  (Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
   148  (Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
   149  (Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
   150  (Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
   151  (Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
   152  (Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
   153  (Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   154  (Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   155  (Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SLWconst x [c])
   156  (Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   157  (Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   158  
   159  (Lsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
   160  (Rsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
   161  (Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
   162  (Lsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
   163  (Rsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
   164  (Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
   165  (Lsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
   166  (Rsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
   167  (Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
   168  (Lsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SLWconst x [c])
   169  (Rsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
   170  (Rsh8Ux32  x (MOVDconst [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])
   171  
   172  // Lower bounded shifts first. No need to check shift value.
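        // shiftIsBounded(v) means the compiler has proved the shift count is
        // already less than the operand width, so no masking is required.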
   173  (Lsh64x(64|32|16|8)  x y) && shiftIsBounded(v) -> (SLD x y)
   174  (Lsh32x(64|32|16|8)  x y) && shiftIsBounded(v) -> (SLW x y)
   175  (Lsh16x(64|32|16|8)  x y) && shiftIsBounded(v) -> (SLW x y)
   176  (Lsh8x(64|32|16|8)   x y) && shiftIsBounded(v) -> (SLW x y)
   177  (Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRD x y)
   178  (Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW x y)
   179  (Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVHZreg x) y)
   180  (Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) -> (SRW (MOVBZreg x) y)
   181  (Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) -> (SRAD x y)
   182  (Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) -> (SRAW x y)
   183  (Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) -> (SRAW (MOVHreg x) y)
   184  (Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) -> (SRAW (MOVBreg x) y)
   185  
   186  // non-constant rotates
   187  // These are subexpressions found in statements that can become rotates
   188  // In these cases the shift count is known to be < 64, so the more complicated expressions
   189  // with Mask & Carry are not needed
   190  (Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
   191  (Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
   192  (Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
   193  (Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
   194  (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   195  (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   196  (Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
   197  (Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
   198  (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   199  (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
   200  
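        // General case: ADDconstForCarry [-64] y sets the carry iff y >= 64, and
        // MaskIfNotCarry yields all ones when the carry is clear (y < 64) or zero
        // otherwise, so ORN passes y through unchanged or forces the shift amount
        // to all ones; hardware shifts of 64 or more then give 0 (or all sign bits).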
   201  (Rsh64x64 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
   202  (Rsh64Ux64 x y) -> (SRD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
   203  (Lsh64x64 x y)  -> (SLD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
   204  
   205  (Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
   206  (Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))
   207  
   208  (Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
   209  (Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
   210  (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   211  (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   212  
   213  (Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
   214  (Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
   215  (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   216  (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
   217  
   218  (Rsh32x64 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
   219  (Rsh32Ux64 x y) -> (SRW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
   220  (Lsh32x64 x y)  -> (SLW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
   221  
   222  (Rsh16x64 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
   223  (Rsh16Ux64 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
   224  (Lsh16x64 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
   225  
   226  (Rsh8x64 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
   227  (Rsh8Ux64 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
   228  (Lsh8x64 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
   229  
   230  (Rsh64x32 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
   231  (Rsh64Ux32 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
   232  (Lsh64x32 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
   233  
   234  (Rsh32x32 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
   235  (Rsh32Ux32 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
   236  (Lsh32x32 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
   237  
   238  (Rsh16x32 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
   239  (Rsh16Ux32 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
   240  (Lsh16x32 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
   241  
   242  (Rsh8x32 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
   243  (Rsh8Ux32 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
   244  (Lsh8x32 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
   245  
   246  
   247  (Rsh64x16 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
   248  (Rsh64Ux16 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
   249  (Lsh64x16 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
   250  
   251  (Rsh32x16 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
   252  (Rsh32Ux16 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
   253  (Lsh32x16 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
   254  
   255  (Rsh16x16 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
   256  (Rsh16Ux16 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
   257  (Lsh16x16 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
   258  
   259  (Rsh8x16 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
   260  (Rsh8Ux16 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
   261  (Lsh8x16 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
   262  
   263  
   264  (Rsh64x8 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
   265  (Rsh64Ux8 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
   266  (Lsh64x8 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
   267  
   268  (Rsh32x8 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
   269  (Rsh32Ux8 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
   270  (Lsh32x8 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
   271  
   272  (Rsh16x8 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
   273  (Rsh16Ux8 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
   274  (Lsh16x8 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
   275  
   276  (Rsh8x8 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
   277  (Rsh8Ux8 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
   278  (Lsh8x8 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
   279  
   280  // Cleaning up shift ops when input is masked
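        // With c < 0, d > 0 and c+d < 0 the masked value can never reach -c, so
        // the carry is never set and MaskIfNotCarry is always all ones.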
   281  (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
   282  (ORN x (MOVDconst [-1])) -> x
   283  
   284  // Potentially useful optimizing rewrites.
   285  // (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
   286  // (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
   287  // (MaskIfNotCarry CarrySet) -> 0
   288  // (MaskIfNotCarry CarryClear) -> -1
   289  
   290  (Addr {sym} base) -> (MOVDaddr {sym} base)
   291  (LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
   292  (OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)
   293  
   294  // TODO: optimize these cases?
   295  (Ctz32NonZero x) -> (Ctz32 x)
   296  (Ctz64NonZero x) -> (Ctz64 x)
   297  
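        // (x-1) &^ x has ones exactly in the trailing-zero positions of x (and is
        // all ones when x == 0), so its population count is the trailing zero count.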
   298  (Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
   299  (Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
   300  (Ctz16 x) -> (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
   301  (Ctz8 x) -> (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
   302  
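        // BitLen(x) = width - (number of leading zeros in x).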
   303  (BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
   304  (BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
   305  
   306  (PopCount64 x) -> (POPCNTD x)
   307  (PopCount32 x) -> (POPCNTW (MOVWZreg x))
   308  (PopCount16 x) -> (POPCNTW (MOVHZreg x))
   309  (PopCount8 x) -> (POPCNTB (MOVBZreg x))
   310  
   311  (And(64|32|16|8) x y) -> (AND x y)
   312  (Or(64|32|16|8) x y) -> (OR x y)
   313  (Xor(64|32|16|8) x y) -> (XOR x y)
   314  
   315  (Neg(64|32|16|8)  x) -> (NEG x)
   316  (Neg64F x) -> (FNEG x)
   317  (Neg32F x) -> (FNEG x)
   318  
   319  (Com(64|32|16|8) x) -> (NOR x x)
   320  
   321  // Lowering boolean ops
   322  (AndB x y) -> (AND x y)
   323  (OrB x y) -> (OR x y)
   324  (Not x) -> (XORconst [1] x)
   325  
   326  // Use ANDN for AND x NOT y
   327  (AND x (NOR y y)) -> (ANDN x y)
   328  
   329  // Lowering comparisons
   330  (EqB x y)  -> (ANDconst [1] (EQV x y))
   331  // Choosing sign or zero extension to match the operands' signedness sets up for sign/zero-extension elision later
   332  (Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   333  (Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   334  (Eq8 x y)  -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
   335  (Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   336  (Eq32 x y) -> (Equal (CMPW x y))
   337  (Eq64 x y) -> (Equal (CMP x y))
   338  (Eq32F x y) -> (Equal (FCMPU x y))
   339  (Eq64F x y) -> (Equal (FCMPU x y))
   340  (EqPtr x y) -> (Equal (CMP x y))
   341  
   342  (NeqB x y)  -> (XOR x y)
   343  // Like Eq8 and Eq16, prefer sign extension when it is likely to enable later elision.
   344  (Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   345  (Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   346  (Neq8 x y)  -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
   347  (Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   348  (Neq32 x y) -> (NotEqual (CMPW x y))
   349  (Neq64 x y) -> (NotEqual (CMP x y))
   350  (Neq32F x y) -> (NotEqual (FCMPU x y))
   351  (Neq64F x y) -> (NotEqual (FCMPU x y))
   352  (NeqPtr x y) -> (NotEqual (CMP x y))
   353  
   354  (Less8 x y)  -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   355  (Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   356  (Less32 x y) -> (LessThan (CMPW x y))
   357  (Less64 x y) -> (LessThan (CMP x y))
   358  (Less32F x y) -> (FLessThan (FCMPU x y))
   359  (Less64F x y) -> (FLessThan (FCMPU x y))
   360  
   361  (Less8U x y)  -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   362  (Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   363  (Less32U x y) -> (LessThan (CMPWU x y))
   364  (Less64U x y) -> (LessThan (CMPU x y))
   365  
   366  (Leq8 x y)  -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   367  (Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   368  (Leq32 x y) -> (LessEqual (CMPW x y))
   369  (Leq64 x y) -> (LessEqual (CMP x y))
   370  (Leq32F x y) -> (FLessEqual (FCMPU x y))
   371  (Leq64F x y) -> (FLessEqual (FCMPU x y))
   372  
   373  (Leq8U x y)  -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   374  (Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   375  (Leq32U x y) -> (LessEqual (CMPWU x y))
   376  (Leq64U x y) -> (LessEqual (CMPU x y))
   377  
   378  (Greater8 x y)  -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   379  (Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   380  (Greater32 x y) -> (GreaterThan (CMPW x y))
   381  (Greater64 x y) -> (GreaterThan (CMP x y))
   382  (Greater(32|64)F x y) -> (FGreaterThan (FCMPU x y))
   383  
   384  (Greater8U x y)  -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   385  (Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   386  (Greater32U x y) -> (GreaterThan (CMPWU x y))
   387  (Greater64U x y) -> (GreaterThan (CMPU x y))
   388  
   389  (Geq8 x y)  -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
   390  (Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   391  (Geq32 x y) -> (GreaterEqual (CMPW x y))
   392  (Geq64 x y) -> (GreaterEqual (CMP x y))
   393  (Geq(32|64)F x y) -> (FGreaterEqual (FCMPU x y))
   394  
   395  (Geq8U x y)  -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
   396  (Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
   397  (Geq32U x y) -> (GreaterEqual (CMPWU x y))
   398  (Geq64U x y) -> (GreaterEqual (CMPU x y))
   399  
   400  // Absorb pseudo-ops into blocks.
   401  (If (Equal cc) yes no) -> (EQ cc yes no)
   402  (If (NotEqual cc) yes no) -> (NE cc yes no)
   403  (If (LessThan cc) yes no) -> (LT cc yes no)
   404  (If (LessEqual cc) yes no) -> (LE cc yes no)
   405  (If (GreaterThan cc) yes no) -> (GT cc yes no)
   406  (If (GreaterEqual cc) yes no) -> (GE cc yes no)
   407  (If (FLessThan cc) yes no) -> (FLT cc yes no)
   408  (If (FLessEqual cc) yes no) -> (FLE cc yes no)
   409  (If (FGreaterThan cc) yes no) -> (FGT cc yes no)
   410  (If (FGreaterEqual cc) yes no) -> (FGE cc yes no)
   411  
   412  (If cond yes no) -> (NE (CMPWconst [0] cond) yes no)
   413  
   414  // Absorb boolean tests into block
   415  (NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
   416  (NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
   417  (NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
   418  (NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
   419  (NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
   420  (NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
   421  (NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
   422  (NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
   423  (NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
   424  (NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)
   425  
   426  // Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
   427  (EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
   428  (NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
   429  (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
   430  (NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
   431  
   432  // absorb flag constants into branches
   433  (EQ (FlagEQ) yes no) -> (First nil yes no)
   434  (EQ (FlagLT) yes no) -> (First nil no yes)
   435  (EQ (FlagGT) yes no) -> (First nil no yes)
   436  
   437  (NE (FlagEQ) yes no) -> (First nil no yes)
   438  (NE (FlagLT) yes no) -> (First nil yes no)
   439  (NE (FlagGT) yes no) -> (First nil yes no)
   440  
   441  (LT (FlagEQ) yes no) -> (First nil no yes)
   442  (LT (FlagLT) yes no) -> (First nil yes no)
   443  (LT (FlagGT) yes no) -> (First nil no yes)
   444  
   445  (LE (FlagEQ) yes no) -> (First nil yes no)
   446  (LE (FlagLT) yes no) -> (First nil yes no)
   447  (LE (FlagGT) yes no) -> (First nil no yes)
   448  
   449  (GT (FlagEQ) yes no) -> (First nil no yes)
   450  (GT (FlagLT) yes no) -> (First nil no yes)
   451  (GT (FlagGT) yes no) -> (First nil yes no)
   452  
   453  (GE (FlagEQ) yes no) -> (First nil yes no)
   454  (GE (FlagLT) yes no) -> (First nil no yes)
   455  (GE (FlagGT) yes no) -> (First nil yes no)
   456  
   457  // absorb InvertFlags into branches
   458  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   459  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   460  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   461  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   462  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   463  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   464  
   465  // constant comparisons
   466  (CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
   467  (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  -> (FlagLT)
   468  (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  -> (FlagGT)
   469  
   470  (CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
   471  (CMPconst (MOVDconst [x]) [y]) && x<y  -> (FlagLT)
   472  (CMPconst (MOVDconst [x]) [y]) && x>y  -> (FlagGT)
   473  
   474  (CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  -> (FlagEQ)
   475  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
   476  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
   477  
   478  (CMPUconst (MOVDconst [x]) [y]) && x==y  -> (FlagEQ)
   479  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
   480  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
   481  
   482  // other known comparisons
   483  //(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
   484  //(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
   485  //(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
   486  //(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)
   487  
   488  // absorb flag constants into boolean values
   489  (Equal (FlagEQ)) -> (MOVDconst [1])
   490  (Equal (FlagLT)) -> (MOVDconst [0])
   491  (Equal (FlagGT)) -> (MOVDconst [0])
   492  
   493  (NotEqual (FlagEQ)) -> (MOVDconst [0])
   494  (NotEqual (FlagLT)) -> (MOVDconst [1])
   495  (NotEqual (FlagGT)) -> (MOVDconst [1])
   496  
   497  (LessThan (FlagEQ)) -> (MOVDconst [0])
   498  (LessThan (FlagLT)) -> (MOVDconst [1])
   499  (LessThan (FlagGT)) -> (MOVDconst [0])
   500  
   501  (LessEqual (FlagEQ)) -> (MOVDconst [1])
   502  (LessEqual (FlagLT)) -> (MOVDconst [1])
   503  (LessEqual (FlagGT)) -> (MOVDconst [0])
   504  
   505  (GreaterThan (FlagEQ)) -> (MOVDconst [0])
   506  (GreaterThan (FlagLT)) -> (MOVDconst [0])
   507  (GreaterThan (FlagGT)) -> (MOVDconst [1])
   508  
   509  (GreaterEqual (FlagEQ)) -> (MOVDconst [1])
   510  (GreaterEqual (FlagLT)) -> (MOVDconst [0])
   511  (GreaterEqual (FlagGT)) -> (MOVDconst [1])
   512  
   513  // absorb InvertFlags into boolean values
   514  (Equal (InvertFlags x)) -> (Equal x)
   515  (NotEqual (InvertFlags x)) -> (NotEqual x)
   516  (LessThan (InvertFlags x)) -> (GreaterThan x)
   517  (GreaterThan (InvertFlags x)) -> (LessThan x)
   518  (LessEqual (InvertFlags x)) -> (GreaterEqual x)
   519  (GreaterEqual (InvertFlags x)) -> (LessEqual x)
   520  
   521  // Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
   522  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) -> ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
   523  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) -> ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
   524  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
   525  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
   526  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
   527  
   528  // Lowering loads
   529  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
   530  (Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
   531  (Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
   532  (Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
   533  (Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
   534  (Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
   535  (Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
   536  (Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)
   537  
   538  (Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
   539  (Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
   540  
   541  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
   542  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
   543  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
   544  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
   545  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
   546  (Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
   547  (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
   548  
   549  // Using Zero instead of LoweredZero allows the
   550  // target address to be folded where possible.
   551  (Zero [0] _ mem) -> mem
   552  (Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
   553  (Zero [2] destptr mem) ->
   554  	(MOVHstorezero destptr mem)
   555  (Zero [3] destptr mem) ->
   556  	(MOVBstorezero [2] destptr
   557  		(MOVHstorezero destptr mem))
   558  (Zero [4] destptr mem) ->
   559  	(MOVWstorezero destptr mem)
   560  (Zero [5] destptr mem) ->
   561  	(MOVBstorezero [4] destptr
   562          	(MOVWstorezero destptr mem))
   563  (Zero [6] destptr mem) ->
   564  	(MOVHstorezero [4] destptr
   565  		(MOVWstorezero destptr mem))
   566  (Zero [7] destptr mem) ->
   567  	(MOVBstorezero [6] destptr
   568  		(MOVHstorezero [4] destptr
   569  			(MOVWstorezero destptr mem)))
   570  
   571  // MOVD for store uses the DS instruction form, so offsets must be a multiple of 4
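        // (The DS form encodes a 14-bit offset that is scaled by 4.)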
   572  (Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   573          (MOVDstorezero destptr mem)
   574  (Zero [8] destptr mem) ->
   575          (MOVWstorezero [4] destptr
   576                  (MOVWstorezero [0] destptr mem))
   577  // Handle these cases only if aligned properly; otherwise use the general case below
   578  (Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   579          (MOVWstorezero [8] destptr
   580                  (MOVDstorezero [0] destptr mem))
   581  (Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   582         (MOVDstorezero [8] destptr
   583                  (MOVDstorezero [0] destptr mem))
   584  (Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   585         (MOVDstorezero [16] destptr
   586                 (MOVDstorezero [8] destptr
   587                         (MOVDstorezero [0] destptr mem)))
   588  (Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
   589         (MOVDstorezero [24] destptr
   590                 (MOVDstorezero [16] destptr
   591                         (MOVDstorezero [8] destptr
   592                                 (MOVDstorezero [0] destptr mem))))
   593  
   594  // Handle cases not handled above
   595  (Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)
   596  
   597  // moves
   598  // Only the MOVD and MOVW instructions require 4 byte
   599  // alignment in the offset field.  The other MOVx instructions
   600  // allow any alignment.
   601  (Move [0] _ _ mem) -> mem
   602  (Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
   603  (Move [2] dst src mem) ->
   604          (MOVHstore dst (MOVHZload src mem) mem)
   605  (Move [4] dst src mem) ->
   606  	(MOVWstore dst (MOVWZload src mem) mem)
   607  // MOVD for load and store must have offsets that are a multiple of 4
   608  (Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
   609  	(MOVDstore dst (MOVDload src mem) mem)
   610  (Move [8] dst src mem) ->
   611  	(MOVWstore [4] dst (MOVWZload [4] src mem)
   612  		(MOVWstore dst (MOVWZload src mem) mem))
   613  (Move [3] dst src mem) ->
   614          (MOVBstore [2] dst (MOVBZload [2] src mem)
   615                  (MOVHstore dst (MOVHload src mem) mem))
   616  (Move [5] dst src mem) ->
   617          (MOVBstore [4] dst (MOVBZload [4] src mem)
   618                  (MOVWstore dst (MOVWZload src mem) mem))
   619  (Move [6] dst src mem) ->
   620          (MOVHstore [4] dst (MOVHZload [4] src mem)
   621                  (MOVWstore dst (MOVWZload src mem) mem))
   622  (Move [7] dst src mem) ->
   623          (MOVBstore [6] dst (MOVBZload [6] src mem)
   624                  (MOVHstore [4] dst (MOVHZload [4] src mem)
   625                          (MOVWstore dst (MOVWZload src mem) mem)))
   626  
   627  // Large move uses a loop. Since the address is computed and the
   628  // offset is zero, any alignment can be used.
   629  (Move [s] dst src mem) && s > 8 ->
   630          (LoweredMove [s] dst src mem)
   631  
   632  // Calls
   633  // Lowering calls
   634  (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
   635  (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
   636  (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
   637  
   638  // Miscellaneous
   639  (GetClosurePtr) -> (LoweredGetClosurePtr)
   640  (GetCallerSP) -> (LoweredGetCallerSP)
   641  (GetCallerPC) -> (LoweredGetCallerPC)
   642  (IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
   643  (IsInBounds idx len) -> (LessThan (CMPU idx len))
   644  (IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
   645  (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
   646  
   647  // Write barrier.
   648  (WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
   649  
   650  // Optimizations
   651  // Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
   652  // so ORconst, XORconst easily expand into a pair.
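        // (A full 32-bit OR immediate becomes an oris/ori pair, for example.)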
   653  
   654  // Include very-large constants in the const-const case.
   655  (AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
   656  (OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
   657  (XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
   658  
   659  // Discover consts
   660  (AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
   661  (XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
   662  (OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
   663  
   664  // Simplify consts
   665  (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
   666  (ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
   667  (XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
   668  (ANDconst [-1] x) -> x
   669  (ANDconst [0] _) -> (MOVDconst [0])
   670  (XORconst [0] x) -> x
   671  (ORconst [-1] _) -> (MOVDconst [-1])
   672  (ORconst [0] x) -> x
   673  
   674  // zero-extend of small and -> small and
   675  (MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
   676  (MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
   677  (MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
   678  (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y
   679  
   680  // sign extend of small-positive and -> small-positive-and
   681  (MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
   682  (MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
   683  (MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
   684  (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y
   685  
   686  // small and of zero-extend -> either zero-extend or small and
   687  (ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
   688  (ANDconst [0xFF] y:(MOVBreg _)) -> y
   689  (ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF -> y
   690  (ANDconst [0xFFFF] y:(MOVHreg _)) -> y
   691  
   692  (AND (MOVDconst [c]) y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF -> y
   693  (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) -> (MOVWZreg x)
   694  // normal case
   695  (ANDconst [c] (MOV(B|BZ)reg x)) -> (ANDconst [c&0xFF] x)
   696  (ANDconst [c] (MOV(H|HZ)reg x)) -> (ANDconst [c&0xFFFF] x)
   697  (ANDconst [c] (MOV(W|WZ)reg x)) -> (ANDconst [c&0xFFFFFFFF] x)
   698  
   699  // Eliminate unnecessary sign/zero extend following right shift
   700  (MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) -> (SRWconst [c] (MOVBZreg x))
   701  (MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) -> (SRWconst [c] (MOVHZreg x))
   702  (MOVWZreg (SRWconst [c] (MOVWZreg x))) -> (SRWconst [c] (MOVWZreg x))
   703  (MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) -> (SRAWconst [c] (MOVBreg x))
   704  (MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) -> (SRAWconst [c] (MOVHreg x))
   705  (MOVWreg (SRAWconst [c] (MOVWreg x))) -> (SRAWconst [c] (MOVWreg x))
   706  
   707  (MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRWconst [c] x)
   708  (MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRWconst [c] x)
   709  (MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 -> (SRWconst [c] x)
   710  (MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRAWconst [c] x)
   711  (MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRAWconst [c] x)
   712  (MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 -> (SRAWconst [c] x)
   713  
   714  // initial right shift will handle sign/zero extend
   715  (MOVBZreg (SRDconst [c] x)) && c>=56 -> (SRDconst [c] x)
   716  (MOVBreg (SRDconst [c] x)) && c>56 -> (SRDconst [c] x)
   717  (MOVBreg (SRDconst [c] x)) && c==56 -> (SRADconst [c] x)
   718  (MOVBZreg (SRWconst [c] x)) && c>=24 -> (SRWconst [c] x)
   719  (MOVBreg (SRWconst [c] x)) && c>24 -> (SRWconst [c] x)
   720  (MOVBreg (SRWconst [c] x)) && c==24 -> (SRAWconst [c] x)
   721  
   722  (MOVHZreg (SRDconst [c] x)) && c>=48 -> (SRDconst [c] x)
   723  (MOVHreg (SRDconst [c] x)) && c>48 -> (SRDconst [c] x)
   724  (MOVHreg (SRDconst [c] x)) && c==48 -> (SRADconst [c] x)
   725  (MOVHZreg (SRWconst [c] x)) && c>=16 -> (SRWconst [c] x)
   726  (MOVHreg (SRWconst [c] x)) && c>16 -> (SRWconst [c] x)
   727  (MOVHreg (SRWconst [c] x)) && c==16 -> (SRAWconst [c] x)
   728  
   729  (MOVWZreg (SRDconst [c] x)) && c>=32 -> (SRDconst [c] x)
   730  (MOVWreg (SRDconst [c] x)) && c>32 -> (SRDconst [c] x)
   731  (MOVWreg (SRDconst [c] x)) && c==32 -> (SRADconst [c] x)
   732  
   733  // Various redundant zero/sign extension combinations.
   734  (MOVBZreg y:(MOVBZreg _)) -> y  // repeat
   735  (MOVBreg y:(MOVBreg _)) -> y // repeat
   736  (MOVBreg (MOVBZreg x)) -> (MOVBreg x)
   737  (MOVBZreg (MOVBreg x)) -> (MOVBZreg x)
   738  
   739  // H - there are more combinations than these
   740  
   741  (MOVHZreg y:(MOVHZreg _)) -> y // repeat
   742  (MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow
   743  (MOVHZreg y:(MOVHBRload _ _)) -> y
   744  
   745  (MOVHreg y:(MOVHreg _)) -> y // repeat
   746  (MOVHreg y:(MOVBreg _)) -> y // wide of narrow
   747  
   748  (MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
   749  (MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)
   750  
   751  // W - there are more combinations than these
   752  
   753  (MOVWZreg y:(MOVWZreg _)) -> y // repeat
   754  (MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
   755  (MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow
   756  (MOVWZreg y:(MOVHBRload _ _)) -> y
   757  (MOVWZreg y:(MOVWBRload _ _)) -> y
   758  
   759  (MOVWreg y:(MOVWreg _)) -> y // repeat
   760  (MOVWreg y:(MOVHreg _)) -> y // wide of narrow
   761  (MOVWreg y:(MOVBreg _)) -> y // wide of narrow
   762  
   763  (MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
   764  (MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)
   765  
   766  // Arithmetic constant ops
   767  
   768  (ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
   769  (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
   770  (ADDconst [0] x) -> x
   771  (SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
   772  // TODO deal with subtract-from-const
   773  
   774  (ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)
   775  
   776  // Use register moves instead of stores and loads to move int<->float values
   777  // Common with math Float64bits, Float64frombits
   778  (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
   779  (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)
   780  
   781  (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
   782  (MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)
   783  
   784  (MTVSRD (MOVDconst [c])) -> (FMOVDconst [c])
   785  (MFVSRD (FMOVDconst [c])) -> (MOVDconst [c])
   786  
   787  (MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem)
   788  (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)
   789  
   790  // Fold offsets for stores.
   791  (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
   792  (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
   793  (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
   794  (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)
   795  
   796  (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
   797  (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
   798  
   799  // Fold address into load/store.
   800  // The assembler needs to generate several instructions and use a
   801  // temp register for accessing a global, and each time it will reload
   802  // the temp register. So don't fold the address of a global unless
   803  // there is only one use.
   804  (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   805  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   806          (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   807  (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   808  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   809          (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   810  (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   811  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   812          (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   813  (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   814  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   815          (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   816  
   817  (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   818  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   819          (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   820  (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   821  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   822          (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   823  
   824  (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   825  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   826          (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   827  (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   828  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   829          (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   830  (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   831  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   832          (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   833  (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   834  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   835          (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   836  (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   837  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   838          (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   839  (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   840  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   841          (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   842  (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   843  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   844          (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   845  (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   846  	&& (ptr.Op != OpSB || p.Uses == 1) ->
   847          (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   848  
   849  // Fold offsets for loads.
   850  (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
   851  (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)
   852  
   853  (MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
   854  (MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
   855  (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
   856  (MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
   857  (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
   858  (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)
   859  
   860  // Determine load + addressing that can be done as a register indexed load
   861  (MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
   862  
   863  // Determine indexed loads with constant values that can be done without index
   864  (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
   865  (MOV(D|W|WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
   866  
   867  
   868  // Store of zero -> storezero
   869  (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
   870  (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
   871  (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
   872  (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
   873  
   874  // Fold offsets for storezero
   875  (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   876      (MOVDstorezero [off1+off2] {sym} x mem)
   877  (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   878      (MOVWstorezero [off1+off2] {sym} x mem)
   879  (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   880      (MOVHstorezero [off1+off2] {sym} x mem)
   881  (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
   882      (MOVBstorezero [off1+off2] {sym} x mem)
   883  
   884  // Stores with addressing that can be done as indexed stores
   885  (MOV(D|W|H|B)store [off] {sym} p:(ADD ptr idx) val mem) && off == 0 && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)
   886  
   887  // Stores with constant index values can be done without indexed instructions
   888  (MOV(D|W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
   889  (MOV(D|W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
   890  
   891  // Fold symbols into storezero
   892  (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   893  	&& (x.Op != OpSB || p.Uses == 1) ->
   894      (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   895  (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   896  	&& (x.Op != OpSB || p.Uses == 1) ->
   897      (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   898  (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   899  	&& (x.Op != OpSB || p.Uses == 1) ->
   900      (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   901  (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   902  	&& (x.Op != OpSB || p.Uses == 1) ->
   903      (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   904  
   905  // atomic intrinsics
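        // The [1]/[0] aux selects the ordering strength: 1 for the default
        // sequentially consistent operations, 0 for the acquire/release variants.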
   906  (AtomicLoad(32|64|Ptr)  ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) [1] ptr mem)
   907  (AtomicLoadAcq32        ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)
   908  
   909  (AtomicStore(32|64)      ptr val mem) -> (LoweredAtomicStore(32|64) [1] ptr val mem)
   910  (AtomicStoreRel32        ptr val mem) -> (LoweredAtomicStore32 [0] ptr val mem)
   911  //(AtomicStorePtrNoWB ptr val mem) -> (STLR  ptr val mem)
   912  
   913  (AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem)
   914  
   915  (AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem)
   916  
   917  (AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
   918  (AtomicCompareAndSwapRel32   ptr old new_ mem) -> (LoweredAtomicCas32 [0] ptr old new_ mem)
   919  
   920  (AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
   921  (AtomicOr8  ptr val mem) -> (LoweredAtomicOr8  ptr val mem)
   922  
   923  // Lowering extension
   924  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
   925  (SignExt8to(16|32|64)  x) -> (MOVBreg x)
   926  (SignExt16to(32|64) x) -> (MOVHreg x)
   927  (SignExt32to64 x) -> (MOVWreg x)
   928  
   929  (ZeroExt8to(16|32|64)  x) -> (MOVBZreg x)
   930  (ZeroExt16to(32|64) x) -> (MOVHZreg x)
   931  (ZeroExt32to64 x) -> (MOVWZreg x)
   932  
   933  (Trunc(16|32|64)to8 <t> x) && isSigned(t) -> (MOVBreg x)
   934  (Trunc(16|32|64)to8  x) -> (MOVBZreg x)
   935  (Trunc(32|64)to16 <t> x) && isSigned(t) -> (MOVHreg x)
   936  (Trunc(32|64)to16 x) -> (MOVHZreg x)
   937  (Trunc64to32 <t> x) && isSigned(t) -> (MOVWreg x)
   938  (Trunc64to32 x) -> (MOVWZreg x)
   939  
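        // Slicemask is -1 when x > 0 and 0 when x == 0: negate, then an
        // arithmetic shift by 63 broadcasts the sign bit.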
   940  (Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
   941  
   942  // Note that MOV??reg returns a 64-bit int; x is not necessarily that wide
   943  // This may interact with other patterns in the future. (Compare with arm64)
   944  (MOV(B|H|W)Zreg x:(MOVBZload _ _)) -> x
   945  (MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) -> x
   946  (MOV(H|W)Zreg x:(MOVHZload _ _)) -> x
   947  (MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) -> x
   948  (MOV(H|W)reg x:(MOVHload _ _)) -> x
   949  (MOV(H|W)reg x:(MOVHloadidx _ _ _)) -> x
   950  (MOVWZreg x:(MOVWZload _ _)) -> x
   951  (MOVWZreg x:(MOVWZloadidx _ _ _)) -> x
   952  (MOVWreg x:(MOVWload _ _)) -> x
   953  (MOVWreg x:(MOVWloadidx _ _ _)) -> x
   954  
   955  // don't extend if argument is already extended
   956  (MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
   957  (MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
   958  (MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
   959  (MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
   960  (MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
   961  (MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x
   962  
   963  (MOVBZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint8(c))])
   964  (MOVBreg (MOVDconst [c]))  -> (MOVDconst [int64(int8(c))])
   965  (MOVHZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint16(c))])
   966  (MOVHreg (MOVDconst [c]))  -> (MOVDconst [int64(int16(c))])
   967  (MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
   968  (MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
   969  
   970  
   971  // Lose widening ops fed to stores
   972  (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   973  (MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
   974  (MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   975  (MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   976  (MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   977  (MOVBstoreidx [off] {sym} ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstoreidx [off] {sym} ptr idx x mem)
   978  (MOVHstoreidx [off] {sym} ptr idx (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstoreidx [off] {sym} ptr idx x mem)
   979  (MOVWstoreidx [off] {sym} ptr idx (MOV(W|WZ)reg x) mem) -> (MOVWstoreidx [off] {sym} ptr idx x mem)
   980  (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   981  (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   982  (MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHBRstore {sym} ptr x mem)
   983  (MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWBRstore {sym} ptr x mem)
   984  
   985  // Lose W-widening ops fed to compare-W
   986  (CMPW x (MOVWreg y)) -> (CMPW x y)
   987  (CMPW (MOVWreg x) y) -> (CMPW x y)
   988  (CMPWU x (MOVWZreg y)) -> (CMPWU x y)
   989  (CMPWU (MOVWZreg x) y) -> (CMPWU x y)
   990  
   991  (CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
   992  (CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
   993  (CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
   994  (CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))
   995  
   996  (CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
   997  (CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
   998  (CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
   999  (CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))
  1000  
  1001  // A particular pattern seen in cgo code:
  1002  (AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
  1003  (AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)
  1004  
  1005  // floating point negative abs
  1006  (FNEG (FABS x)) -> (FNABS x)
  1007  (FNEG (FNABS x)) -> (FABS x)
  1008  
  1009  // floating-point fused multiply-add/sub
  1010  (FADD (FMUL x y) z) -> (FMADD x y z)
  1011  (FSUB (FMUL x y) z) -> (FMSUB x y z)
  1012  (FADDS (FMULS x y) z) -> (FMADDS x y z)
  1013  (FSUBS (FMULS x y) z) -> (FMSUBS x y z)
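// Illustration (not a rewrite rule; helper names are made up): with the rules
// above, a multiply feeding an add or subtract is fused into a single
// multiply-add/sub instruction.
//
//	func fma64(x, y, z float64) float64 { return x*y + z } // FADD (FMUL x y) z -> FMADD
//	func fms32(x, y, z float32) float32 { return x*y - z } // FSUBS (FMULS x y) z -> FMSUBS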
  1014  
  1015  
  1016  // The following rules match the byte-by-byte loads and stores found in the encoding/binary
  1017  // functions UintXX (load) and PutUintXX (store) and convert them into the single largest
  1018  // possible load or store.
  1019  // Some are marked big or little endian based on the order in which the bytes are loaded or
  1020  // stored, not on the byte order of the machine. These rules are intended for little endian machines.
  1021  // To implement them for big endian machines, most rules would have to be duplicated with the
  1022  // resulting rule reversed, i.e., MOVHZload on little endian would be MOVHBRload on big endian
  1023  // and vice versa.
  1024  // b[0] | b[1]<<8 -> load 16-bit Little endian
  1025  (OR <t> x0:(MOVBZload [i0] {s} p mem)
  1026  	o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
  1027  	&& !config.BigEndian
  1028  	&& i1 == i0+1
  1029  	&& x0.Uses == 1 && x1.Uses == 1
  1030  	&& o1.Uses == 1
  1031  	&& mergePoint(b, x0, x1) != nil
  1032  	&& clobber(x0) && clobber(x1) && clobber(o1)
  1033  	 -> @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
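// Illustration (not a rewrite rule; helper name is made up): the rule above matches
// code of the shape used by encoding/binary's LittleEndian.Uint16.
//
//	func le16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } // -> one MOVHZload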
  1034  
  1035  // b[0]<<8 | b[1] -> load 16-bit Big endian on Little endian arch.
  1036  // Use byte-reverse indexed load for 2 bytes.
  1037  (OR <t> x0:(MOVBZload [i1] {s} p mem)
  1038  	o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
  1039  	&& !config.BigEndian
  1040  	&& i1 == i0+1
  1041  	&& x0.Uses == 1 && x1.Uses == 1
  1042  	&& o1.Uses == 1
  1043  	&& mergePoint(b, x0, x1) != nil
  1044  	&& clobber(x0) && clobber(x1) && clobber(o1)
  1045  	  -> @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
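// Illustration (not a rewrite rule; helper name is made up): big-endian byte order
// written in Go source, as in BigEndian.Uint16, is matched by the rule above.
//
//	func be16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 } // -> MOVHBRload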
  1046  
  1047  // b[0]<<(n+8) | b[1]<<n -> load 16-bit Big endian (where n%8 == 0)
  1048  // Use byte-reverse indexed load for 2 bytes,
  1049  // then shift left to the correct position. Used to match subrules
  1050  // from longer rules.
  1051  (OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
  1052  	s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
  1053  	&& !config.BigEndian
  1054  	&& i1 == i0+1
  1055  	&& n1%8 == 0
  1056  	&& n2 == n1+8
  1057  	&& x0.Uses == 1 && x1.Uses == 1
  1058  	&& s0.Uses == 1 && s1.Uses == 1
  1059  	&& mergePoint(b, x0, x1) != nil
  1060  	&& clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)
  1061  	  -> @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
  1062  
  1063  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian
  1064  // Use byte-reverse indexed load for 4 bytes.
  1065  (OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
  1066  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
  1067  	x0:(MOVHZload [i0] {s} p mem)))
  1068  	&& !config.BigEndian
  1069  	&& i2 == i0+2
  1070  	&& i3 == i0+3
  1071  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1072  	&& o0.Uses == 1
  1073  	&& s0.Uses == 1 && s1.Uses == 1
  1074  	&& mergePoint(b, x0, x1, x2) != nil
  1075  	&& clobber(x0) && clobber(x1) && clobber(x2)
  1076  	&& clobber(s0) && clobber(s1)
  1077  	&& clobber(o0)
  1078  	 -> @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
  1079  
  1080  // b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit Big endian order on Little endian arch
  1081  // Use byte-reverse indexed load for 4 bytes with computed address.
  1082  // Could be used to match subrules of a longer rule.
  1083  (OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
  1084  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
  1085  	x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
  1086  	&& !config.BigEndian
  1087  	&& i1 == i0+1
  1088  	&& i2 == i0+2
  1089  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1090  	&& o0.Uses == 1
  1091  	&& s0.Uses == 1 && s1.Uses == 1
  1092  	&& mergePoint(b, x0, x1, x2) != nil
  1093  	&& clobber(x0) && clobber(x1) && clobber(x2)
  1094  	&& clobber(s0) && clobber(s1)
  1095  	&& clobber(o0)
  1096  	  -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1097  
  1098  // b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit Big endian order on Little endian arch
  1099  // Use byte-reverse indexed load for 4 bytes with computed address.
  1100  // Could be used to match subrules of a longer rule.
  1101  (OR <t> x0:(MOVBZload [i3] {s} p mem)
  1102  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
  1103  	s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
  1104  	&& !config.BigEndian
  1105  	&& i2 == i0+2
  1106  	&& i3 == i0+3
  1107  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1108  	&& o0.Uses == 1
  1109  	&& s0.Uses == 1 && s1.Uses == 1
  1110  	&& mergePoint(b, x0, x1, x2) != nil
  1111  	&& clobber(x0) && clobber(x1) && clobber(x2)
  1112  	&& clobber(s0) && clobber(s1)
  1113  	&& clobber(o0)
  1114  	  -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1115  
  1116  // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 -> load 32-bit Big endian order on Little endian arch
  1117  // Use byte-reverse indexed load for 4 bytes with computed address.
  1118  // Used to match longer rules.
  1119  (OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
  1120  	o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
  1121  	s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
  1122  	&& !config.BigEndian
  1123  	&& i2 == i0+2
  1124  	&& i3 == i0+3
  1125  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1126  	&& o0.Uses == 1
  1127  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  1128  	&& mergePoint(b, x0, x1, x2) != nil
  1129  	&& clobber(x0) && clobber(x1) && clobber(x2)
  1130  	&& clobber(s0) && clobber(s1) && clobber(s2)
  1131  	&& clobber(o0)
  1132  	  -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
  1133  
  1134  // b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 32-bit Big endian order on Little endian arch
  1135  // Use byte-reverse indexed load for 4 bytes with constant address.
  1136  // Used to match longer rules.
  1137  (OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
  1138          o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  1139          s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
  1140          && !config.BigEndian
  1141          && i1 == i0+1
  1142          && i2 == i0+2
  1143          && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1144          && o0.Uses == 1
  1145          && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  1146          && mergePoint(b, x0, x1, x2) != nil
  1147          && clobber(x0) && clobber(x1) && clobber(x2)
  1148          && clobber(s0) && clobber(s1) && clobber(s2)
  1149          && clobber(o0)
  1150            -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
  1151  
  1152  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian
  1153  // Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
  1154  // so matching shorter previously defined subrules is important.
  1155  // Offset must be multiple of 4 for MOVD
  1156  (OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
  1157  	o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
  1158  	o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
  1159  	o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
  1160  	x0:(MOVWZload {s} [i0] p mem)))))
  1161  	&& !config.BigEndian
  1162  	&& i0%4 == 0
  1163  	&& i4 == i0+4
  1164  	&& i5 == i0+5
  1165  	&& i6 == i0+6
  1166  	&& i7 == i0+7
  1167  	&& x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  1168  	&& o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  1169  	&& s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  1170  	&& mergePoint(b, x0, x4, x5, x6, x7) != nil
  1171  	&& clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
  1172  	&& clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6)
  1173  	&& clobber(o3) && clobber(o4) && clobber(o5)
  1174  	  -> @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
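// Illustration (not a rewrite rule; helper name is made up): once the 32-bit subrule
// above has merged the low four bytes into a MOVWZload, a LittleEndian.Uint64-style
// function reduces to a single MOVDload.
//
//	func le64(b []byte) uint64 {
//		return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
//			uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
//	}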
  1175  
  1176  // b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit Big endian ordered bytes on Little endian arch
  1177  // Use byte-reverse indexed load of 8 bytes.
  1178  // Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
  1179  // so matching shorter previously defined subrules is important.
  1180  (OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
  1181  	o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  1182  	o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
  1183  	o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
  1184  	x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
  1185  	&& !config.BigEndian
  1186  	&& i1 == i0+1
  1187  	&& i2 == i0+2
  1188  	&& i3 == i0+3
  1189  	&& i4 == i0+4
  1190  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
  1191  	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
  1192  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1193  	&& mergePoint(b, x0, x1, x2, x3, x4) != nil
  1194  	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
  1195  	&& clobber(o0) && clobber(o1) && clobber(o2)
  1196  	&& clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)
  1197  	  -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
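// Illustration (not a rewrite rule; helper name is made up): a BigEndian.Uint64-style
// function is matched via the byte-reversed subrules and becomes a single MOVDBRload.
//
//	func be64(b []byte) uint64 {
//		return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
//			uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
//	}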
  1198  
  1199  // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit Big endian ordered bytes on Little endian arch
  1200  // Use byte-reverse indexed load of 8 bytes.
  1201  // Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
  1202  // so matching shorter previously defined subrules is important.
  1203  (OR <t> x7:(MOVBZload [i7] {s} p mem)
  1204  	o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
  1205  	o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
  1206  	o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
  1207  	s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
  1208  	&& !config.BigEndian
  1209  	&& i4 == i0+4
  1210  	&& i5 == i0+5
  1211  	&& i6 == i0+6
  1212  	&& i7 == i0+7
  1213  	&& x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  1214  	&& o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  1215  	&& s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  1216  	&& mergePoint(b, x3, x4, x5, x6, x7) != nil
  1217  	&& clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
  1218  	&& clobber(o3) && clobber(o4) && clobber(o5)
  1219  	&& clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)
  1220  	-> @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1221  
  1222  // 2 byte store Little endian as in:
  1223  //      b[0] = byte(v >> 16)
  1224  //      b[1] = byte(v >> 24)
  1225  // Added for use in matching longer rules.
  1226  (MOVBstore [i1] {s} p (SR(W|D)const w [24])
  1227          x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
  1228          && !config.BigEndian
  1229          && x0.Uses == 1
  1230          && i1 == i0+1
  1231          && clobber(x0)
  1232            -> (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
  1233  
  1234  // 2 byte store Little endian as in:
  1235  //      b[0] = byte(v)
  1236  //      b[1] = byte(v >> 8)
  1237  (MOVBstore [i1] {s} p (SR(W|D)const w [8])
  1238  	x0:(MOVBstore [i0] {s} p w mem))
  1239  	&& !config.BigEndian
  1240  	&& x0.Uses == 1
  1241  	&& i1 == i0+1
  1242  	&& clobber(x0)
  1243  	  -> (MOVHstore [i0] {s} p w mem)
  1244  
  1245  // 4 byte store Little endian as in:
  1246  //     b[0:1] = uint16(v)
  1247  //     b[2:3] = uint16(v >> 16)
  1248  (MOVHstore [i1] {s} p (SR(W|D)const w [16])
  1249  	x0:(MOVHstore [i0] {s} p w mem))
  1250  	&& !config.BigEndian
  1251  	&& x0.Uses == 1
  1252  	&& i1 == i0+2
  1253  	&& clobber(x0)
  1254  	  -> (MOVWstore [i0] {s} p w mem)
  1255  
  1256  // 4 byte store Big endian as in:
  1257  //     b[0] = byte(v >> 24)
  1258  //     b[1] = byte(v >> 16)
  1259  //     b[2] = byte(v >> 8)
  1260  //     b[3] = byte(v)
  1261  // Use byte-reverse indexed 4 byte store.
  1262  (MOVBstore [i3] {s} p w
  1263  	x0:(MOVBstore [i2] {s} p (SRWconst w [8])
  1264  	x1:(MOVBstore [i1] {s} p (SRWconst w [16])
  1265  	x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
  1266  	&& !config.BigEndian
  1267  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1268  	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3
  1269  	&& clobber(x0) && clobber(x1) && clobber(x2)
  1270  	  -> (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
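// Illustration (not a rewrite rule; helper name is made up): a BigEndian.PutUint32-style
// store sequence collapses into the byte-reversed store produced by the rule above.
//
//	func be32put(b []byte, v uint32) {
//		b[0] = byte(v >> 24)
//		b[1] = byte(v >> 16)
//		b[2] = byte(v >> 8)
//		b[3] = byte(v)
//	} // -> MOVWBRstore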
  1271  
  1272  // The 2 byte store rule appears after the 4 byte store rule so that the
  1273  // 2 byte store is not matched first.
  1274  // If the 4 byte store were built on top of the 2 byte store, there would be
  1275  // variations of the MOVDaddr subrule that would require additional
  1276  // rules to be written.
  1277  
  1278  // 2 byte store Big endian as in:
  1279  //      b[0] = byte(v >> 8)
  1280  //      b[1] = byte(v)
  1281  (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
  1282  	&& !config.BigEndian
  1283  	&& x0.Uses == 1
  1284  	&& i1 == i0+1
  1285  	&& clobber(x0)
  1286  	  -> (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
  1287  
  1288  // 8 byte store Little endian as in:
  1289  //	b[0] = byte(v)
  1290  //	b[1] = byte(v >> 8)
  1291  //	b[2] = byte(v >> 16)
  1292  //	b[3] = byte(v >> 24)
  1293  //	b[4] = byte(v >> 32)
  1294  //	b[5] = byte(v >> 40)
  1295  //	b[6] = byte(v >> 48)
  1296  //	b[7] = byte(v >> 56)
  1297  // Built on previously defined rules
  1298  // Offset must be multiple of 4 for MOVDstore
  1299  (MOVBstore [i7] {s} p (SRDconst w [56])
  1300  	x0:(MOVBstore [i6] {s} p (SRDconst w [48])
  1301  	x1:(MOVBstore [i5] {s} p (SRDconst w [40])
  1302  	x2:(MOVBstore [i4] {s} p (SRDconst w [32])
  1303  	x3:(MOVWstore [i0] {s} p w mem)))))
  1304  	&& !config.BigEndian
  1305  	&& i0%4 == 0
  1306  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
  1307  	&& i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1308  	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
  1309  	  -> (MOVDstore [i0] {s} p w mem)
  1310  
  1311  // 8 byte store Big endian as in:
  1312  //      b[0] = byte(v >> 56)
  1313  //      b[1] = byte(v >> 48)
  1314  //      b[2] = byte(v >> 40)
  1315  //      b[3] = byte(v >> 32)
  1316  //      b[4] = byte(v >> 24)
  1317  //      b[5] = byte(v >> 16)
  1318  //      b[6] = byte(v >> 8)
  1319  //      b[7] = byte(v)
  1320  // Use byte-reverse indexed 8 byte store.
  1321  (MOVBstore [i7] {s} p w
  1322          x0:(MOVBstore [i6] {s} p (SRDconst w [8])
  1323          x1:(MOVBstore [i5] {s} p (SRDconst w [16])
  1324          x2:(MOVBstore [i4] {s} p (SRDconst w [24])
  1325          x3:(MOVBstore [i3] {s} p (SRDconst w [32])
  1326          x4:(MOVBstore [i2] {s} p (SRDconst w [40])
  1327          x5:(MOVBstore [i1] {s} p (SRDconst w [48])
  1328          x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
  1329          && !config.BigEndian
  1330          && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
  1331          && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1332          && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
  1333            -> (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)