github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/ssa/_gen/PPC64.rules

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add(Ptr|64|32|16|8) ...) => (ADD ...)
     7  (Add64F ...) => (FADD ...)
     8  (Add32F ...) => (FADDS ...)
     9  
    10  (Sub(Ptr|64|32|16|8) ...) => (SUB ...)
    11  (Sub32F ...) => (FSUBS ...)
    12  (Sub64F ...) => (FSUB ...)
    13  
    14  // Combine 64-bit integer multiply and adds
    15  (ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
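        // Illustrative example (not from the original source): for int64 values,
        // Go code such as r := x*y + z matches the rule above on POWER9 and later
        // and lowers to a single maddld, provided the multiply has no other uses.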
    16  
    17  (Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
    18  (Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
    19  (Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
    20  (Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
    21  (Mod64 x y) && buildcfg.GOPPC64 >= 9 => (MODSD x y)
    22  (Mod64 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVD x y)))
    23  (Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
    24  (Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
    25  (Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
    26  (Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
    27  (Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
    28  (Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
    29  
    30  // (x + y) / 2 with x>=y => (x - y) / 2 + y
    31  (Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
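        // Worked example (illustrative): x=7, y=3 gives (7-3)/2 + 3 = 2 + 3 = 5,
        // the same as (7+3)/2; since x >= y, the subtraction cannot wrap, so the
        // unsigned average is computed without needing a 65th bit.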
    32  
    33  (Mul64 ...) => (MULLD ...)
    34  (Mul(32|16|8) ...) => (MULLW ...)
    35  (Select0 (Mul64uhilo x y)) => (MULHDU x y)
    36  (Select1 (Mul64uhilo x y)) => (MULLD x y)
    37  
    38  (Div64 [false] x y) => (DIVD x y)
    39  (Div64u ...) => (DIVDU ...)
    40  (Div32 [false] x y) => (DIVW x y)
    41  (Div32u ...) => (DIVWU ...)
    42  (Div16 [false]  x y) => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    43  (Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
    44  (Div8 x y) => (DIVW  (SignExt8to32 x) (SignExt8to32 y))
    45  (Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
    46  
    47  (Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
    48  
    49  (Mul(32|64)F ...) => ((FMULS|FMUL) ...)
    50  
    51  (Div(32|64)F ...) => ((FDIVS|FDIV) ...)
    52  
    53  // Lowering float <=> int
    54  (Cvt32to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD (SignExt32to64 x)))
    55  (Cvt64to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD x))
    56  
    57  (Cvt32Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
    58  (Cvt64Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
    59  
    60  (Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
    61  (Cvt64Fto32F ...) => (FRSP ...)
    62  
    63  (CvtBoolToUint8 ...) => (Copy ...)
    64  
    65  (Round(32|64)F ...) => (LoweredRound(32|64)F ...)
    66  
    67  (Sqrt ...) => (FSQRT ...)
    68  (Sqrt32 ...) => (FSQRTS ...)
    69  (Floor ...) => (FFLOOR ...)
    70  (Ceil ...) => (FCEIL ...)
    71  (Trunc ...) => (FTRUNC ...)
    72  (Round ...) => (FROUND ...)
    73  (Copysign x y) => (FCPSGN y x)
    74  (Abs ...) => (FABS ...)
    75  (FMA ...) => (FMADD ...)
    76  
    77  // Lowering extension
    78  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
    79  (SignExt8to(16|32|64) ...) => (MOVBreg ...)
    80  (SignExt16to(32|64) ...) => (MOVHreg ...)
    81  (SignExt32to64 ...) => (MOVWreg ...)
    82  
    83  (ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
    84  (ZeroExt16to(32|64) ...) => (MOVHZreg ...)
    85  (ZeroExt32to64 ...) => (MOVWZreg ...)
    86  
    87  (Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
    88  (Trunc(16|32|64)to8  x) => (MOVBZreg x)
    89  (Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
    90  (Trunc(32|64)to16 x) => (MOVHZreg x)
    91  (Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
    92  (Trunc64to32 x) => (MOVWZreg x)
    93  
    94  // Lowering constants
    95  (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
    96  (Const(32|64)F ...) => (FMOV(S|D)const ...)
    97  (ConstNil) => (MOVDconst [0])
    98  (ConstBool [t]) => (MOVDconst [b2i(t)])
    99  
   100  // Carrying addition.
   101  (Select0 (Add64carry x y c)) =>            (Select0 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1]))))
   102  (Select1 (Add64carry x y c)) => (ADDZEzero (Select1 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1])))))
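        // The incoming carry c is a 0/1 value in a GPR; (ADDCconst c [-1]) computes
        // c-1, which produces a carry out (sets CA) exactly when c is nonzero,
        // moving the carry back into the CA bit.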
   103  // Fold initial carry bit if 0.
   104  (ADDE x y (Select1 <typ.UInt64> (ADDCconst (MOVDconst [0]) [-1]))) => (ADDC x y)
   105  // Fold transfer of CA -> GPR -> CA. Note: n can have 2 uses when feeding a chained Add64carry.
   106  (Select1 (ADDCconst n:(ADDZEzero x) [-1])) && n.Uses <= 2 => x
   107  
   108  // Borrowing subtraction.
   109  (Select0 (Sub64borrow x y c)) =>                 (Select0 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))
   110  (Select1 (Sub64borrow x y c)) => (NEG (SUBZEzero (Select1 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))))
   111  // Fold initial borrow bit if 0.
   112  (SUBE x y (Select1 <typ.UInt64> (SUBCconst (MOVDconst [0]) [0]))) => (SUBC x y)
   113  // Fold transfer of CA -> GPR -> CA. Note: n can have 2 uses when feeding a chained Sub64borrow.
   114  (Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0])) && n.Uses <= 2 => x
   115  
   116  // Constant folding
   117  (FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
   118  (FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
   119  (FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
   120  (FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
   121  (FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
   122  
   123  // Rotates
   124  (RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
   125  (RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
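        // Illustrative: for c=3, RotateLeft8 x expands to (x<<3) | (x>>5), since
        // -3&7 == 5; there is no 8- or 16-bit rotate instruction, so these are
        // built from a pair of shifts.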
   126  (RotateLeft(32|64) ...) => ((ROTLW|ROTL) ...)
   127  
   128  // Constant rotate generation
   129  (ROTLW  x (MOVDconst [c])) => (ROTLWconst  x [c&31])
   130  (ROTL   x (MOVDconst [c])) => (ROTLconst   x [c&63])
   131  
   132  // Combine rotate and mask operations
   133  (Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
   134  (AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
   135  (Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
   136  (AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
   137  
   138  // Note, any rotated word bitmask is still a valid word bitmask.
   139  (ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
   140  (ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
   141  
   142  (Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
   143  (Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
   144  (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
   145  (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
   146  
   147  (SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
   148  (SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
   149  (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
   150  (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
   151  
   152  // Merge shift right + shift left and clear left (e.g. for a table lookup)
   153  (CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
   154  (SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
   155  // The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
   156  (CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
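        // Illustrative sketch (hypothetical identifiers), expanding the example above:
        //	var tab [256]uint32
        //	v := tab[(x>>14)&0xFF]
        // The shift-and-mask index and the shift-left-and-clear used to scale it into
        // a byte offset can merge into a single RLWINM via the rules above.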
   157  
   158  // For large constant signed right shifts, keep only the sign bit.
   159  (Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
   160  (Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
   161  (Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
   162  (Rsh8x64  x (MOVDconst [c])) && uint64(c) >= 8  => (SRAWconst (SignExt8to32  x) [63])
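        // Illustrative: for int64 x, a shift such as x >> 70 is reduced to x >> 63,
        // leaving only copies of the sign bit (0 or -1), matching Go's defined
        // behaviour for over-wide signed shifts.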
   163  
   164  // constant shifts
   165  ((Lsh64|Rsh64|Rsh64U)x64  x (MOVDconst [c])) && uint64(c) < 64 => (S(L|RA|R)Dconst x [c])
   166  ((Lsh32|Rsh32|Rsh32U)x64  x (MOVDconst [c])) && uint64(c) < 32 => (S(L|RA|R)Wconst x [c])
   167  ((Rsh16|Rsh16U)x64  x (MOVDconst [c])) && uint64(c) < 16 => (SR(AW|W)const ((Sign|Zero)Ext16to32 x) [c])
   168  (Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
   169  ((Rsh8|Rsh8U)x64  x (MOVDconst [c])) && uint64(c) < 8 => (SR(AW|W)const ((Sign|Zero)Ext8to32 x) [c])
   170  (Lsh8x64  x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
   171  
   172  // Lower bounded shifts first. No need to check shift value.
   173  (Lsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
   174  (Lsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
   175  (Lsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
   176  (Lsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SLW x y)
   177  (Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
   178  (Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
   179  (Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
   180  (Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
   181  (Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD x y)
   182  (Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW x y)
   183  (Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
   184  (Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
   185  
   186  // non-constant shifts
   187  // If the shift amount is >= the operand width, use -1 as the shift count to shift all bits.
   188  ((Lsh64|Rsh64|Rsh64U)x64 x y)  => (S(L|RA|R)D  x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
   189  ((Rsh32|Rsh32U|Lsh32)x64 x y)  => (S(RA|R|L)W x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
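        // Here (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))) selects y when
        // y < 64 and -1 otherwise (see the ISEL auxInt table further down); a shift
        // count of -1 shifts out every bit, or replicates the sign bit for the
        // arithmetic right shifts.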
   190  
   191  (Rsh(16|16U)x64 x y)  => (SR(AW|W) ((Sign|Zero)Ext16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
   192  (Lsh16x64 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
   193  
   194  (Rsh(8|8U)x64 x y)  => (SR(AW|W) ((Sign|Zero)Ext8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
   195  (Lsh8x64 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
   196  
   197  ((Rsh64|Rsh64U|Lsh64)x32 x y)  => (S(RA|R|L)D x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
   198  ((Rsh32|Rsh32U|Lsh32)x32 x y)  => (S(RA|R|L)W x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
   199  
   200  (Rsh(16|16U)x32 x y)  => (SR(AW|W) ((Sign|Zero)Ext16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
   201  (Lsh16x32 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
   202  
   203  (Rsh(8|8U)x32 x y)  => (SR(AW|W) ((Sign|Zero)Ext8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
   204  (Lsh8x32 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
   205  
   206  ((Rsh64|Rsh64U|Lsh64)x16 x y)  => (S(RA|R|L)D x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
   207  
   208  ((Rsh32|Rsh32U|Lsh32)x16 x y)  => (S(RA|R|L)W x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
   209  
   210  (Rsh(16|16U)x16 x y)  => (S(RA|R)W ((Sign|Zero)Ext16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
   211  (Lsh16x16 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
   212  
   213  (Rsh(8|8U)x16 x y)  => (SR(AW|W) ((Sign|Zero)Ext8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
   214  (Lsh8x16 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
   215  
   216  
   217  ((Rsh64|Rsh64U|Lsh64)x8 x y)  => (S(RA|R|L)D x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
   218  
   219  ((Rsh32|Rsh32U|Lsh32)x8 x y)  => (S(RA|R|L)W x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
   220  
   221  (Rsh(16|16U)x8 x y)  => (S(RA|R)W ((Sign|Zero)Ext16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
   222  (Lsh16x8 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
   223  
   224  (Rsh(8|8U)x8 x y)  => (S(RA|R)W ((Sign|Zero)Ext8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
   225  (Lsh8x8 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
   226  
   227  // Cleaning up shift ops
   228  (ISEL [0] (Select0 (ANDCCconst [d] y)) (MOVDconst [-1]) (CMPU (Select0 (ANDCCconst [d] y)) (MOVDconst [c]))) && c >= d => (Select0 (ANDCCconst [d] y))
   229  (ISEL [0] (Select0 (ANDCCconst [d] y)) (MOVDconst [-1]) (CMPUconst [c] (Select0 (ANDCCconst [d] y)))) && c >= d => (Select0 (ANDCCconst [d] y))
   230  (ORN x (MOVDconst [-1])) => x
   231  
   232  (S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
   233  (S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
   234  
   235  (Addr {sym} base) => (MOVDaddr {sym} [0] base)
   236  (LocalAddr {sym} base _) => (MOVDaddr {sym} base)
   237  (OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
   238  
   239  // TODO: optimize these cases?
   240  (Ctz32NonZero ...) => (Ctz32 ...)
   241  (Ctz64NonZero ...) => (Ctz64 ...)
   242  
   243  (Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
   244  (Ctz64 x) => (CNTTZD x)
   245  (Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
   246  (Ctz32 x) => (CNTTZW (MOVWZreg x))
   247  (Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
   248  (Ctz8 x)  => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
   249  
   250  (BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
   251  (BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
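        // BitLen64(x) = 64 - leading zeros of x; SUBFCconst [64] computes 64 - CNTLZD(x)
        // (subtract-from-constant, see the arithmetic constant ops further down).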
   252  
   253  (PopCount64 ...) => (POPCNTD ...)
   254  (PopCount(32|16|8) x) => (POPCNT(W|W|B) (MOV(W|H|B)Zreg x))
   255  
   256  (And(64|32|16|8) ...) => (AND ...)
   257  (Or(64|32|16|8) ...) => (OR ...)
   258  (Xor(64|32|16|8) ...) => (XOR ...)
   259  
   260  (Neg(64|32|16|8) ...) => (NEG ...)
   261  (Neg(64|32)F ...) => (FNEG ...)
   262  
   263  (Com(64|32|16|8) x) => (NOR x x)
   264  
   265  // Lowering boolean ops
   266  (AndB ...) => (AND ...)
   267  (OrB ...) => (OR ...)
   268  (Not x) => (XORconst [1] x)
   269  
   270  // Merge logical operations
   271  (AND x (NOR y y)) => (ANDN x y)
   272  (OR x (NOR y y)) => (ORN x y)
   273  
   274  // Lowering comparisons
   275  (EqB x y)  => (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
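        // EQV is bitwise equivalence (not-xor), so bit 0 of (EQV x y) is 1 exactly when
        // the two boolean operands are equal; the ANDCCconst [1] extracts that bit.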
   276  // Choosing sign vs. zero extension based on operand signedness sets up later sign/zero-extension elision.
   277  (Eq(8|16) x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   278  (Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   279  (Eq(32|64|Ptr) x y) => (Equal ((CMPW|CMP|CMP) x y))
   280  (Eq(32|64)F x y) => (Equal (FCMPU x y))
   281  
   282  (NeqB ...) => (XOR ...)
   283  // Like Eq8 and Eq16, prefer sign extension, which is likely to enable later elision.
   284  (Neq(8|16) x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   285  (Neq(8|16) x y)  => (NotEqual (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   286  (Neq(32|64|Ptr) x y) => (NotEqual ((CMPW|CMP|CMP) x y))
   287  (Neq(32|64)F x y) => (NotEqual (FCMPU x y))
   288  
   289  (Less(8|16) x y)  => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   290  (Less(32|64) x y) => (LessThan ((CMPW|CMP) x y))
   291  (Less(32|64)F x y) => (FLessThan (FCMPU x y))
   292  
   293  (Less(8|16)U x y)  => (LessThan (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   294  (Less(32|64)U x y) => (LessThan ((CMPWU|CMPU) x y))
   295  
   296  (Leq(8|16) x y)  => (LessEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   297  (Leq(32|64) x y) => (LessEqual ((CMPW|CMP) x y))
   298  (Leq(32|64)F x y) => (FLessEqual (FCMPU x y))
   299  
   300  (Leq(8|16)U x y)  => (LessEqual (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   301  (Leq(32|64)U x y) => (LessEqual (CMP(WU|U) x y))
   302  
   303  // Absorb pseudo-ops into blocks.
   304  (If (Equal cc) yes no) => (EQ cc yes no)
   305  (If (NotEqual cc) yes no) => (NE cc yes no)
   306  (If (LessThan cc) yes no) => (LT cc yes no)
   307  (If (LessEqual cc) yes no) => (LE cc yes no)
   308  (If (GreaterThan cc) yes no) => (GT cc yes no)
   309  (If (GreaterEqual cc) yes no) => (GE cc yes no)
   310  (If (FLessThan cc) yes no) => (FLT cc yes no)
   311  (If (FLessEqual cc) yes no) => (FLE cc yes no)
   312  (If (FGreaterThan cc) yes no) => (FGT cc yes no)
   313  (If (FGreaterEqual cc) yes no) => (FGE cc yes no)
   314  
   315  (If cond yes no) => (NE (CMPWconst [0] (Select0 <typ.UInt32> (ANDCCconst [1] cond))) yes no)
   316  
   317  // Absorb boolean tests into block
   318  (NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc)))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
   319  (NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc)))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
   320  
   321  // Elide compares of bit tests
   322  ((EQ|NE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
   323  ((EQ|NE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
   324  
   325  // absorb flag constants into branches
   326  (EQ (FlagEQ) yes no) => (First yes no)
   327  (EQ (FlagLT) yes no) => (First no yes)
   328  (EQ (FlagGT) yes no) => (First no yes)
   329  
   330  (NE (FlagEQ) yes no) => (First no yes)
   331  (NE (FlagLT) yes no) => (First yes no)
   332  (NE (FlagGT) yes no) => (First yes no)
   333  
   334  (LT (FlagEQ) yes no) => (First no yes)
   335  (LT (FlagLT) yes no) => (First yes no)
   336  (LT (FlagGT) yes no) => (First no yes)
   337  
   338  (LE (FlagEQ) yes no) => (First yes no)
   339  (LE (FlagLT) yes no) => (First yes no)
   340  (LE (FlagGT) yes no) => (First no yes)
   341  
   342  (GT (FlagEQ) yes no) => (First no yes)
   343  (GT (FlagLT) yes no) => (First no yes)
   344  (GT (FlagGT) yes no) => (First yes no)
   345  
   346  (GE (FlagEQ) yes no) => (First yes no)
   347  (GE (FlagLT) yes no) => (First no yes)
   348  (GE (FlagGT) yes no) => (First yes no)
   349  
   350  // absorb InvertFlags into branches
   351  (LT (InvertFlags cmp) yes no) => (GT cmp yes no)
   352  (GT (InvertFlags cmp) yes no) => (LT cmp yes no)
   353  (LE (InvertFlags cmp) yes no) => (GE cmp yes no)
   354  (GE (InvertFlags cmp) yes no) => (LE cmp yes no)
   355  (EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
   356  (NE (InvertFlags cmp) yes no) => (NE cmp yes no)
   357  
   358  // constant comparisons
   359  (CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
   360  (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  => (FlagLT)
   361  (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  => (FlagGT)
   362  
   363  (CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
   364  (CMPconst (MOVDconst [x]) [y]) && x<y  => (FlagLT)
   365  (CMPconst (MOVDconst [x]) [y]) && x>y  => (FlagGT)
   366  
   367  (CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  => (FlagEQ)
   368  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
   369  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
   370  
   371  (CMPUconst (MOVDconst [x]) [y]) && x==y  => (FlagEQ)
   372  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
   373  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
   374  
   375  // absorb flag constants into boolean values
   376  (Equal (FlagEQ)) => (MOVDconst [1])
   377  (Equal (FlagLT)) => (MOVDconst [0])
   378  (Equal (FlagGT)) => (MOVDconst [0])
   379  
   380  (NotEqual (FlagEQ)) => (MOVDconst [0])
   381  (NotEqual (FlagLT)) => (MOVDconst [1])
   382  (NotEqual (FlagGT)) => (MOVDconst [1])
   383  
   384  (LessThan (FlagEQ)) => (MOVDconst [0])
   385  (LessThan (FlagLT)) => (MOVDconst [1])
   386  (LessThan (FlagGT)) => (MOVDconst [0])
   387  
   388  (LessEqual (FlagEQ)) => (MOVDconst [1])
   389  (LessEqual (FlagLT)) => (MOVDconst [1])
   390  (LessEqual (FlagGT)) => (MOVDconst [0])
   391  
   392  (GreaterThan (FlagEQ)) => (MOVDconst [0])
   393  (GreaterThan (FlagLT)) => (MOVDconst [0])
   394  (GreaterThan (FlagGT)) => (MOVDconst [1])
   395  
   396  (GreaterEqual (FlagEQ)) => (MOVDconst [1])
   397  (GreaterEqual (FlagLT)) => (MOVDconst [0])
   398  (GreaterEqual (FlagGT)) => (MOVDconst [1])
   399  
   400  // absorb InvertFlags into boolean values
   401  ((Equal|NotEqual|LessThan|GreaterThan|LessEqual|GreaterEqual) (InvertFlags x)) => ((Equal|NotEqual|GreaterThan|LessThan|GreaterEqual|LessEqual) x)
   402  
   403  
   404  // Elide compares of bit tests
   405  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
   406  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
   407  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
   408  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
   409  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
   410  
   411  // Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
   412  (CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
   413  // Fold any CR -> GPR -> CR transfers when applying the above rule.
   414  (ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp)))) => (ISEL [c] x y cmp)
   415  
   416  // Lowering loads
   417  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
   418  (Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
   419  (Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
   420  (Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
   421  (Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
   422  (Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
   423  (Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
   424  (Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) => (MOVBZload ptr mem)
   425  
   426  (Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
   427  (Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
   428  
   429  (Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
   430  (Store {t} ptr val mem) && t.Size() == 8 && is32BitFloat(val.Type) => (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) => x -- type is wrong
   431  (Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
   432  (Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
   433  (Store {t} ptr val mem) && t.Size() == 4 && is32BitInt(val.Type) => (MOVWstore ptr val mem)
   434  (Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   435  (Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   436  
   437  // Using Zero instead of LoweredZero allows the
   438  // target address to be folded where possible.
   439  (Zero [0] _ mem) => mem
   440  (Zero [1] destptr mem) => (MOVBstorezero destptr mem)
   441  (Zero [2] destptr mem) =>
   442  	(MOVHstorezero destptr mem)
   443  (Zero [3] destptr mem) =>
   444  	(MOVBstorezero [2] destptr
   445  		(MOVHstorezero destptr mem))
   446  (Zero [4] destptr mem) =>
   447  	(MOVWstorezero destptr mem)
   448  (Zero [5] destptr mem) =>
   449  	(MOVBstorezero [4] destptr
   450          	(MOVWstorezero destptr mem))
   451  (Zero [6] destptr mem) =>
   452  	(MOVHstorezero [4] destptr
   453  		(MOVWstorezero destptr mem))
   454  (Zero [7] destptr mem) =>
   455  	(MOVBstorezero [6] destptr
   456  		(MOVHstorezero [4] destptr
   457  			(MOVWstorezero destptr mem)))
   458  
   459  (Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem)
   460  (Zero [12] {t} destptr mem) =>
   461          (MOVWstorezero [8] destptr
   462                  (MOVDstorezero [0] destptr mem))
   463  (Zero [16] {t} destptr mem) =>
   464         (MOVDstorezero [8] destptr
   465                  (MOVDstorezero [0] destptr mem))
   466  (Zero [24] {t} destptr mem) =>
   467         (MOVDstorezero [16] destptr
   468                 (MOVDstorezero [8] destptr
   469                         (MOVDstorezero [0] destptr mem)))
   470  (Zero [32] {t} destptr mem) =>
   471         (MOVDstorezero [24] destptr
   472                 (MOVDstorezero [16] destptr
   473                         (MOVDstorezero [8] destptr
   474                                 (MOVDstorezero [0] destptr mem))))
   475  
   476  // Handle cases not handled above
   477  // Lowered Short cases do not generate loops, and as a result don't clobber
   478  // the address registers or flags.
   479  (Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
   480  (Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
   481  (Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
   482  (Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
   483  
   484  // moves
   485  (Move [0] _ _ mem) => mem
   486  (Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
   487  (Move [2] dst src mem) =>
   488          (MOVHstore dst (MOVHZload src mem) mem)
   489  (Move [4] dst src mem) =>
   490  	(MOVWstore dst (MOVWZload src mem) mem)
   491  // MOVD for load and store must have offsets that are a multiple of 4
   492  (Move [8] {t} dst src mem) =>
   493  	(MOVDstore dst (MOVDload src mem) mem)
   494  (Move [3] dst src mem) =>
   495          (MOVBstore [2] dst (MOVBZload [2] src mem)
   496                  (MOVHstore dst (MOVHload src mem) mem))
   497  (Move [5] dst src mem) =>
   498          (MOVBstore [4] dst (MOVBZload [4] src mem)
   499                  (MOVWstore dst (MOVWZload src mem) mem))
   500  (Move [6] dst src mem) =>
   501          (MOVHstore [4] dst (MOVHZload [4] src mem)
   502                  (MOVWstore dst (MOVWZload src mem) mem))
   503  (Move [7] dst src mem) =>
   504          (MOVBstore [6] dst (MOVBZload [6] src mem)
   505                  (MOVHstore [4] dst (MOVHZload [4] src mem)
   506                          (MOVWstore dst (MOVWZload src mem) mem)))
   507  
   508  // Large move uses a loop. Since the address is computed and the
   509  // offset is zero, any alignment can be used.
   510  (Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
   511          (LoweredMove [s] dst src mem)
   512  (Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
   513          (LoweredQuadMoveShort [s] dst src mem)
   514  (Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
   515          (LoweredQuadMove [s] dst src mem)
   516  
   517  // Calls
   518  // Lowering calls
   519  (StaticCall ...) => (CALLstatic ...)
   520  (ClosureCall ...) => (CALLclosure ...)
   521  (InterCall ...) => (CALLinter ...)
   522  (TailCall ...) => (CALLtail ...)
   523  
   524  // Miscellaneous
   525  (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   526  (GetCallerSP ...) => (LoweredGetCallerSP ...)
   527  (GetCallerPC ...) => (LoweredGetCallerPC ...)
   528  (IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
   529  (IsInBounds idx len) => (LessThan (CMPU idx len))
   530  (IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
   531  (NilCheck ...) => (LoweredNilCheck ...)
   532  
   533  // Write barrier.
   534  (WB ...) => (LoweredWB ...)
   535  
   536  // Publication barrier as intrinsic
   537  (PubBarrier ...) => (LoweredPubBarrier ...)
   538  
   539  (PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
   540  (PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
   541  (PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
   542  
   543  // Optimizations
   544  // Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
   545  // so ORconst, XORconst easily expand into a pair.
   546  
   547  // Include very-large constants in the const-const case.
   548  (AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
   549  (OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
   550  (XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
   551  (ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
   552  (ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
   553  (NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
   554  
   555  // Discover consts
   556  (AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
   557  (XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
   558  (OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
   559  
   560  // Simplify consts
   561  (Select0 (ANDCCconst [c] (Select0 (ANDCCconst [d] x)))) => (Select0 (ANDCCconst [c&d] x))
   562  (ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
   563  (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
   564  (Select0 (ANDCCconst [-1] x)) => x
   565  (Select0 (ANDCCconst [0] _)) => (MOVDconst [0])
   566  (XORconst [0] x) => x
   567  (ORconst [-1] _) => (MOVDconst [-1])
   568  (ORconst [0] x) => x
   569  
   570  // zero-extend of small and => small and
   571  (MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y
   572  (MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y
   573  (MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y
   574  (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
   575  
   576  // sign extend of small-positive and => small-positive-and
   577  (MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y
   578  (MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y
   579  (MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as a 32-bit value it is > 0
   580  (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
   581  
   582  // small and of zero-extend => either zero-extend or small and
   583  (Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
   584  (Select0 (ANDCCconst [0xFF] y:(MOVBreg _))) => y
   585  (Select0 (ANDCCconst [c] y:(MOVHZreg _)))  && c&0xFFFF == 0xFFFF => y
   586  (Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _))) => y
   587  
   588  (AND (MOVDconst [c]) y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF => y
   589  (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
   590  // normal case
   591  (Select0 (ANDCCconst [c] (MOV(B|BZ)reg x))) => (Select0 (ANDCCconst [c&0xFF] x))
   592  (Select0 (ANDCCconst [c] (MOV(H|HZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
   593  (Select0 (ANDCCconst [c] (MOV(W|WZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
   594  
   595  // Eliminate unnecessary sign/zero extend following right shift
   596  (MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
   597  (MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
   598  (MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
   599  (MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
   600  (MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
   601  (MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
   602  
   603  (MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 32 => (S(R|RA)Wconst [c] x)
   604  (MOV(HZ|H)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 16 => (S(R|RA)Wconst [c] x)
   605  (MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) == 8 => (S(R|RA)Wconst [c] x)
   606  
   607  // initial right shift will handle sign/zero extend
   608  (MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
   609  (MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
   610  (MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
   611  (MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
   612  (MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
   613  (MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
   614  (MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
   615  (MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
   616  
   617  (MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
   618  (MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
   619  (MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
   620  (MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
   621  (MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
   622  (MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
   623  (MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
   624  (MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
   625  
   626  (MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
   627  (MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
   628  (MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
   629  (MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
   630  
   631  // Various redundant zero/sign extension combinations.
   632  (MOVBZreg y:(MOVBZreg _)) => y  // repeat
   633  (MOVBreg y:(MOVBreg _)) => y // repeat
   634  (MOVBreg (MOVBZreg x)) => (MOVBreg x)
   635  (MOVBZreg (MOVBreg x)) => (MOVBZreg x)
   636  
   637  // H - there are more combinations than these
   638  
   639  (MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat
   640  (MOVHZreg y:(MOVHBRload _ _)) => y
   641  
   642  (MOVHreg y:(MOV(H|B)reg _)) => y // repeat
   643  
   644  (MOV(H|HZ)reg y:(MOV(HZ|H)reg x)) => (MOV(H|HZ)reg x)
   645  
   646  // W - there are more combinations than these
   647  
   648  (MOV(WZ|WZ|WZ|W|W|W)reg y:(MOV(WZ|HZ|BZ|W|H|B)reg _)) => y // repeat
   649  (MOVWZreg y:(MOV(H|W)BRload _ _)) => y
   650  
   651  (MOV(W|WZ)reg y:(MOV(WZ|W)reg x)) => (MOV(W|WZ)reg x)
   652  
   653  // Truncate, then logical op, then truncate: drop the inner truncate when it is at least as wide as the outer one.
   654  (MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
   655  (MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
   656  (MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
   657  (MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   658  (MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   659  (MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   660  
   661  (MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z
   662  (MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
   663  (MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z
   664  (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z
   665  
   666  // Arithmetic constant ops
   667  
   668  (ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
   669  (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
   670  (ADDconst [0] x) => x
   671  (SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
   672  
   673  (ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
   674  (ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable
   675  
   676  (MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
   677  
   678  // Subtract from (with carry, but ignored) constant.
   679  // Note, these clobber the carry bit.
   680  (SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
   681  (SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
   682  (SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
   683  (SUBFCconst [0] x) => (NEG x)
   684  (ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
   685  (NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
   686  (NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
   687  (NEG (SUB x y)) => (SUB y x)
   688  (NEG (NEG x)) => x
   689  
   690  // Use register moves instead of stores and loads to move int<=>float values
   691  // Common with math Float64bits, Float64frombits
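        // Illustrative: math.Float64bits(f) would otherwise round-trip through memory
        // (FMOVDstore followed by MOVDload); the rules below replace that pair with a
        // single MFVSRD, and the MOVDstore/FMOVDload pair of math.Float64frombits with MTVSRD.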
   692  (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
   693  (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
   694  
   695  (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
   696  (MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
   697  
   698  (MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
   699  (MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
   700  
   701  (MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
   702  (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
   703  
   704  // Fold offsets for stores.
   705  (MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
   706  
   707  (FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
   708  
   709  // Fold address into load/store.
   710  // The assembler needs to generate several instructions and use a
   711  // temp register when accessing a global, and each time it will reload
   712  // the temp register. So don't fold the address of a global unless there
   713  // is only one use.
   714  (MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   715  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   716          (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   717  
   718  (FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   719  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   720          (FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   721  
   722  (MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   723  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   724          (MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   725  (MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   726  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   727          (MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   728  (FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   729  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   730          (FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   731  
   732  // Fold offsets for loads.
   733  (FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
   734  
   735  (MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
   736  
   737  // Determine load + addressing that can be done as a register indexed load
   738  (MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
   739  
   740  // Determine if there is benefit to using a non-indexed load, since that saves the load
   741  // of the index register. With MOVDload and MOVWload, there is no benefit if the offset
   742  // value is not a multiple of 4, since that results in an extra instruction in the base
   743  // register address computation.
   744  (MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
   745  (MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   746  (MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
   747  (MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   748  
   749  // Store of zero => storezero
   750  (MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem)
   751  
   752  // Fold offsets for storezero
   753  (MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
   754      (MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem)
   755  
   756  // Stores with addressing that can be done as indexed stores
   757  (MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
   758  
   759  // Stores with constant index values can be done without indexed instructions
   760  // No need to lower the idx cases if c%4 is not 0
   761  (MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
   762  (MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   763  (MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
   764  (MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   765  
   766  // Fold symbols into storezero
   767  (MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   768  	&& (x.Op != OpSB || p.Uses == 1) =>
   769      (MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   770  
   771  // atomic intrinsics
   772  (AtomicLoad(8|32|64|Ptr)  ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
   773  (AtomicLoadAcq(32|64)     ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
   774  
   775  (AtomicStore(8|32|64)    ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
   776  (AtomicStoreRel(32|64)   ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
   777  
   778  (AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
   779  
   780  (AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
   781  
   782  (AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
   783  (AtomicCompareAndSwapRel32   ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
   784  
   785  (AtomicAnd(8|32)  ...) => (LoweredAtomicAnd(8|32)  ...)
   786  (AtomicOr(8|32)   ...) => (LoweredAtomicOr(8|32)   ...)
   787  
   788  (Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
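        // Slicemask(x) is 0 when x == 0 and all ones otherwise: for x > 0, (NEG x) is
        // negative, so the arithmetic right shift by 63 fills the result with one bits.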
   789  
   790  // Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
   791  // This may interact with other patterns in the future. (Compare with arm64)
   792  (MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
   793  (MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
   794  (MOV(H|W)Zreg x:(MOVHZload _ _)) => x
   795  (MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
   796  (MOV(H|W)reg x:(MOVHload _ _)) => x
   797  (MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
   798  (MOV(WZ|W)reg x:(MOV(WZ|W)load _ _)) => x
   799  (MOV(WZ|W)reg x:(MOV(WZ|W)loadidx _ _ _)) => x
   800  (MOV(B|W)Zreg x:(Select0 (LoweredAtomicLoad(8|32) _ _))) => x
   801  
   802  // don't extend if argument is already extended
   803  (MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) => x
   804  (MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) => x
   805  (MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) => x
   806  (MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) => x
   807  (MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) => x
   808  (MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) => x
   809  
   810  (MOVBZreg (MOVDconst [c]))  => (MOVDconst [int64(uint8(c))])
   811  (MOVBreg (MOVDconst [c]))  => (MOVDconst [int64(int8(c))])
   812  (MOVHZreg (MOVDconst [c]))  => (MOVDconst [int64(uint16(c))])
   813  (MOVHreg (MOVDconst [c]))  => (MOVDconst [int64(int16(c))])
   814  (MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
   815  (MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
   816  
   817  // Implement clrsldi and clrslwi extended mnemonics as described in
   818  // ISA 3.0 section C.8. The AuxInt field contains the values needed for
   819  // the instructions, packed together since only one AuxInt field is available.
   820  (SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
   821  (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
   822  (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
   823  
   824  (SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
   825  (SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
   826  (SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
   827  (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
   828  (SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
   829  (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
   830  // special case for power9
   831  (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
   832  
   833  // Lose widening ops fed to stores
   834  (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   835  (MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   836  (MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   837  (MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   838  (MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   839  (MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
   840  (MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
   841  (MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
   842  (MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   843  (MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   844  (MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore {sym} ptr x mem)
   845  (MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore {sym} ptr x mem)
   846  
   847  // Lose W-widening ops fed to compare-W
   848  (CMP(W|WU) x (MOV(W|WZ)reg y)) => (CMP(W|WU) x y)
   849  (CMP(W|WU) (MOV(W|WZ)reg x) y) => (CMP(W|WU) x y)
   850  
   851  (CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
   852  (CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
   853  (CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
   854  (CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
   855  
   856  (CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
   857  (CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
   858  (CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
   859  (CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
   860  
   861  // Canonicalize the order of arguments to comparisons - helps with CSE.
   862  ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
   863  
   864  // ISEL auxInt values 0=LT 1=GT 2=EQ   arg2 ? arg0 : arg1
   865  // ISEL auxInt values 4=GE 5=LE 6=NE   !arg2 ? arg1 : arg0
   866  // ISELB special case where arg0, arg1 values are 0, 1
   867  
   868  (Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
   869  (NotEqual cmp) => (ISELB [6] (MOVDconst [1]) cmp)
   870  (LessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
   871  (FLessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
   872  (FLessEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
   873  (GreaterEqual cmp) => (ISELB [4] (MOVDconst [1]) cmp)
   874  (GreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
   875  (FGreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
   876  (FGreaterEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
   877  (LessEqual cmp) => (ISELB [5] (MOVDconst [1]) cmp)
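        // e.g. (LessThan cmp) yields 1 when cmp is LT and 0 otherwise; the FLessEqual
        // and FGreaterEqual forms add an outer EQ select so that equality also yields 1.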
   878  
   879  (ISELB [0] _ (FlagLT)) => (MOVDconst [1])
   880  (ISELB [0] _ (Flag(GT|EQ))) => (MOVDconst [0])
   881  (ISELB [1] _ (FlagGT)) => (MOVDconst [1])
   882  (ISELB [1] _ (Flag(LT|EQ))) => (MOVDconst [0])
   883  (ISELB [2] _ (FlagEQ)) => (MOVDconst [1])
   884  (ISELB [2] _ (Flag(LT|GT))) => (MOVDconst [0])
   885  (ISELB [4] _ (FlagLT)) => (MOVDconst [0])
   886  (ISELB [4] _ (Flag(GT|EQ))) => (MOVDconst [1])
   887  (ISELB [5] _ (FlagGT)) => (MOVDconst [0])
   888  (ISELB [5] _ (Flag(LT|EQ))) => (MOVDconst [1])
   889  (ISELB [6] _ (FlagEQ)) => (MOVDconst [0])
   890  (ISELB [6] _ (Flag(LT|GT))) => (MOVDconst [1])
   891  
   892  (ISEL [2] x _ (FlagEQ)) => x
   893  (ISEL [2] _ y (Flag(LT|GT))) => y
   894  
   895  (ISEL [6] _ y (FlagEQ)) => y
   896  (ISEL [6] x _ (Flag(LT|GT))) => x
   897  
   898  (ISEL [0] _ y (Flag(EQ|GT))) => y
   899  (ISEL [0] x _ (FlagLT)) => x
   900  
   901  (ISEL [5] _ x (Flag(EQ|LT))) => x
   902  (ISEL [5] y _ (FlagGT)) => y
   903  
   904  (ISEL [1] _ y (Flag(EQ|LT))) => y
   905  (ISEL [1] x _ (FlagGT)) => x
   906  
   907  (ISEL [4] x _ (Flag(EQ|GT))) => x
   908  (ISEL [4] _ y (FlagLT)) => y
   909  
   910  (ISEL [2] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
   911  (ISEL [6] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
   912  (ISELB [2] x ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
   913  (ISELB [6] x ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (Select0 <typ.UInt64> (ANDCCconst [1] z ))
   914  
   915  (ISELB [2] x (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (ISELB [2] x (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
   916  (ISELB [6] x (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (ISELB [6] x (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
   917  
   918  // Only CMPconst for these in case AND|OR|XOR result is > 32 bits
   919  (ISELB [2] x (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (ISELB [2] x (Select1 <types.TypeFlags> (ANDCC y z )))
   920  (ISELB [6] x (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (ISELB [6] x (Select1 <types.TypeFlags> (ANDCC y z )))
   921  
   922  (ISELB [2] x (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (ISELB [2] x (Select1 <types.TypeFlags> (ORCC y z )))
   923  (ISELB [6] x (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (ISELB [6] x (Select1 <types.TypeFlags> (ORCC y z )))
   924  
   925  (ISELB [2] x (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (ISELB [2] x (Select1 <types.TypeFlags> (XORCC y z )))
   926  (ISELB [6] x (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (ISELB [6] x (Select1 <types.TypeFlags> (XORCC y z )))
   927  
   928  (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool)
   929  (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool)
   930  (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool)
   931  (ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
   932  (ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
   933  (ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
   934  (XORconst [1] (ISELB [6] (MOVDconst [1]) cmp)) => (ISELB [2] (MOVDconst [1]) cmp)
   935  (XORconst [1] (ISELB [5] (MOVDconst [1]) cmp)) => (ISELB [1] (MOVDconst [1]) cmp)
   936  (XORconst [1] (ISELB [4] (MOVDconst [1]) cmp)) => (ISELB [0] (MOVDconst [1]) cmp)
   937  
   938  // A particular pattern seen in cgo code:
   939  (AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x))
   940  
   941  // floating point negative abs
   942  (FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
   943  
   944  // floating-point fused multiply-add/sub
   945  (F(ADD|SUB) (FMUL x y) z) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
   946  (F(ADDS|SUBS) (FMULS x y) z) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)
   947  
   948  // The following rules match the byte-by-byte loads and stores generated for the
   949  // encoding/binary functions UintXX (load) and PutUintXX (store), and merge them
   950  // into the single widest load or store possible.
   951  // Some are labeled big or little endian based on the order in which the bytes are
   952  // loaded or stored, not on the byte order of the machine. These rules are intended
   953  // for little endian machines. To implement them for big endian machines, most rules
   954  // would have to be duplicated with the resulting rule reversed, i.e., MOVHZload on
   955  // little endian would be MOVHBRload on big endian, and vice versa.
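        //
        // As a concrete example (hedged; it mirrors encoding/binary's LittleEndian.Uint32),
        // the byte ORs in the sketch below are merged by the rules that follow into a
        // single 32-bit load (MOVWZload):
        //
        //	func leUint32(b []byte) uint32 {
        //		_ = b[3] // bounds check hint
        //		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
        //	}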
   956  // b[0] | b[1]<<8 => load 16-bit Little endian
   957  (OR <t> x0:(MOVBZload [i0] {s} p mem)
   958  	o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
   959  	&& !config.BigEndian
   960  	&& i1 == i0+1
   961  	&& x0.Uses == 1 && x1.Uses == 1
   962  	&& o1.Uses == 1
   963  	&& mergePoint(b, x0, x1) != nil
   964  	&& clobber(x0, x1, o1)
   965  	 => @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
   966  
   967  // b[0]<<8 | b[1] => load 16-bit Big endian on Little endian arch.
   968  // Use byte-reverse indexed load for 2 bytes.
   969  (OR <t> x0:(MOVBZload [i1] {s} p mem)
   970  	o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
   971  	&& !config.BigEndian
   972  	&& i1 == i0+1
   973  	&& x0.Uses == 1 && x1.Uses == 1
   974  	&& o1.Uses == 1
   975  	&& mergePoint(b, x0, x1) != nil
   976  	&& clobber(x0, x1, o1)
   977  	  => @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
   978  
   979  // b[0]<<(n+8) | b[1]<<n => load 16-bit Big endian (where n%8 == 0)
   980  // Use byte-reverse indexed load for 2 bytes,
   981  // then shift left to the correct position. Used to match subrules
   982  // from longer rules.
   983  (OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
   984  	s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
   985  	&& !config.BigEndian
   986  	&& i1 == i0+1
   987  	&& n1%8 == 0
   988  	&& n2 == n1+8
   989  	&& x0.Uses == 1 && x1.Uses == 1
   990  	&& s0.Uses == 1 && s1.Uses == 1
   991  	&& mergePoint(b, x0, x1) != nil
   992  	&& clobber(x0, x1, s0, s1)
   993  	  => @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
   994  
   995  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit Little endian
   996  // Use byte-reverse indexed load for 4 bytes.
   997  (OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
   998  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
   999  	x0:(MOVHZload [i0] {s} p mem)))
  1000  	&& !config.BigEndian
  1001  	&& i2 == i0+2
  1002  	&& i3 == i0+3
  1003  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1004  	&& o0.Uses == 1
  1005  	&& s0.Uses == 1 && s1.Uses == 1
  1006  	&& mergePoint(b, x0, x1, x2) != nil
  1007  	&& clobber(x0, x1, x2, s0, s1, o0)
  1008  	 => @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
  1009  
  1010  // b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit Big endian order on Little endian arch
  1011  // Use byte-reverse indexed load for 4 bytes with computed address.
  1012  // Could be used to match subrules of a longer rule.
  1013  (OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
  1014  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
  1015  	x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
  1016  	&& !config.BigEndian
  1017  	&& i1 == i0+1
  1018  	&& i2 == i0+2
  1019  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1020  	&& o0.Uses == 1
  1021  	&& s0.Uses == 1 && s1.Uses == 1
  1022  	&& mergePoint(b, x0, x1, x2) != nil
  1023  	&& clobber(x0, x1, x2, s0, s1, o0)
  1024  	  => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1025  
  1026  // b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit Big endian order on Little endian arch
  1027  // Use byte-reverse indexed load for 4 bytes with computed address.
  1028  // Could be used to match subrules of a longer rule.
  1029  (OR <t> x0:(MOVBZload [i3] {s} p mem)
  1030  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
  1031  	s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
  1032  	&& !config.BigEndian
  1033  	&& i2 == i0+2
  1034  	&& i3 == i0+3
  1035  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1036  	&& o0.Uses == 1
  1037  	&& s0.Uses == 1 && s1.Uses == 1
  1038  	&& mergePoint(b, x0, x1, x2) != nil
  1039  	&& clobber(x0, x1, x2, s0, s1, o0)
  1040  	  => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1041  
  1042  // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 => load 32-bit Big endian order on Little endian arch
  1043  // Use byte-reverse indexed load for 4 bytes with computed address.
  1044  // Used to match longer rules.
  1045  (OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
  1046  	o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
  1047  	s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
  1048  	&& !config.BigEndian
  1049  	&& i2 == i0+2
  1050  	&& i3 == i0+3
  1051  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1052  	&& o0.Uses == 1
  1053  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  1054  	&& mergePoint(b, x0, x1, x2) != nil
  1055  	&& clobber(x0, x1, x2, s0, s1, s2, o0)
  1056  	  => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
  1057  
  1058  // b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 32-bit Big endian order on Little endian arch
  1059  // Use byte-reverse indexed load for 4 bytes with computed address.
  1060  // Used to match longer rules.
  1061  (OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
  1062          o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  1063          s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
  1064          && !config.BigEndian
  1065          && i1 == i0+1
  1066          && i2 == i0+2
  1067          && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1068          && o0.Uses == 1
  1069          && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  1070          && mergePoint(b, x0, x1, x2) != nil
  1071          && clobber(x0, x1, x2, s0, s1, s2, o0)
  1072            => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
  1073  
  1074  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit Little endian
  1075  // Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
  1076  // so matching shorter previously defined subrules is important.
  1077  // Offset must be a multiple of 4 for MOVD
  1078  (OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
  1079  	o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
  1080  	o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
  1081  	o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
  1082  	x0:(MOVWZload {s} [i0] p mem)))))
  1083  	&& !config.BigEndian
  1084  	&& i4 == i0+4
  1085  	&& i5 == i0+5
  1086  	&& i6 == i0+6
  1087  	&& i7 == i0+7
  1088  	&& x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  1089  	&& o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  1090  	&& s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  1091  	&& mergePoint(b, x0, x4, x5, x6, x7) != nil
  1092  	&& clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
  1093  	  => @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
  1094  
  1095  // b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit Big endian ordered bytes on Little endian arch
  1096  // Use byte-reverse indexed load of 8 bytes.
  1097  // Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
  1098  // so matching shorter previously defined subrules is important.
  1099  (OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
  1100  	o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  1101  	o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
  1102  	o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
  1103  	x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
  1104  	&& !config.BigEndian
  1105  	&& i1 == i0+1
  1106  	&& i2 == i0+2
  1107  	&& i3 == i0+3
  1108  	&& i4 == i0+4
  1109  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
  1110  	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
  1111  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1112  	&& mergePoint(b, x0, x1, x2, x3, x4) != nil
  1113  	&& clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
  1114  	  => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1115  
  1116  // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit Big endian ordered bytes on Little endian arch
  1117  // Use byte-reverse indexed load of 8 bytes.
  1118  // Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
  1119  // so matching shorter previously defined subrules is important.
  1120  (OR <t> x7:(MOVBZload [i7] {s} p mem)
  1121  	o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
  1122  	o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
  1123  	o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
  1124  	s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
  1125  	&& !config.BigEndian
  1126  	&& i4 == i0+4
  1127  	&& i5 == i0+5
  1128  	&& i6 == i0+6
  1129  	&& i7 == i0+7
  1130  	&& x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  1131  	&& o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  1132  	&& s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  1133  	&& mergePoint(b, x3, x4, x5, x6, x7) != nil
  1134  	&& clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
  1135  	=> @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
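        // Hedged example: a big endian 64-bit read on a little endian target, as in
        // encoding/binary's BigEndian.Uint64, is matched by the rules above and
        // becomes one byte-reversed load (MOVDBRload):
        //
        //	func beUint64(b []byte) uint64 {
        //		_ = b[7] // bounds check hint
        //		return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
        //			uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
        //	}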
  1136  
  1137  // 2 byte store Little endian as in:
  1138  //      b[0] = byte(v >> 16)
  1139  //      b[1] = byte(v >> 24)
  1140  // Added for use in matching longer rules.
  1141  (MOVBstore [i1] {s} p (SR(W|D)const w [24])
  1142          x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
  1143          && !config.BigEndian
  1144          && x0.Uses == 1
  1145          && i1 == i0+1
  1146          && clobber(x0)
  1147            => (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
  1148  
  1149  // 2 byte store Little endian as in:
  1150  //      b[0] = byte(v)
  1151  //      b[1] = byte(v >> 8)
  1152  (MOVBstore [i1] {s} p (SR(W|D)const w [8])
  1153  	x0:(MOVBstore [i0] {s} p w mem))
  1154  	&& !config.BigEndian
  1155  	&& x0.Uses == 1
  1156  	&& i1 == i0+1
  1157  	&& clobber(x0)
  1158  	  => (MOVHstore [i0] {s} p w mem)
  1159  
  1160  // 4 byte store Little endian as in:
  1161  //     b[0:1] = uint16(v)
  1162  //     b[2:3] = uint16(v >> 16)
  1163  (MOVHstore [i1] {s} p (SR(W|D)const w [16])
  1164  	x0:(MOVHstore [i0] {s} p w mem))
  1165  	&& !config.BigEndian
  1166  	&& x0.Uses == 1
  1167  	&& i1 == i0+2
  1168  	&& clobber(x0)
  1169  	  => (MOVWstore [i0] {s} p w mem)
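        // Hedged illustration of the staged merge: for byte stores like those in
        // encoding/binary's LittleEndian.PutUint32,
        //
        //	b[0] = byte(v)
        //	b[1] = byte(v >> 8)
        //	b[2] = byte(v >> 16)
        //	b[3] = byte(v >> 24)
        //
        // the 2 byte rules above first produce the two MOVHstore halves, and this
        // rule then merges them into a single MOVWstore.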
  1170  
  1171  // 4 byte store Big endian as in:
  1172  //     b[0] = byte(v >> 24)
  1173  //     b[1] = byte(v >> 16)
  1174  //     b[2] = byte(v >> 8)
  1175  //     b[3] = byte(v)
  1176  // Use byte-reverse indexed 4 byte store.
  1177  (MOVBstore [i3] {s} p w
  1178  	x0:(MOVBstore [i2] {s} p (SRWconst w [8])
  1179  	x1:(MOVBstore [i1] {s} p (SRWconst w [16])
  1180  	x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
  1181  	&& !config.BigEndian
  1182  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1183  	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3
  1184  	&& clobber(x0, x1, x2)
  1185  	  => (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
  1186  
  1187  // The 2 byte store rule appears after the 4 byte store rule so that the
  1188  // 2 byte match is not attempted first.
  1189  // If the 4 byte store were instead built on the 2 byte store, variations
  1190  // of the MOVDaddr subrule would appear, and additional rules would be
  1191  // needed to match them.
  1192  
  1193  // 2 byte store Big endian as in:
  1194  //      b[0] = byte(v >> 8)
  1195  //      b[1] = byte(v)
  1196  (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
  1197  	&& !config.BigEndian
  1198  	&& x0.Uses == 1
  1199  	&& i1 == i0+1
  1200  	&& clobber(x0)
  1201  	  => (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
  1202  
  1203  // 8 byte store Little endian as in:
  1204  //	b[0] = byte(v)
  1205  //	b[1] = byte(v >> 8)
  1206  //	b[2] = byte(v >> 16)
  1207  //	b[3] = byte(v >> 24)
  1208  //	b[4] = byte(v >> 32)
  1209  //	b[5] = byte(v >> 40)
  1210  //	b[6] = byte(v >> 48)
  1211  //	b[7] = byte(v >> 56)
  1212  // Built on previously defined rules
  1213  // Offset must be a multiple of 4 for MOVDstore
  1214  (MOVBstore [i7] {s} p (SRDconst w [56])
  1215  	x0:(MOVBstore [i6] {s} p (SRDconst w [48])
  1216  	x1:(MOVBstore [i5] {s} p (SRDconst w [40])
  1217  	x2:(MOVBstore [i4] {s} p (SRDconst w [32])
  1218  	x3:(MOVWstore [i0] {s} p w mem)))))
  1219  	&& !config.BigEndian
  1220  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
  1221  	&& i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1222  	&& clobber(x0, x1, x2, x3)
  1223  	  => (MOVDstore [i0] {s} p w mem)
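        // Hedged note on the staged merge: for source like
        //
        //	binary.LittleEndian.PutUint64(b, v)
        //
        // the stores of b[0] through b[3] are first merged into the MOVWstore
        // matched here by the 4 byte rule above, and this rule then widens the
        // result to a single MOVDstore.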
  1224  
  1225  // 8 byte store Big endian as in:
  1226  //      b[0] = byte(v >> 56)
  1227  //      b[1] = byte(v >> 48)
  1228  //      b[2] = byte(v >> 40)
  1229  //      b[3] = byte(v >> 32)
  1230  //      b[4] = byte(v >> 24)
  1231  //      b[5] = byte(v >> 16)
  1232  //      b[6] = byte(v >> 8)
  1233  //      b[7] = byte(v)
  1234  // Use byte-reverse indexed 8 byte store.
  1235  (MOVBstore [i7] {s} p w
  1236          x0:(MOVBstore [i6] {s} p (SRDconst w [8])
  1237          x1:(MOVBstore [i5] {s} p (SRDconst w [16])
  1238          x2:(MOVBstore [i4] {s} p (SRDconst w [24])
  1239          x3:(MOVBstore [i3] {s} p (SRDconst w [32])
  1240          x4:(MOVBstore [i2] {s} p (SRDconst w [40])
  1241          x5:(MOVBstore [i1] {s} p (SRDconst w [48])
  1242          x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
  1243          && !config.BigEndian
  1244          && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
  1245          && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1246          && clobber(x0, x1, x2, x3, x4, x5, x6)
  1247            => (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
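        // Hedged note: on a little endian target, source like
        //
        //	binary.BigEndian.PutUint64(b, v)
        //
        // lowers to the byte stores listed above, which this rule replaces with a
        // single byte-reversed 8 byte store (MOVDBRstore).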
  1248  
  1249  // Arch-specific inlining for small or disjoint runtime.memmove
  1250  (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore  _ src s3:(MOVDstore {t} _ dst mem)))))
  1251          && sz >= 0
  1252          && isSameCall(sym, "runtime.memmove")
  1253          && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1254          && isInlinableMemmove(dst, src, sz, config)
  1255          && clobber(s1, s2, s3, call)
  1256          => (Move [sz] dst src mem)
  1257  
  1258  // Match post-lowering calls, register version.
  1259  (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
  1260          && sz >= 0
  1261          && isSameCall(sym, "runtime.memmove")
  1262          && call.Uses == 1
  1263          && isInlinableMemmove(dst, src, sz, config)
  1264          && clobber(call)
  1265          => (Move [sz] dst src mem)
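        // Hedged sketch of a copy these rules can inline (size and names are
        // illustrative; the actual decision is made by isInlinableMemmove):
        //
        //	func copyHeader(dst, src []byte) {
        //		copy(dst[:8], src[:8]) // constant-size runtime.memmove call => (Move [8])
        //	}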
  1266  
  1267  // Prefetch instructions (TH specified using aux field)
  1268  // For DCBT Ra,Rb,TH, a value of TH indicates:
  1269  //     0, hint this cache line will be used soon. (PrefetchCache)
  1270  //     16, hint this cache line will not be used for long. (PrefetchCacheStreamed)
  1271  // See ISA 3.0 Book II 4.3.2 for more detail. https://openpower.foundation/specifications/isa/
  1272  (PrefetchCache ptr mem)          => (DCBT ptr mem [0])
  1273  (PrefetchCacheStreamed ptr mem)  => (DCBT ptr mem [16])
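        // Hedged note: the generic PrefetchCache and PrefetchCacheStreamed ops are
        // expected to come from the compiler intrinsics for runtime/internal/sys.Prefetch
        // and runtime/internal/sys.PrefetchStreamed (an assumption about this Go
        // version), e.g.
        //
        //	sys.Prefetch(uintptr(unsafe.Pointer(&buf[i])))          // DCBT with TH=0
        //	sys.PrefetchStreamed(uintptr(unsafe.Pointer(&buf[i])))  // DCBT with TH=16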
  1274