github.com/freddyisaac/sicortex-golang@v0.0.0-20231019035217-e03519e66f60/src/cmd/compile/internal/ssa/gen/S390X.rules

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64  x y) -> (ADD  x y)
(AddPtr x y) -> (ADD  x y)
(Add32  x y) -> (ADDW  x y)
(Add16  x y) -> (ADDW  x y)
(Add8   x y) -> (ADDW  x y)
(Add32F x y) -> (FADDS x y)
(Add64F x y) -> (FADD x y)

(Sub64  x y) -> (SUB  x y)
(SubPtr x y) -> (SUB  x y)
(Sub32  x y) -> (SUBW  x y)
(Sub16  x y) -> (SUBW  x y)
(Sub8   x y) -> (SUBW  x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

(Mul64  x y) -> (MULLD  x y)
(Mul32  x y) -> (MULLW  x y)
(Mul16  x y) -> (MULLW  x y)
(Mul8   x y) -> (MULLW  x y)
(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

(Div64  x y) -> (DIVD  x y)
(Div64u x y) -> (DIVDU x y)
// DIVW/DIVWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Div32  x y) -> (DIVW  (MOVWreg x) y)
(Div32u x y) -> (DIVWU (MOVWZreg x) y)
(Div16  x y) -> (DIVW  (MOVHreg x) (MOVHreg y))
(Div16u x y) -> (DIVWU (MOVHZreg x) (MOVHZreg y))
(Div8   x y) -> (DIVW  (MOVBreg x) (MOVBreg y))
(Div8u  x y) -> (DIVWU (MOVBZreg x) (MOVBZreg y))
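
// To illustrate the extension requirement (a hedged Go sketch, not part of
// the rule set): a 32-bit signed divide behaves like a 64-bit divide applied
// to sign-extended inputs, which is exactly what the inserted MOVWreg gives:
//
//	func div32(x, y int32) int32 {
//		return int32(int64(x) / int64(y)) // DIVW (MOVWreg x) y
//	}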

(Hmul64  x y) -> (MULHD  x y)
(Hmul64u x y) -> (MULHDU x y)
(Hmul32  x y) -> (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
(Hmul32u x y) -> (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
(Hmul16  x y) -> (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y)))
(Hmul16u x y) -> (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y)))
(Hmul8   x y) -> (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y)))
(Hmul8u  x y) -> (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y)))
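
// Only the 64-bit high-multiplies have dedicated instructions; the narrower
// ones widen both operands, multiply in full, and keep the upper half of the
// product. A hedged Go sketch of the 32-bit signed case lowered above:
//
//	func hmul32(x, y int32) int32 {
//		return int32((int64(x) * int64(y)) >> 32) // SRDconst [32] (MULLD ...)
//	}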

(Mod64  x y) -> (MODD  x y)
(Mod64u x y) -> (MODDU x y)
// MODW/MODWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Mod32  x y) -> (MODW  (MOVWreg x) y)
(Mod32u x y) -> (MODWU (MOVWZreg x) y)
(Mod16  x y) -> (MODW  (MOVHreg x) (MOVHreg y))
(Mod16u x y) -> (MODWU (MOVHZreg x) (MOVHZreg y))
(Mod8   x y) -> (MODW  (MOVBreg x) (MOVBreg y))
(Mod8u  x y) -> (MODWU (MOVBZreg x) (MOVBZreg y))

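// Avg64u computes the floored unsigned average (x+y)/2 without overflowing
// the intermediate sum: (x>>1) + (y>>1) + (x&y&1), where the final term
// restores the carry dropped when both low bits are set. Illustrative Go
// sketch of the identity:
//
//	func avg64u(x, y uint64) uint64 {
//		return x>>1 + y>>1 + (x & y & 1)
//	}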
(Avg64u <t> x y) -> (ADD (ADD <t> (SRDconst <t> x [1]) (SRDconst <t> y [1])) (ANDconst <t> (AND <t> x y) [1]))

(And64 x y) -> (AND x y)
(And32 x y) -> (ANDW x y)
(And16 x y) -> (ANDW x y)
(And8  x y) -> (ANDW x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (ORW x y)
(Or16 x y) -> (ORW x y)
(Or8  x y) -> (ORW x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XORW x y)
(Xor16 x y) -> (XORW x y)
(Xor8  x y) -> (XORW x y)

(Neg64  x) -> (NEG x)
(Neg32  x) -> (NEGW x)
(Neg16  x) -> (NEGW (MOVHreg x))
(Neg8   x) -> (NEGW (MOVBreg x))
(Neg32F x) -> (FNEGS x)
(Neg64F x) -> (FNEG x)

(Com64 x) -> (NOT x)
(Com32 x) -> (NOTW x)
(Com16 x) -> (NOTW x)
(Com8  x) -> (NOTW x)
(NOT x) && true -> (XOR (MOVDconst [-1]) x)
(NOTW x) && true -> (XORWconst [-1] x)

// Lowering boolean ops
(AndB x y) -> (ANDW x y)
(OrB x y) -> (ORW x y)
(Not x) -> (XORWconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
(OffPtr [off] ptr) && is32Bit(off) -> (ADDconst [off] ptr)
(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr)

// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
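// FLOGR ("find leftmost one") returns the position of the most significant
// set bit counted from the left, so it acts as a 64-bit leading-zero count
// (yielding 64 for a zero input), and (x-1)&^x turns the trailing zeros of x
// into a block of trailing ones. A hedged Go sketch of the identity, assuming
// the LeadingZeros64 correspondence just described:
//
//	import "math/bits"
//
//	func ctz64(x uint64) int {
//		return 64 - bits.LeadingZeros64((x-1)&^x)
//	}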
(Ctz64 <t> x) -> (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
(Ctz32 <t> x) -> (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))

(Bswap64 x) -> (MOVDBR x)
(Bswap32 x) -> (MOVWBR x)

(Sqrt x) -> (FSQRT x)

// Atomic loads.
(AtomicLoad32 ptr mem) -> (MOVWZatomicload ptr mem)
(AtomicLoad64 ptr mem) -> (MOVDatomicload ptr mem)
(AtomicLoadPtr ptr mem) -> (MOVDatomicload ptr mem)

// Atomic stores.
(AtomicStore32 ptr val mem) -> (MOVWatomicstore ptr val mem)
(AtomicStore64 ptr val mem) -> (MOVDatomicstore ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (MOVDatomicstore ptr val mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 (LAA ptr val mem) val)
(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 (LAAG ptr val mem) val)
(Select0 <t> (AddTupleFirst32 tuple val)) -> (ADDW val (Select0 <t> tuple))
(Select1     (AddTupleFirst32 tuple _  )) -> (Select1 tuple)
(Select0 <t> (AddTupleFirst64 tuple val)) -> (ADD val (Select0 <t> tuple))
(Select1     (AddTupleFirst64 tuple _  )) -> (Select1 tuple)
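
// LAA/LAAG atomically add to memory but return the value the location held
// *before* the addition, while Go's atomic add must return the new value;
// the AddTupleFirst wrappers above record val so the Select0 rules can re-add
// it. In hedged Go-like pseudocode (loadAndAdd is a hypothetical stand-in for
// LAA):
//
//	old := loadAndAdd(ptr, val) // LAA: atomically mem += val, yields old value
//	new := old + val            // the ADDW/ADD inserted by the Select0 rules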

// Atomic exchanges.
(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  x) -> (MOVBreg x)
(SignExt8to32  x) -> (MOVBreg x)
(SignExt8to64  x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to16  x) -> (MOVBZreg x)
(ZeroExt8to32  x) -> (MOVBZreg x)
(ZeroExt8to64  x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

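// Slicemask yields 0 when x == 0 and all ones when x > 0, computed below as
// ^((x-1)>>63) with an arithmetic shift: x-1 is negative exactly when x == 0,
// so smearing its sign bit and complementing gives the mask. Illustrative Go
// sketch:
//
//	func slicemask(x int64) int64 {
//		return ^((x - 1) >> 63)
//	}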
(Slicemask <t> x) -> (XOR (MOVDconst [-1]) (SRADconst <t> (SUBconst <t> x [1]) [63]))

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8  x) -> x
(Trunc32to8  x) -> x
(Trunc32to16 x) -> x
(Trunc64to8  x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Lowering float <-> int
(Cvt32to32F x) -> (CEFBRA x)
(Cvt32to64F x) -> (CDFBRA x)
(Cvt64to32F x) -> (CEGBRA x)
(Cvt64to64F x) -> (CDGBRA x)

(Cvt32Fto32 x) -> (CFEBRA x)
(Cvt32Fto64 x) -> (CGEBRA x)
(Cvt64Fto32 x) -> (CFDBRA x)
(Cvt64Fto64 x) -> (CGDBRA x)

(Cvt32Fto64F x) -> (LDEBR x)
(Cvt64Fto32F x) -> (LEDBR x)

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
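// SUBE(W)carrymask materializes the borrow of the unsigned compare as a mask:
// all ones when the shift amount is within range, zero otherwise. A hedged Go
// sketch of the branch-free pattern used below:
//
//	func lsh64(x, s uint64) uint64 {
//		var mask uint64
//		if s <= 63 { // SUBEcarrymask (CMPUconst s [63])
//			mask = ^uint64(0)
//		}
//		return (x << (s & 63)) & mask // SLD shifts modulo 64
//	}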
(Lsh64x64 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
(Lsh64x32 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
(Lsh64x16 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
(Lsh64x8  <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))

(Lsh32x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Lsh32x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Lsh32x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Lsh32x8  <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))

(Lsh16x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Lsh16x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Lsh16x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Lsh16x8  <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))

(Lsh8x64 <t> x y)  -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Lsh8x32 <t> x y)  -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Lsh8x16 <t> x y)  -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Lsh8x8  <t> x y)  -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))

(Lrot64 <t> x [c]) -> (RLLGconst <t> [c&63] x)
(Lrot32 <t> x [c]) -> (RLLconst <t> [c&31] x)

(Rsh64Ux64 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
(Rsh64Ux32 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
(Rsh64Ux16 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
(Rsh64Ux8  <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))

(Rsh32Ux64 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Rsh32Ux32 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Rsh32Ux16 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Rsh32Ux8  <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))

(Rsh16Ux64 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
(Rsh16Ux32 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
(Rsh16Ux16 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
(Rsh16Ux8  <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))

(Rsh8Ux64 <t> x y)  -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
(Rsh8Ux32 <t> x y)  -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
(Rsh8Ux16 <t> x y)  -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
(Rsh8Ux8  <t> x y)  -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift amount to -1 (all ones) if it is >= the width.
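// An equivalent hedged Go sketch, clamping the amount instead of masking the
// result, since a signed shift by >= width must yield 0 or -1 (the sign bit
// smeared across the word):
//
//	func rsh64(x int64, s uint64) int64 {
//		if s > 63 {
//			s = 63 // done branch-free below: y | ^carrymask
//		}
//		return x >> s
//	}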
(Rsh64x64 <t> x y) -> (SRAD <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [63])))))
(Rsh64x32 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [63])))))
(Rsh64x16 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
(Rsh64x8  <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))

(Rsh32x64 <t> x y) -> (SRAW <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [31])))))
(Rsh32x32 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [31])))))
(Rsh32x16 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
(Rsh32x8  <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))

(Rsh16x64 <t> x y) -> (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
(Rsh16x32 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
(Rsh16x16 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
(Rsh16x8  <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))

(Rsh8x64 <t> x y)  -> (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
(Rsh8x32 <t> x y)  -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
(Rsh8x16 <t> x y)  -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
(Rsh8x8  <t> x y)  -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))

// Lowering comparisons
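// Comparisons produce a boolean 0/1 value. The MOVDxx ops below are
// conditional moves selecting the second operand when the condition holds
// and the first otherwise, so each rule corresponds to a Go sketch like:
//
//	func less64(x, y int64) int64 {
//		r := int64(0) // MOVDconst [0]
//		if x < y {    // CMP x y
//			r = 1 // MOVDconst [1]
//		}
//		return r
//	}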
(Less64  x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Less32  x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Less16  x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Less8   x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Less64U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Less32U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Less16U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Less8U  x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
// Use a "greater than" with reversed operands to dodge the NaN case:
// any comparison involving NaN must evaluate to false.
(Less64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
(Less32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))

(Leq64  x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Leq32  x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Leq16  x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Leq8   x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Leq64U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Leq32U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Leq16U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Leq8U  x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
// Use a "greater than or equal" with reversed operands to dodge the NaN case.
(Leq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
(Leq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))

(Greater64  x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Greater32  x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Greater16  x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Greater8   x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Greater64U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Greater32U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Greater16U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Greater8U  x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
(Greater64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Greater32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Geq64  x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Geq32  x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Geq16  x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Geq8   x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Geq64U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Geq32U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Geq16U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Geq8U  x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
(Geq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Geq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Eq64  x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Eq32  x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Eq16  x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Eq8   x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(EqB   x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(EqPtr x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Eq64F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Eq32F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Neq64  x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Neq32  x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Neq16  x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Neq8   x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(NeqB   x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(NeqPtr x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Neq64F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Neq32F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

// Lowering stores
// These more specific FP versions of the Store pattern must come first.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)

(Store [8] ptr val mem) -> (MOVDstore ptr val mem)
(Store [4] ptr val mem) -> (MOVWstore ptr val mem)
(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)

// Lowering moves

// Load and store for small copies.
(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstore dst (MOVHZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstore dst (MOVWZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstore dst (MOVDload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 ->
	(MOVDstore [8] dst (MOVDload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 24 ->
	(MOVDstore [16] dst (MOVDload [16] src mem)
		(MOVDstore [8] dst (MOVDload [8] src mem)
			(MOVDstore dst (MOVDload src mem) mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstore [2] dst (MOVBZload [2] src mem)
		(MOVHstore dst (MOVHZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
	(MOVBstore [4] dst (MOVBZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
	(MOVHstore [4] dst (MOVHZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
	(MOVBstore [6] dst (MOVBZload [6] src mem)
		(MOVHstore [4] dst (MOVHZload [4] src mem)
			(MOVWstore dst (MOVWZload src mem) mem)))

// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256 ->
	(MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512 ->
	(MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768 ->
	(MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024 ->
	(MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))

// Move more than 1024 bytes using a loop.
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 1024 ->
	(LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst <src.Type> src [(SizeAndAlign(s).Size()/256)*256]) mem)
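
// MVC copies at most 256 bytes per instruction, so sizes up to 1024 bytes are
// handled by chaining up to four MVCs, and anything larger falls through to
// LoweredMove, which loops over 256-byte blocks and finishes with the
// Size()%256 remainder. An illustrative Go sketch of the chunking, with mvc
// as a hypothetical stand-in for the instruction:
//
//	for off := int64(0); off < n; off += 256 {
//		c := n - off
//		if c > 256 {
//			c = 256
//		}
//		mvc(dst+off, src+off, c) // copy c bytes
//	}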

// Lowering Zero instructions
(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVHstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
	(MOVHstoreconst [makeValAndOff(0,4)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
	(MOVWstoreconst [makeValAndOff(0,3)] destptr
		(MOVWstoreconst [0] destptr mem))

(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024 ->
	(CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem)

// Zero more than 1024 bytes using a loop.
(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 1024 ->
	(LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst <destptr.Type> destptr [(SizeAndAlign(s).Size()/256)*256]) mem)

// Lowering constants
(Const8   [val]) -> (MOVDconst [val])
(Const16  [val]) -> (MOVDconst [val])
(Const32  [val]) -> (MOVDconst [val])
(Const64  [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(IsNonNil p) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
(IsInBounds idx len) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(IsSliceInBounds idx len) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) -> (MOVDaddr {sym} base)
(ITab (Load ptr mem)) -> (MOVDload ptr mem)

// block rewrites
(If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LT cmp yes no)
(If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LE cmp yes no)
(If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GT cmp yes no)
(If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GE cmp yes no)
(If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (EQ cmp yes no)
(If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (NE cmp yes no)

// Special case for floating point - LF/LEF not generated.
(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)

(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <config.fe.TypeBool()> cond)) yes no)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold unnecessary type conversions.
(MOVDreg <t> x) && t.Compare(x.Type) == CMPeq -> x
(MOVDnop <t> x) && t.Compare(x.Type) == CMPeq -> x

// Propagate constants through type conversions.
(MOVDreg (MOVDconst [c])) -> (MOVDconst [c])
(MOVDnop (MOVDconst [c])) -> (MOVDconst [c])

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVDnop doesn't emit an instruction; it exists only to pin down the type.
(MOVDreg x) && x.Uses == 1 -> (MOVDnop x)

// Fold sign extensions into conditional moves of constants.
// Designed to remove the MOVBZreg inserted by the If lowering.
(MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDLE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDGT (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDGE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDEQ (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDNE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDGTnoinv (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
(MOVBZreg x:(MOVDGEnoinv (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)

// Fold boolean tests into blocks.
(NE (CMPWconst [0] (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LT cmp yes no)
(NE (CMPWconst [0] (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LE cmp yes no)
(NE (CMPWconst [0] (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GT cmp yes no)
(NE (CMPWconst [0] (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GE cmp yes no)
(NE (CMPWconst [0] (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (EQ cmp yes no)
(NE (CMPWconst [0] (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (NE cmp yes no)
(NE (CMPWconst [0] (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GTF cmp yes no)
(NE (CMPWconst [0] (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GEF cmp yes no)

// Fold constants into instructions.
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
(ADDW x (MOVDconst [c])) -> (ADDWconst [c] x)
(ADDW (MOVDconst [c]) x) -> (ADDWconst [c] x)

(SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c])
(SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst <v.Type> x [c]))
(SUBW x (MOVDconst [c])) -> (SUBWconst x [c])
(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))

(MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x)
(MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x)
(MULLW x (MOVDconst [c])) -> (MULLWconst [c] x)
(MULLW (MOVDconst [c]) x) -> (MULLWconst [c] x)

// NILF instructions leave the high 32 bits unchanged, which is equivalent
// to ANDing with a mask whose leftmost 32 bits are all set.
// TODO(mundaym): modify the assembler to accept 64-bit values
// and use isU32Bit(^c).
(AND x (MOVDconst [c])) && is32Bit(c) && c < 0 -> (ANDconst [c] x)
(AND (MOVDconst [c]) x) && is32Bit(c) && c < 0 -> (ANDconst [c] x)
(ANDW x (MOVDconst [c])) -> (ANDWconst [c] x)
(ANDW (MOVDconst [c]) x) -> (ANDWconst [c] x)

(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x)
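
// Why c < 0 in the ANDconst rules above: given is32Bit(c), a negative c
// sign-extends to a 64-bit mask whose top 32 bits are all ones, which is
// precisely the class of masks NILF can apply. A hedged Go sketch of the
// equivalence:
//
//	func nilf(x uint64, m uint32) uint64 { // AND with a negative 32-bit c
//		return x&0xFFFFFFFF00000000 | uint64(uint32(x)&m)
//	}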

(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
(OR (MOVDconst [c]) x) && isU32Bit(c) -> (ORconst [c] x)
(ORW x (MOVDconst [c])) -> (ORWconst [c] x)
(ORW (MOVDconst [c]) x) -> (ORWconst [c] x)

(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(XOR (MOVDconst [c]) x) && isU32Bit(c) -> (XORconst [c] x)
(XORW x (MOVDconst [c])) -> (XORWconst [c] x)
(XORW (MOVDconst [c]) x) -> (XORWconst [c] x)

(SLD x (MOVDconst [c])) -> (SLDconst [c&63] x)
(SLW x (MOVDconst [c])) -> (SLWconst [c&63] x)
(SRD x (MOVDconst [c])) -> (SRDconst [c&63] x)
(SRW x (MOVDconst [c])) -> (SRWconst [c&63] x)
(SRAD x (MOVDconst [c])) -> (SRADconst [c&63] x)
(SRAW x (MOVDconst [c])) -> (SRAWconst [c&63] x)

(SRAW x (ANDWconst [63] y)) -> (SRAW x y)
(SRAD x (ANDconst [63] y)) -> (SRAD x y)
(SLW x (ANDWconst [63] y)) -> (SLW x y)
(SLD x (ANDconst [63] y)) -> (SLD x y)
(SRW x (ANDWconst [63] y)) -> (SRW x y)
(SRD x (ANDconst [63] y)) -> (SRD x y)

(CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c]))
(CMPW x (MOVDconst [c])) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
(CMPU x (MOVDconst [c])) && isU32Bit(c) -> (CMPUconst x [int64(uint32(c))])
(CMPU (MOVDconst [c]) x) && isU32Bit(c) -> (InvertFlags (CMPUconst x [int64(uint32(c))]))
(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(uint32(c))])
(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))]))

// Using MOV{W,H,B}Zreg instead of AND is cheaper.
(AND (MOVDconst [0xFF]) x) -> (MOVBZreg x)
(AND x (MOVDconst [0xFF])) -> (MOVBZreg x)
(AND (MOVDconst [0xFFFF]) x) -> (MOVHZreg x)
(AND x (MOVDconst [0xFFFF])) -> (MOVHZreg x)
(AND (MOVDconst [0xFFFFFFFF]) x) -> (MOVWZreg x)
(AND x (MOVDconst [0xFFFFFFFF])) -> (MOVWZreg x)
(ANDWconst [0xFF] x) -> (MOVBZreg x)
(ANDWconst [0xFFFF] x) -> (MOVHZreg x)

// strength reduction
(MULLDconst [-1] x) -> (NEG x)
(MULLDconst [0] _) -> (MOVDconst [0])
(MULLDconst [1] x) -> x
(MULLDconst [c] x) && isPowerOfTwo(c) -> (SLDconst [log2(c)] x)
(MULLDconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUB (SLDconst <v.Type> [log2(c+1)] x) x)
(MULLDconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADD (SLDconst <v.Type> [log2(c-1)] x) x)

(MULLWconst [-1] x) -> (NEGW x)
(MULLWconst [0] _) -> (MOVDconst [0])
(MULLWconst [1] x) -> x
(MULLWconst [c] x) && isPowerOfTwo(c) -> (SLWconst [log2(c)] x)
(MULLWconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBW (SLWconst <v.Type> [log2(c+1)] x) x)
(MULLWconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADDW (SLWconst <v.Type> [log2(c-1)] x) x)
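
// Examples of the identities used above (hedged Go sketches):
//
//	x * 16 == x << 4       // isPowerOfTwo(c)
//	x * 15 == (x << 4) - x // isPowerOfTwo(c+1)
//	x * 17 == (x << 4) + x // isPowerOfTwo(c-1)
//
// The c >= 15 and c >= 17 guards leave the smaller constants to the plain
// multiply, presumably because the shift-and-add pair buys nothing there.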

// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(c+d) -> (MOVDaddr [c+d] {s} x)
(ADD x (MOVDaddr [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
(ADD (MOVDaddr [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)

// fold ADDconst into MOVDaddrx
(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(c+d) -> (MOVDaddridx [c+d] {s} x y)
(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(c+d) && x.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(c+d) && y.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)

// reverse ordering of compare instruction
(MOVDLT x y (InvertFlags cmp)) -> (MOVDGT x y cmp)
(MOVDGT x y (InvertFlags cmp)) -> (MOVDLT x y cmp)
(MOVDLE x y (InvertFlags cmp)) -> (MOVDGE x y cmp)
(MOVDGE x y (InvertFlags cmp)) -> (MOVDLE x y cmp)
(MOVDEQ x y (InvertFlags cmp)) -> (MOVDEQ x y cmp)
(MOVDNE x y (InvertFlags cmp)) -> (MOVDNE x y cmp)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVBZreg x:(MOVBZload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVBZload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) -> (MOVDreg x)
(MOVHZreg x:(MOVBZload _ _)) -> (MOVDreg x)
(MOVHZreg x:(MOVHZload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVBZload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVHZload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) -> (MOVDreg x)
(MOVWZreg x:(MOVBZload _ _)) -> (MOVDreg x)
(MOVWZreg x:(MOVHZload _ _)) -> (MOVDreg x)
(MOVWZreg x:(MOVWZload _ _)) -> (MOVDreg x)

// don't extend if argument is already extended
(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> (MOVDreg x)
(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> (MOVDreg x)
(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> (MOVDreg x)
(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> (MOVDreg x)
(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> (MOVDreg x)
(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> (MOVDreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVBZreg x:(MOVBZreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVBZreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVHZreg x:(MOVBZreg _)) -> (MOVDreg x)
(MOVHZreg x:(MOVHZreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVBZreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVHZreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVWreg _)) -> (MOVDreg x)
(MOVWZreg x:(MOVBZreg _)) -> (MOVDreg x)
(MOVWZreg x:(MOVHZreg _)) -> (MOVDreg x)
(MOVWZreg x:(MOVWZreg _)) -> (MOVDreg x)

// fold extensions into constants
(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
(MOVBreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVBZreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZload <v.Type> [off] {sym} ptr mem)
(MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <v.Type> [off] {sym} ptr mem)
(MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZload <v.Type> [off] {sym} ptr mem)
(MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
(MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZload <v.Type> [off] {sym} ptr mem)

(MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx <v.Type> [off] {sym} ptr idx mem)
(MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx <v.Type> [off] {sym} ptr idx mem)
(MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx <v.Type> [off] {sym} ptr idx mem)

// replace load from same location as preceding store with copy
(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBZreg x)
(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHZreg x)
(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWZreg x)
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDreg x)

// Don't extend before storing
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)

// Fold constants into memory operations.
// Note that this is not always a good idea because if not all the uses of
// the ADDconst get eliminated, we still have to compute the ADDconst and we now
// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
(MOVDload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVDload  [off1+off2] {sym} ptr mem)
(MOVWload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
(MOVHload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVHload  [off1+off2] {sym} ptr mem)
(MOVBload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
(MOVWZload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} ptr mem)
(MOVHZload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} ptr mem)
(MOVBZload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} ptr mem)
(FMOVSload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVDstore  [off1+off2] {sym} ptr val mem)
(MOVWstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
(MOVHstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVHstore  [off1+off2] {sym} ptr val mem)
(MOVBstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

// Fold constants into stores.
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB ->
	(MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB ->
	(MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB ->
	(MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB ->
	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
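
// makeValAndOff packs the value to store and the address offset into the
// single auxiliary integer carried by the *storeconst ops. A sketch of the
// encoding these rules assume (the real helper lives in the ssa package):
//
//	func makeValAndOff(val, off int64) int64 {
//		return val<<32 | int64(uint32(off))
//	}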

// Fold address offsets into constant stores.
(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)

// We need to fold MOVDaddr into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
(MOVDload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVDload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVHZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVDstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVDstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVHstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)

// generating indexed loads and stores
(MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)

(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)

(MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVBZloadidx [off] {sym} ptr idx mem)
(MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVHZloadidx [off] {sym} ptr idx mem)
(MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVWZloadidx [off] {sym} ptr idx mem)
(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVDloadidx [off] {sym} ptr idx mem)
(FMOVSload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVSloadidx [off] {sym} ptr idx mem)
(FMOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVDloadidx [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx [off] {sym} ptr idx val mem)
(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVHstoreidx [off] {sym} ptr idx val mem)
(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx [off] {sym} ptr idx val mem)
(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVDstoreidx [off] {sym} ptr idx val mem)
(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVSstoreidx [off] {sym} ptr idx val mem)
(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVDstoreidx [off] {sym} ptr idx val mem)

// combine ADD into indexed loads and stores
(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem)
(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem)
(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem)
(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem)

(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem)
(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem)

(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem)
(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem)
(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem)
(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem)

(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem)
(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem)

// MOVDaddr into MOVDaddridx
(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
	(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
	(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
   832  
   833  // Absorb InvertFlags into branches.
   834  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   835  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   836  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   837  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   838  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   839  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   840  
   841  // Constant comparisons.
   842  (CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
   843  (CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
   844  (CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)
   845  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) -> (FlagEQ)
   846  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
   847  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
   848  
   849  (CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
   850  (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
   851  (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)
   852  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) -> (FlagEQ)
   853  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
   854  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
   855  
   856  // Other known comparisons.
   857  (CMPconst (MOVBZreg _) [c]) && 0xFF < c -> (FlagLT)
   858  (CMPconst (MOVHZreg _) [c]) && 0xFFFF < c -> (FlagLT)
   859  (CMPconst (MOVWZreg _) [c]) && 0xFFFFFFFF < c -> (FlagLT)
   860  (CMPWconst (SRWconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT)
   861  (CMPconst (SRDconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT)
   862  (CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT)
   863  (CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
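
// These follow from value-range facts: a zero-extended byte is at most 0xFF,
// a logical right shift by c leaves at most 64-c significant bits, and an AND
// with mask m yields at most m. A quick self-contained check of the shift
// bound (plain Go, illustrative):
//
//	func shiftBoundHolds(x uint64, c uint) bool {
//		return x>>c < 1<<(64-c) // holds for all x when 0 < c < 64
//	}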
   864  
   865  // Absorb flag constants into subtract-with-borrow (SUBE(W)carrymask) ops.
   866  (SUBEcarrymask (FlagEQ)) -> (MOVDconst [-1])
   867  (SUBEcarrymask (FlagLT)) -> (MOVDconst [-1])
   868  (SUBEcarrymask (FlagGT)) -> (MOVDconst [0])
   869  (SUBEWcarrymask (FlagEQ)) -> (MOVDconst [-1])
   870  (SUBEWcarrymask (FlagLT)) -> (MOVDconst [-1])
   871  (SUBEWcarrymask (FlagGT)) -> (MOVDconst [0])
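
// As the rules above spell out, SUBE(W)carrymask turns flags into a mask:
// -1 when the flags say equal or less-than, 0 when greater (the shift
// lowerings earlier in this file use it to zero oversized shift amounts).
// A plain-Go model (illustrative):
//
//	func carrymask(flags string) int64 {
//		if flags == "FlagGT" {
//			return 0
//		}
//		return -1 // FlagEQ or FlagLT
//	}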
   872  
   873  // Absorb flag constants into branches.
   874  (EQ (FlagEQ) yes no) -> (First nil yes no)
   875  (EQ (FlagLT) yes no) -> (First nil no yes)
   876  (EQ (FlagGT) yes no) -> (First nil no yes)
   877  
   878  (NE (FlagEQ) yes no) -> (First nil no yes)
   879  (NE (FlagLT) yes no) -> (First nil yes no)
   880  (NE (FlagGT) yes no) -> (First nil yes no)
   881  
   882  (LT (FlagEQ) yes no) -> (First nil no yes)
   883  (LT (FlagLT) yes no) -> (First nil yes no)
   884  (LT (FlagGT) yes no) -> (First nil no yes)
   885  
   886  (LE (FlagEQ) yes no) -> (First nil yes no)
   887  (LE (FlagLT) yes no) -> (First nil yes no)
   888  (LE (FlagGT) yes no) -> (First nil no yes)
   889  
   890  (GT (FlagEQ) yes no) -> (First nil no yes)
   891  (GT (FlagLT) yes no) -> (First nil no yes)
   892  (GT (FlagGT) yes no) -> (First nil yes no)
   893  
   894  (GE (FlagEQ) yes no) -> (First nil yes no)
   895  (GE (FlagLT) yes no) -> (First nil no yes)
   896  (GE (FlagGT) yes no) -> (First nil yes no)
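
// (First nil yes no) makes the block unconditionally take its first
// successor; swapping yes and no selects the other arm, so statically decided
// branches disappear. A hedged sketch of the generated rewrite for one case
// above (illustrative only):
//
//	case BlockS390XEQ:
//		if b.Control.Op == OpS390XFlagLT {
//			// (EQ (FlagLT) yes no) -> (First nil no yes)
//			b.Kind = BlockFirst
//			b.SetControl(nil)
//			b.swapSuccessors()
//			return true
//		}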
   897  
   898  // Absorb flag constants into conditional move (MOVD<cond>) ops.
   899  (MOVDEQ _ x (FlagEQ)) -> x
   900  (MOVDEQ y _ (FlagLT)) -> y
   901  (MOVDEQ y _ (FlagGT)) -> y
   902  
   903  (MOVDNE y _ (FlagEQ)) -> y
   904  (MOVDNE _ x (FlagLT)) -> x
   905  (MOVDNE _ x (FlagGT)) -> x
   906  
   907  (MOVDLT y _ (FlagEQ)) -> y
   908  (MOVDLT _ x (FlagLT)) -> x
   909  (MOVDLT y _ (FlagGT)) -> y
   910  
   911  (MOVDLE _ x (FlagEQ)) -> x
   912  (MOVDLE _ x (FlagLT)) -> x
   913  (MOVDLE y _ (FlagGT)) -> y
   914  
   915  (MOVDGT y _ (FlagEQ)) -> y
   916  (MOVDGT y _ (FlagLT)) -> y
   917  (MOVDGT _ x (FlagGT)) -> x
   918  
   919  (MOVDGE _ x (FlagEQ)) -> x
   920  (MOVDGE y _ (FlagLT)) -> y
   921  (MOVDGE _ x (FlagGT)) -> x
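
// MOVD<cond> y x flags is a conditional move: it yields x when the condition
// holds and y otherwise, so a constant flag makes the choice static. A
// plain-Go model of one op (illustrative):
//
//	func movdeq(y, x int64, flags string) int64 { // models (MOVDEQ y x flags)
//		if flags == "FlagEQ" {
//			return x
//		}
//		return y
//	}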
   922  
   923  // Remove redundant *const ops
   924  (ADDconst [0] x) -> x
   925  (ADDWconst [c] x) && int32(c)==0 -> x
   926  (SUBconst [0] x) -> x
   927  (SUBWconst [c] x) && int32(c) == 0 -> x
   928  (ANDconst [0] _)                 -> (MOVDconst [0])
   929  (ANDWconst [c] _) && int32(c)==0  -> (MOVDconst [0])
   930  (ANDconst [-1] x)                -> x
   931  (ANDWconst [c] x) && int32(c)==-1 -> x
   932  (ORconst [0] x)                  -> x
   933  (ORWconst [c] x) && int32(c)==0   -> x
   934  (ORconst [-1] _)                 -> (MOVDconst [-1])
   935  (ORWconst [c] _) && int32(c)==-1  -> (MOVDconst [-1])
   936  (XORconst [0] x)                  -> x
   937  (XORWconst [c] x) && int32(c)==0   -> x
   938  
   939  // Convert constant subtracts to constant adds.
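// The guard excludes c == -(1<<31) because its negation does not fit in the
// 32-bit range that ADDconst's immediate must stay within.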
   940  (SUBconst [c] x) && c != -(1<<31) -> (ADDconst [-c] x)
   941  (SUBWconst [c] x) -> (ADDWconst [int64(int32(-c))] x)
   942  
   943  // generic constant folding
   944  // TODO: more of this
   945  (ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d])
   946  (ADDWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c+d))])
   947  (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
   948  (ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int32(c+d))] x)
   949  (SUBconst (MOVDconst [d]) [c]) -> (MOVDconst [d-c])
   950  (SUBconst (SUBconst x [d]) [c]) && is32Bit(-c-d) -> (ADDconst [-c-d] x)
   951  (SRADconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
   952  (SRAWconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
   953  (NEG (MOVDconst [c])) -> (MOVDconst [-c])
   954  (NEGW (MOVDconst [c])) -> (MOVDconst [int64(int32(-c))])
   955  (MULLDconst [c] (MOVDconst [d])) -> (MOVDconst [c*d])
   956  (MULLWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c*d))])
   957  (AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
   958  (ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
   959  (ANDWconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
   960  (OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
   961  (ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
   962  (ORWconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
   963  (XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
   964  (XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
   965  (XORWconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
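
// Constant operands collapse bottom-up, e.g. (ADDconst [1] (MOVDconst [2]))
// becomes (MOVDconst [3]). The W-suffixed forms truncate through int32 so the
// folded result matches 32-bit wraparound (plain Go, illustrative):
//
//	func addWconst(c, d int64) int64 { // models (ADDWconst [c] (MOVDconst [d]))
//		return int64(int32(c + d))
//	}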
   966  
   967  // generic simplifications
   968  // TODO: more of this
   969  (ADD x (NEG y)) -> (SUB x y)
   970  (ADDW x (NEGW y)) -> (SUBW x y)
   971  (SUB x x) -> (MOVDconst [0])
   972  (SUBW x x) -> (MOVDconst [0])
   973  (AND x x) -> x
   974  (ANDW x x) -> x
   975  (OR x x) -> x
   976  (ORW x x) -> x
   977  (XOR x x) -> (MOVDconst [0])
   978  (XORW x x) -> (MOVDconst [0])
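
// Algebraic identities on a value paired with itself or its negation need no
// flow analysis at all (plain Go, illustrative):
//
//	func addNeg(x, y int64) int64 { return x + (-y) } // (ADD x (NEG y)) -> (SUB x y)
//	func xorSelf(x int64) int64   { return x ^ x }    // (XOR x x) -> (MOVDconst [0])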
   979  
   980  // Fold memory operations into operations.
   981  // Exclude global data (SB) because these instructions cannot handle relative addresses.
   982  // TODO(mundaym): use LARL in the assembler to handle SB?
   983  // TODO(mundaym): indexed versions of these?
   984  (ADD <t> x g:(MOVDload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   985  	-> (ADDload <t> [off] {sym} x ptr mem)
   986  (ADD <t> g:(MOVDload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   987  	-> (ADDload <t> [off] {sym} x ptr mem)
   988  (ADDW <t> x g:(MOVWload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   989  	-> (ADDWload <t> [off] {sym} x ptr mem)
   990  (ADDW <t> g:(MOVWload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   991  	-> (ADDWload <t> [off] {sym} x ptr mem)
   992  (ADDW <t> x g:(MOVWZload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   993  	-> (ADDWload <t> [off] {sym} x ptr mem)
   994  (ADDW <t> g:(MOVWZload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   995  	-> (ADDWload <t> [off] {sym} x ptr mem)
   996  (MULLD <t> x g:(MOVDload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   997  	-> (MULLDload <t> [off] {sym} x ptr mem)
   998  (MULLD <t> g:(MOVDload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
   999  	-> (MULLDload <t> [off] {sym} x ptr mem)
  1000  (MULLW <t> x g:(MOVWload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1001  	-> (MULLWload <t> [off] {sym} x ptr mem)
  1002  (MULLW <t> g:(MOVWload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1003  	-> (MULLWload <t> [off] {sym} x ptr mem)
  1004  (MULLW <t> x g:(MOVWZload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1005  	-> (MULLWload <t> [off] {sym} x ptr mem)
  1006  (MULLW <t> g:(MOVWZload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1007  	-> (MULLWload <t> [off] {sym} x ptr mem)
  1008  (SUB <t> x g:(MOVDload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1009  	-> (SUBload <t> [off] {sym} x ptr mem)
  1010  (SUBW <t> x g:(MOVWload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1011  	-> (SUBWload <t> [off] {sym} x ptr mem)
  1012  (SUBW <t> x g:(MOVWZload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1013  	-> (SUBWload <t> [off] {sym} x ptr mem)
  1014  (AND <t> x g:(MOVDload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1015  	-> (ANDload <t> [off] {sym} x ptr mem)
  1016  (AND <t> g:(MOVDload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1017  	-> (ANDload <t> [off] {sym} x ptr mem)
  1018  (ANDW <t> x g:(MOVWload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1019  	-> (ANDWload <t> [off] {sym} x ptr mem)
  1020  (ANDW <t> g:(MOVWload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1021  	-> (ANDWload <t> [off] {sym} x ptr mem)
  1022  (ANDW <t> x g:(MOVWZload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1023  	-> (ANDWload <t> [off] {sym} x ptr mem)
  1024  (ANDW <t> g:(MOVWZload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1025  	-> (ANDWload <t> [off] {sym} x ptr mem)
  1026  (OR <t> x g:(MOVDload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1027  	-> (ORload <t> [off] {sym} x ptr mem)
  1028  (OR <t> g:(MOVDload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1029  	-> (ORload <t> [off] {sym} x ptr mem)
  1030  (ORW <t> x g:(MOVWload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1031  	-> (ORWload <t> [off] {sym} x ptr mem)
  1032  (ORW <t> g:(MOVWload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1033  	-> (ORWload <t> [off] {sym} x ptr mem)
  1034  (ORW <t> x g:(MOVWZload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1035  	-> (ORWload <t> [off] {sym} x ptr mem)
  1036  (ORW <t> g:(MOVWZload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1037  	-> (ORWload <t> [off] {sym} x ptr mem)
  1038  (XOR <t> x g:(MOVDload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1039  	-> (XORload <t> [off] {sym} x ptr mem)
  1040  (XOR <t> g:(MOVDload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1041  	-> (XORload <t> [off] {sym} x ptr mem)
  1042  (XORW <t> x g:(MOVWload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1043  	-> (XORWload <t> [off] {sym} x ptr mem)
  1044  (XORW <t> g:(MOVWload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1045  	-> (XORWload <t> [off] {sym} x ptr mem)
  1046  (XORW <t> x g:(MOVWZload [off] {sym} ptr mem)) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1047  	-> (XORWload <t> [off] {sym} x ptr mem)
  1048  (XORW <t> g:(MOVWZload [off] {sym} ptr mem) x) && g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
  1049  	-> (XORWload <t> [off] {sym} x ptr mem)
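
// A load with a single use that feeds an arithmetic op fuses into one
// register-memory instruction (e.g. AG for ADDload), provided the offset fits
// in 20 bits and canMergeLoad proves no store intervenes. Illustrative Go
// that can take this path:
//
//	func addFromMem(x int64, p *int64) int64 {
//		return x + *p // ADD of a one-use MOVDload -> ADDload
//	}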
  1050  
  1051  // Combine constant stores into larger (unaligned) stores.
  1052  // This doesn't work for global data (based on SB)
  1053  // because STGRL doesn't support unaligned addresses.
  1054  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  1055    && p.Op != OpSB
  1056    && x.Uses == 1
  1057    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1058    && clobber(x)
  1059    -> (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  1060  (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
  1061    && p.Op != OpSB
  1062    && x.Uses == 1
  1063    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1064    && clobber(x)
  1065    -> (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  1066  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  1067    && p.Op != OpSB
  1068    && x.Uses == 1
  1069    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  1070    && clobber(x)
  1071    -> (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem)
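
// Adjacent constant stores pack big-endian: the value at the lower offset
// becomes the high-order byte(s) of the wider store. Illustrative Go
// (actual matching depends on the surrounding SSA):
//
//	func setPair(b []byte) {
//		b[0] = 1 // MOVBstoreconst at offset 0
//		b[1] = 2 // merges into one MOVHstoreconst writing 0x0102
//	}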
  1072  
  1073  // Combine stores into larger (unaligned) stores.
  1074  // It doesn't work on global data (based on SB) because stores with relative addressing
  1075  // require that the memory operand be aligned.
  1076  (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
  1077    && p.Op != OpSB
  1078    && x.Uses == 1
  1079    && clobber(x)
  1080    -> (MOVHstore [i-1] {s} p w mem)
  1081  (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
  1082    && p.Op != OpSB
  1083    && x.Uses == 1
  1084    && clobber(x)
  1085    -> (MOVHstore [i-1] {s} p w0 mem)
  1086  (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
  1087    && p.Op != OpSB
  1088    && x.Uses == 1
  1089    && clobber(x)
  1090    -> (MOVHstore [i-1] {s} p w mem)
  1091  (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
  1092    && p.Op != OpSB
  1093    && x.Uses == 1
  1094    && clobber(x)
  1095    -> (MOVHstore [i-1] {s} p w0 mem)
  1096  (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
  1097    && p.Op != OpSB
  1098    && x.Uses == 1
  1099    && clobber(x)
  1100    -> (MOVWstore [i-2] {s} p w mem)
  1101  (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
  1102    && p.Op != OpSB
  1103    && x.Uses == 1
  1104    && clobber(x)
  1105    -> (MOVWstore [i-2] {s} p w0 mem)
  1106  (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
  1107    && p.Op != OpSB
  1108    && x.Uses == 1
  1109    && clobber(x)
  1110    -> (MOVWstore [i-2] {s} p w mem)
  1111  (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
  1112    && p.Op != OpSB
  1113    && x.Uses == 1
  1114    && clobber(x)
  1115    -> (MOVWstore [i-2] {s} p w0 mem)
  1116  (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
  1117    && p.Op != OpSB
  1118    && x.Uses == 1
  1119    && clobber(x)
  1120    -> (MOVDstore [i-4] {s} p w mem)
  1121  (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
  1122    && p.Op != OpSB
  1123    && x.Uses == 1
  1124    && clobber(x)
  1125    -> (MOVDstore [i-4] {s} p w0 mem)
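
// s390x is big-endian, so a store of w at [i] below a store of w>>8 at [i-1]
// lays down exactly the bytes of a 16-bit store of w at [i-1], and so on up
// to 64 bits. This is the byte pattern big-endian encoders produce
// (illustrative; actual matching depends on the surrounding SSA):
//
//	func putUint16BE(b []byte, v uint16) {
//		b[0] = byte(v >> 8) // MOVBstore [i-1] (SRWconst [8] w)
//		b[1] = byte(v)      // MOVBstore [i] w; merged into MOVHstore [i-1]
//	}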
  1126  
  1127  (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem))
  1128    && x.Uses == 1
  1129    && clobber(x)
  1130    -> (MOVHstoreidx [i-1] {s} p idx w mem)
  1131  (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem))
  1132    && x.Uses == 1
  1133    && clobber(x)
  1134    -> (MOVHstoreidx [i-1] {s} p idx w0 mem)
  1135  (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem))
  1136    && x.Uses == 1
  1137    && clobber(x)
  1138    -> (MOVHstoreidx [i-1] {s} p idx w mem)
  1139  (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem))
  1140    && x.Uses == 1
  1141    && clobber(x)
  1142    -> (MOVHstoreidx [i-1] {s} p idx w0 mem)
  1143  (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem))
  1144    && x.Uses == 1
  1145    && clobber(x)
  1146    -> (MOVWstoreidx [i-2] {s} p idx w mem)
  1147  (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem))
  1148    && x.Uses == 1
  1149    && clobber(x)
  1150    -> (MOVWstoreidx [i-2] {s} p idx w0 mem)
  1151  (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem))
  1152    && x.Uses == 1
  1153    && clobber(x)
  1154    -> (MOVWstoreidx [i-2] {s} p idx w mem)
  1155  (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem))
  1156    && x.Uses == 1
  1157    && clobber(x)
  1158    -> (MOVWstoreidx [i-2] {s} p idx w0 mem)
  1159  (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem))
  1160    && x.Uses == 1
  1161    && clobber(x)
  1162    -> (MOVDstoreidx [i-4] {s} p idx w mem)
  1163  (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem))
  1164    && x.Uses == 1
  1165    && clobber(x)
  1166    -> (MOVDstoreidx [i-4] {s} p idx w0 mem)
  1167  
  1168  // Combine stores into larger (unaligned) stores with the bytes reversed (little endian).
  1169  // Store-with-bytes-reversed instructions do not support relative memory addresses,
  1170  // so these stores can't operate on global data (SB).
  1171  (MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  1172    && p.Op != OpSB
  1173    && x.Uses == 1
  1174    && clobber(x)
  1175    -> (MOVHBRstore [i-1] {s} p w mem)
  1176  (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
  1177    && p.Op != OpSB
  1178    && x.Uses == 1
  1179    && clobber(x)
  1180    -> (MOVHBRstore [i-1] {s} p w0 mem)
  1181  (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  1182    && p.Op != OpSB
  1183    && x.Uses == 1
  1184    && clobber(x)
  1185    -> (MOVHBRstore [i-1] {s} p w mem)
  1186  (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
  1187    && p.Op != OpSB
  1188    && x.Uses == 1
  1189    && clobber(x)
  1190    -> (MOVHBRstore [i-1] {s} p w0 mem)
  1191  (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
  1192    && x.Uses == 1
  1193    && clobber(x)
  1194    -> (MOVWBRstore [i-2] {s} p w mem)
  1195  (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
  1196    && x.Uses == 1
  1197    && clobber(x)
  1198    -> (MOVWBRstore [i-2] {s} p w0 mem)
  1199  (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
  1200    && x.Uses == 1
  1201    && clobber(x)
  1202    -> (MOVWBRstore [i-2] {s} p w mem)
  1203  (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
  1204    && x.Uses == 1
  1205    && clobber(x)
  1206    -> (MOVWBRstore [i-2] {s} p w0 mem)
  1207  (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
  1208    && x.Uses == 1
  1209    && clobber(x)
  1210    -> (MOVDBRstore [i-4] {s} p w mem)
  1211  (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
  1212    && x.Uses == 1
  1213    && clobber(x)
  1214    -> (MOVDBRstore [i-4] {s} p w0 mem)
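
// Here the shifts run the other way: the low byte lands at the low address,
// so the merged store must reverse bytes (the MOV*BRstore ops). This matches
// the pattern little-endian encoders produce (illustrative; actual matching
// depends on the surrounding SSA):
//
//	func putUint32LE(b []byte, v uint32) {
//		b[0] = byte(v) // lowest byte at lowest address
//		b[1] = byte(v >> 8)
//		b[2] = byte(v >> 16)
//		b[3] = byte(v >> 24) // the four byte stores merge pairwise into one MOVWBRstore
//	}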
  1215  
  1216  (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem))
  1217    && x.Uses == 1
  1218    && clobber(x)
  1219    -> (MOVHBRstoreidx [i-1] {s} p idx w mem)
  1220  (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem))
  1221    && x.Uses == 1
  1222    && clobber(x)
  1223    -> (MOVHBRstoreidx [i-1] {s} p idx w0 mem)
  1224  (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem))
  1225    && x.Uses == 1
  1226    && clobber(x)
  1227    -> (MOVHBRstoreidx [i-1] {s} p idx w mem)
  1228  (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem))
  1229    && x.Uses == 1
  1230    && clobber(x)
  1231    -> (MOVHBRstoreidx [i-1] {s} p idx w0 mem)
  1232  (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem))
  1233    && x.Uses == 1
  1234    && clobber(x)
  1235    -> (MOVWBRstoreidx [i-2] {s} p idx w mem)
  1236  (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem))
  1237    && x.Uses == 1
  1238    && clobber(x)
  1239    -> (MOVWBRstoreidx [i-2] {s} p idx w0 mem)
  1240  (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem))
  1241    && x.Uses == 1
  1242    && clobber(x)
  1243    -> (MOVWBRstoreidx [i-2] {s} p idx w mem)
  1244  (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem))
  1245    && x.Uses == 1
  1246    && clobber(x)
  1247    -> (MOVWBRstoreidx [i-2] {s} p idx w0 mem)
  1248  (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem))
  1249    && x.Uses == 1
  1250    && clobber(x)
  1251    -> (MOVDBRstoreidx [i-4] {s} p idx w mem)
  1252  (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem))
  1253    && x.Uses == 1
  1254    && clobber(x)
  1255    -> (MOVDBRstoreidx [i-4] {s} p idx w0 mem)
  1256  
  1257  // Combine byte loads into larger (unaligned) loads.
  1258  
  1259  // Little endian loads.
  1260  
  1261  // b[0] | b[1]<<8 -> load 16-bit, reverse bytes
  1262  (ORW                 x0:(MOVBZload [i]   {s} p mem)
  1263      s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
  1264    && p.Op != OpSB
  1265    && x0.Uses == 1
  1266    && x1.Uses == 1
  1267    && s0.Uses == 1
  1268    && mergePoint(b,x0,x1) != nil
  1269    && clobber(x0)
  1270    && clobber(x1)
  1271    && clobber(s0)
  1272    -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem))
  1273  
  1274  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
  1275  (ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRload [i] {s} p mem))
  1276      s0:(SLWconst [16] x1:(MOVBZload [i+2] {s} p mem)))
  1277      s1:(SLWconst [24] x2:(MOVBZload [i+3] {s} p mem)))
  1278    && p.Op != OpSB
  1279    && z0.Uses == 1
  1280    && x0.Uses == 1
  1281    && x1.Uses == 1
  1282    && x2.Uses == 1
  1283    && s0.Uses == 1
  1284    && s1.Uses == 1
  1285    && o0.Uses == 1
  1286    && mergePoint(b,x0,x1,x2) != nil
  1287    && clobber(z0)
  1288    && clobber(x0)
  1289    && clobber(x1)
  1290    && clobber(x2)
  1291    && clobber(s0)
  1292    && clobber(s1)
  1293    && clobber(o0)
  1294    -> @mergePoint(b,x0,x1,x2) (MOVWBRload [i] {s} p mem)
  1295  
  1296  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
  1297  (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
  1298                        x0:(MOVBZload [i]   {s} p mem)
  1299      s0:(SLDconst [8]  x1:(MOVBZload [i+1] {s} p mem)))
  1300      s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem)))
  1301      s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem)))
  1302      s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem)))
  1303      s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem)))
  1304      s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem)))
  1305      s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem)))
  1306    && p.Op != OpSB
  1307    && x0.Uses == 1
  1308    && x1.Uses == 1
  1309    && x2.Uses == 1
  1310    && x3.Uses == 1
  1311    && x4.Uses == 1
  1312    && x5.Uses == 1
  1313    && x6.Uses == 1
  1314    && x7.Uses == 1
  1315    && s0.Uses == 1
  1316    && s1.Uses == 1
  1317    && s2.Uses == 1
  1318    && s3.Uses == 1
  1319    && s4.Uses == 1
  1320    && s5.Uses == 1
  1321    && s6.Uses == 1
  1322    && o0.Uses == 1
  1323    && o1.Uses == 1
  1324    && o2.Uses == 1
  1325    && o3.Uses == 1
  1326    && o4.Uses == 1
  1327    && o5.Uses == 1
  1328    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  1329    && clobber(x0)
  1330    && clobber(x1)
  1331    && clobber(x2)
  1332    && clobber(x3)
  1333    && clobber(x4)
  1334    && clobber(x5)
  1335    && clobber(x6)
  1336    && clobber(x7)
  1337    && clobber(s0)
  1338    && clobber(s1)
  1339    && clobber(s2)
  1340    && clobber(s3)
  1341    && clobber(s4)
  1342    && clobber(s5)
  1343    && clobber(s6)
  1344    && clobber(o0)
  1345    && clobber(o1)
  1346    && clobber(o2)
  1347    && clobber(o3)
  1348    && clobber(o4)
  1349    && clobber(o5)
  1350    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem)
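
// These match the shift-and-OR trees produced by little-endian byte decoding,
// replacing the whole tree with a single byte-reversed load placed at
// mergePoint, a block where every byte load is available. Illustrative source
// pattern:
//
//	func readUint32LE(b []byte) uint32 {
//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//	}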
  1351  
  1352  // b[0] | b[1]<<8 -> load 16-bit, reverse bytes
  1353  (ORW                 x0:(MOVBZloadidx [i]   {s} p idx mem)
  1354      s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
  1355    && x0.Uses == 1
  1356    && x1.Uses == 1
  1357    && s0.Uses == 1
  1358    && mergePoint(b,x0,x1) != nil
  1359    && clobber(x0)
  1360    && clobber(x1)
  1361    && clobber(s0)
  1362    -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx <v.Type> [i] {s} p idx mem))
  1363  
  1364  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
  1365  (ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRloadidx [i] {s} p idx mem))
  1366      s0:(SLWconst [16] x1:(MOVBZloadidx [i+2] {s} p idx mem)))
  1367      s1:(SLWconst [24] x2:(MOVBZloadidx [i+3] {s} p idx mem)))
  1368    && z0.Uses == 1
  1369    && x0.Uses == 1
  1370    && x1.Uses == 1
  1371    && x2.Uses == 1
  1372    && s0.Uses == 1
  1373    && s1.Uses == 1
  1374    && o0.Uses == 1
  1375    && mergePoint(b,x0,x1,x2) != nil
  1376    && clobber(z0)
  1377    && clobber(x0)
  1378    && clobber(x1)
  1379    && clobber(x2)
  1380    && clobber(s0)
  1381    && clobber(s1)
  1382    && clobber(o0)
  1383    -> @mergePoint(b,x0,x1,x2) (MOVWZreg (MOVWBRloadidx <v.Type> [i] {s} p idx mem))
  1384  
  1385  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
  1386  (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
  1387                        x0:(MOVBZloadidx [i]   {s} p idx mem)
  1388      s0:(SLDconst [8]  x1:(MOVBZloadidx [i+1] {s} p idx mem)))
  1389      s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))
  1390      s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
  1391      s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem)))
  1392      s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem)))
  1393      s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem)))
  1394      s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem)))
  1395    && x0.Uses == 1
  1396    && x1.Uses == 1
  1397    && x2.Uses == 1
  1398    && x3.Uses == 1
  1399    && x4.Uses == 1
  1400    && x5.Uses == 1
  1401    && x6.Uses == 1
  1402    && x7.Uses == 1
  1403    && s0.Uses == 1
  1404    && s1.Uses == 1
  1405    && s2.Uses == 1
  1406    && s3.Uses == 1
  1407    && s4.Uses == 1
  1408    && s5.Uses == 1
  1409    && s6.Uses == 1
  1410    && o0.Uses == 1
  1411    && o1.Uses == 1
  1412    && o2.Uses == 1
  1413    && o3.Uses == 1
  1414    && o4.Uses == 1
  1415    && o5.Uses == 1
  1416    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  1417    && clobber(x0)
  1418    && clobber(x1)
  1419    && clobber(x2)
  1420    && clobber(x3)
  1421    && clobber(x4)
  1422    && clobber(x5)
  1423    && clobber(x6)
  1424    && clobber(x7)
  1425    && clobber(s0)
  1426    && clobber(s1)
  1427    && clobber(s2)
  1428    && clobber(s3)
  1429    && clobber(s4)
  1430    && clobber(s5)
  1431    && clobber(s6)
  1432    && clobber(o0)
  1433    && clobber(o1)
  1434    && clobber(o2)
  1435    && clobber(o3)
  1436    && clobber(o4)
  1437    && clobber(o5)
  1438    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx <v.Type> [i] {s} p idx mem)
  1439  
  1440  // Big endian loads.
  1441  
  1442  // b[1] | b[0]<<8 -> load 16-bit
  1443  (ORW                  x0:(MOVBZload [i]   {s} p mem)
  1444      s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
  1445    && p.Op != OpSB
  1446    && x0.Uses == 1
  1447    && x1.Uses == 1
  1448    && s0.Uses == 1
  1449    && mergePoint(b,x0,x1) != nil
  1450    && clobber(x0)
  1451    && clobber(x1)
  1452    && clobber(s0)
  1453    -> @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem)
  1454  
  1455  // b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
  1456  (ORW o0:(ORW x0:(MOVHZload [i] {s} p mem)
  1457      s0:(SLWconst [16] x1:(MOVBZload [i-1] {s} p mem)))
  1458      s1:(SLWconst [24] x2:(MOVBZload [i-2] {s} p mem)))
  1459    && p.Op != OpSB
  1460    && x0.Uses == 1
  1461    && x1.Uses == 1
  1462    && x2.Uses == 1
  1463    && s0.Uses == 1
  1464    && s1.Uses == 1
  1465    && o0.Uses == 1
  1466    && mergePoint(b,x0,x1,x2) != nil
  1467    && clobber(x0)
  1468    && clobber(x1)
  1469    && clobber(x2)
  1470    && clobber(s0)
  1471    && clobber(s1)
  1472    && clobber(o0)
  1473    -> @mergePoint(b,x0,x1,x2) (MOVWZload [i-2] {s} p mem)
  1474  
  1475  // b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
  1476  (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
  1477                        x0:(MOVBZload [i]   {s} p mem)
  1478      s0:(SLDconst [8]  x1:(MOVBZload [i-1] {s} p mem)))
  1479      s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem)))
  1480      s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem)))
  1481      s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem)))
  1482      s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem)))
  1483      s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem)))
  1484      s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem)))
  1485    && p.Op != OpSB
  1486    && x0.Uses == 1
  1487    && x1.Uses == 1
  1488    && x2.Uses == 1
  1489    && x3.Uses == 1
  1490    && x4.Uses == 1
  1491    && x5.Uses == 1
  1492    && x6.Uses == 1
  1493    && x7.Uses == 1
  1494    && s0.Uses == 1
  1495    && s1.Uses == 1
  1496    && s2.Uses == 1
  1497    && s3.Uses == 1
  1498    && s4.Uses == 1
  1499    && s5.Uses == 1
  1500    && s6.Uses == 1
  1501    && o0.Uses == 1
  1502    && o1.Uses == 1
  1503    && o2.Uses == 1
  1504    && o3.Uses == 1
  1505    && o4.Uses == 1
  1506    && o5.Uses == 1
  1507    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  1508    && clobber(x0)
  1509    && clobber(x1)
  1510    && clobber(x2)
  1511    && clobber(x3)
  1512    && clobber(x4)
  1513    && clobber(x5)
  1514    && clobber(x6)
  1515    && clobber(x7)
  1516    && clobber(s0)
  1517    && clobber(s1)
  1518    && clobber(s2)
  1519    && clobber(s3)
  1520    && clobber(s4)
  1521    && clobber(s5)
  1522    && clobber(s6)
  1523    && clobber(o0)
  1524    && clobber(o1)
  1525    && clobber(o2)
  1526    && clobber(o3)
  1527    && clobber(o4)
  1528    && clobber(o5)
  1529    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem)
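
// Big-endian is the machine's native order, so these trees collapse to plain
// loads; note the offsets descend because the most significant byte has the
// lowest address. Illustrative source pattern:
//
//	func readUint16BE(b []byte) uint16 {
//		return uint16(b[1]) | uint16(b[0])<<8 // -> one MOVHZload
//	}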
  1530  
  1531  // b[1] | b[0]<<8 -> load 16-bit
  1532  (ORW                 x0:(MOVBZloadidx [i]   {s} p idx mem)
  1533      s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
  1534    && x0.Uses == 1
  1535    && x1.Uses == 1
  1536    && s0.Uses == 1
  1537    && mergePoint(b,x0,x1) != nil
  1538    && clobber(x0)
  1539    && clobber(x1)
  1540    && clobber(s0)
  1541    -> @mergePoint(b,x0,x1) (MOVHZloadidx <v.Type> [i-1] {s} p idx mem)
  1542  
  1543  // b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
  1544  (ORW o0:(ORW x0:(MOVHZloadidx [i] {s} p idx mem)
  1545      s0:(SLWconst [16] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
  1546      s1:(SLWconst [24] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
  1547    && x0.Uses == 1
  1548    && x1.Uses == 1
  1549    && x2.Uses == 1
  1550    && s0.Uses == 1
  1551    && s1.Uses == 1
  1552    && o0.Uses == 1
  1553    && mergePoint(b,x0,x1,x2) != nil
  1554    && clobber(x0)
  1555    && clobber(x1)
  1556    && clobber(x2)
  1557    && clobber(s0)
  1558    && clobber(s1)
  1559    && clobber(o0)
  1560    -> @mergePoint(b,x0,x1,x2) (MOVWZloadidx <v.Type> [i-2] {s} p idx mem)
  1561  
  1562  // b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
  1563  (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
  1564                        x0:(MOVBZloadidx [i]   {s} p idx mem)
  1565      s0:(SLDconst [8]  x1:(MOVBZloadidx [i-1] {s} p idx mem)))
  1566      s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
  1567      s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
  1568      s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem)))
  1569      s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem)))
  1570      s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem)))
  1571      s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem)))
  1572    && x0.Uses == 1
  1573    && x1.Uses == 1
  1574    && x2.Uses == 1
  1575    && x3.Uses == 1
  1576    && x4.Uses == 1
  1577    && x5.Uses == 1
  1578    && x6.Uses == 1
  1579    && x7.Uses == 1
  1580    && s0.Uses == 1
  1581    && s1.Uses == 1
  1582    && s2.Uses == 1
  1583    && s3.Uses == 1
  1584    && s4.Uses == 1
  1585    && s5.Uses == 1
  1586    && s6.Uses == 1
  1587    && o0.Uses == 1
  1588    && o1.Uses == 1
  1589    && o2.Uses == 1
  1590    && o3.Uses == 1
  1591    && o4.Uses == 1
  1592    && o5.Uses == 1
  1593    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  1594    && clobber(x0)
  1595    && clobber(x1)
  1596    && clobber(x2)
  1597    && clobber(x3)
  1598    && clobber(x4)
  1599    && clobber(x5)
  1600    && clobber(x6)
  1601    && clobber(x7)
  1602    && clobber(s0)
  1603    && clobber(s1)
  1604    && clobber(s2)
  1605    && clobber(s3)
  1606    && clobber(s4)
  1607    && clobber(s5)
  1608    && clobber(s6)
  1609    && clobber(o0)
  1610    && clobber(o1)
  1611    && clobber(o2)
  1612    && clobber(o3)
  1613    && clobber(o4)
  1614    && clobber(o5)
  1615    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <v.Type> [i-7] {s} p idx mem)
  1616  
  1617  // Combine stores into store multiples.
  1618  // 32-bit
  1619  (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
  1620    && p.Op != OpSB
  1621    && x.Uses == 1
  1622    && is20Bit(i-4)
  1623    && clobber(x)
  1624    -> (STM2 [i-4] {s} p w0 w1 mem)
  1625  (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
  1626    && x.Uses == 1
  1627    && is20Bit(i-8)
  1628    && clobber(x)
  1629    -> (STM3 [i-8] {s} p w0 w1 w2 mem)
  1630  (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
  1631    && x.Uses == 1
  1632    && is20Bit(i-12)
  1633    && clobber(x)
  1634    -> (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
  1635  (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
  1636    && x.Uses == 1
  1637    && is20Bit(i-8)
  1638    && clobber(x)
  1639    -> (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
  1640  // 64-bit
  1641  (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
  1642    && p.Op != OpSB
  1643    && x.Uses == 1
  1644    && is20Bit(i-8)
  1645    && clobber(x)
  1646    -> (STMG2 [i-8] {s} p w0 w1 mem)
  1647  (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
  1648    && x.Uses == 1
  1649    && is20Bit(i-16)
  1650    && clobber(x)
  1651    -> (STMG3 [i-16] {s} p w0 w1 w2 mem)
  1652  (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
  1653    && x.Uses == 1
  1654    && is20Bit(i-24)
  1655    && clobber(x)
  1656    -> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
  1657  (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
  1658    && x.Uses == 1
  1659    && is20Bit(i-16)
  1660    && clobber(x)
  1661    -> (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
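
// STM/STMG (store multiple) write a run of consecutive registers to
// consecutive memory words, so chains of adjacent stores grow pairwise into a
// single instruction covering up to four values here. Illustrative source
// pattern:
//
//	func store4(p *[4]uint32, a, b, c, d uint32) {
//		p[0], p[1], p[2], p[3] = a, b, c, d // may become a single STM4
//	}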
  1662  
  1663  // Convert 32-bit store multiples into 64-bit stores.
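// Storing the high word (x>>32) at [i] and the low word at [i+4] writes, on a
// big-endian machine, exactly the bytes of a 64-bit store of x at [i].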
  1664  (STM2 [i] {s} p (SRDconst [32] x) x mem) -> (MOVDstore [i] {s} p x mem)