github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/cmd/compile/internal/ssa/gen/AMD64.rules

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add64  x y) -> (ADDQ  x y)
     7  (AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
     8  (AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
     9  (Add32  x y) -> (ADDL  x y)
    10  (Add16  x y) -> (ADDL  x y)
    11  (Add8   x y) -> (ADDL  x y)
    12  (Add32F x y) -> (ADDSS x y)
    13  (Add64F x y) -> (ADDSD x y)
    14  
    15  (Sub64  x y) -> (SUBQ  x y)
    16  (SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
    17  (SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
    18  (Sub32  x y) -> (SUBL  x y)
    19  (Sub16  x y) -> (SUBL  x y)
    20  (Sub8   x y) -> (SUBL  x y)
    21  (Sub32F x y) -> (SUBSS x y)
    22  (Sub64F x y) -> (SUBSD x y)
    23  
    24  (Mul64  x y) -> (MULQ  x y)
    25  (Mul32  x y) -> (MULL  x y)
    26  (Mul16  x y) -> (MULL  x y)
    27  (Mul8   x y) -> (MULL  x y)
    28  (Mul32F x y) -> (MULSS x y)
    29  (Mul64F x y) -> (MULSD x y)
    30  
    31  (Div32F x y) -> (DIVSS x y)
    32  (Div64F x y) -> (DIVSD x y)
    33  
    34  (Div64  x y) -> (Select0 (DIVQ  x y))
    35  (Div64u x y) -> (Select0 (DIVQU x y))
    36  (Div32  x y) -> (Select0 (DIVL  x y))
    37  (Div32u x y) -> (Select0 (DIVLU x y))
    38  (Div16  x y) -> (Select0 (DIVW  x y))
    39  (Div16u x y) -> (Select0 (DIVWU x y))
    40  (Div8   x y) -> (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
    41  (Div8u  x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
    42  
    43  (Hmul64  x y) -> (HMULQ  x y)
    44  (Hmul64u x y) -> (HMULQU x y)
    45  (Hmul32  x y) -> (HMULL  x y)
    46  (Hmul32u x y) -> (HMULLU x y)
    47  
    48  (Mul64uhilo x y) -> (MULQU2 x y)
    49  (Div128u xhi xlo y) -> (DIVQU2 xhi xlo y)
    50  
    51  (Avg64u x y) -> (AVGQU x y)
    52  
    53  (Mod64  x y) -> (Select1 (DIVQ  x y))
    54  (Mod64u x y) -> (Select1 (DIVQU x y))
    55  (Mod32  x y) -> (Select1 (DIVL  x y))
    56  (Mod32u x y) -> (Select1 (DIVLU x y))
    57  (Mod16  x y) -> (Select1 (DIVW  x y))
    58  (Mod16u x y) -> (Select1 (DIVWU x y))
    59  (Mod8   x y) -> (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
    60  (Mod8u  x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
    61  
    62  (And64 x y) -> (ANDQ x y)
    63  (And32 x y) -> (ANDL x y)
    64  (And16 x y) -> (ANDL x y)
    65  (And8  x y) -> (ANDL x y)
    66  
    67  (Or64 x y) -> (ORQ x y)
    68  (Or32 x y) -> (ORL x y)
    69  (Or16 x y) -> (ORL x y)
    70  (Or8  x y) -> (ORL x y)
    71  
    72  (Xor64 x y) -> (XORQ x y)
    73  (Xor32 x y) -> (XORL x y)
    74  (Xor16 x y) -> (XORL x y)
    75  (Xor8  x y) -> (XORL x y)
    76  
    77  (Neg64  x) -> (NEGQ x)
    78  (Neg32  x) -> (NEGL x)
    79  (Neg16  x) -> (NEGL x)
    80  (Neg8   x) -> (NEGL x)
    81  (Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
    82  (Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
    83  
    84  (Com64 x) -> (NOTQ x)
    85  (Com32 x) -> (NOTL x)
    86  (Com16 x) -> (NOTL x)
    87  (Com8  x) -> (NOTL x)
    88  
    89  // Lowering boolean ops
    90  (AndB x y) -> (ANDL x y)
    91  (OrB x y) -> (ORL x y)
    92  (Not x) -> (XORLconst [1] x)
    93  
    94  // Lowering pointer arithmetic
    95  (OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
    96  (OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
    97  (OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
    98  
    99  // Lowering other arithmetic
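        // Note: BSF leaves its destination undefined (and sets ZF) when its source is zero.
        // The 64-bit rule below therefore uses CMOVQEQ to substitute 64 in that case, and the
        // 32-bit rule ORs in bit 32 so the input is never zero and BSF returns 32 exactly
        // when the low 32 bits are all zero.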
   100  (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
   101  (Ctz32 x) -> (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
   102  
   103  (BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
   104  (BitLen32 x) -> (BitLen64 (MOVLQZX <typ.UInt64> x))
   105  
   106  (Bswap64 x) -> (BSWAPQ x)
   107  (Bswap32 x) -> (BSWAPL x)
   108  
   109  (PopCount64 x) -> (POPCNTQ x)
   110  (PopCount32 x) -> (POPCNTL x)
   111  (PopCount16 x) -> (POPCNTL (MOVWQZX <typ.UInt32> x))
   112  (PopCount8 x) -> (POPCNTL (MOVBQZX <typ.UInt32> x))
   113  
   114  (Sqrt x) -> (SQRTSD x)
   115  
   116  // Lowering extension
   117  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
   118  (SignExt8to16  x) -> (MOVBQSX x)
   119  (SignExt8to32  x) -> (MOVBQSX x)
   120  (SignExt8to64  x) -> (MOVBQSX x)
   121  (SignExt16to32 x) -> (MOVWQSX x)
   122  (SignExt16to64 x) -> (MOVWQSX x)
   123  (SignExt32to64 x) -> (MOVLQSX x)
   124  
   125  (ZeroExt8to16  x) -> (MOVBQZX x)
   126  (ZeroExt8to32  x) -> (MOVBQZX x)
   127  (ZeroExt8to64  x) -> (MOVBQZX x)
   128  (ZeroExt16to32 x) -> (MOVWQZX x)
   129  (ZeroExt16to64 x) -> (MOVWQZX x)
   130  (ZeroExt32to64 x) -> (MOVLQZX x)
   131  
   132  (Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63])
   133  
   134  // Lowering truncation
   135  // Because we ignore high parts of registers, truncates are just copies.
   136  (Trunc16to8  x) -> x
   137  (Trunc32to8  x) -> x
   138  (Trunc32to16 x) -> x
   139  (Trunc64to8  x) -> x
   140  (Trunc64to16 x) -> x
   141  (Trunc64to32 x) -> x
   142  
   143  // Lowering float <-> int
   144  (Cvt32to32F x) -> (CVTSL2SS x)
   145  (Cvt32to64F x) -> (CVTSL2SD x)
   146  (Cvt64to32F x) -> (CVTSQ2SS x)
   147  (Cvt64to64F x) -> (CVTSQ2SD x)
   148  
   149  (Cvt32Fto32 x) -> (CVTTSS2SL x)
   150  (Cvt32Fto64 x) -> (CVTTSS2SQ x)
   151  (Cvt64Fto32 x) -> (CVTTSD2SL x)
   152  (Cvt64Fto64 x) -> (CVTTSD2SQ x)
   153  
   154  (Cvt32Fto64F x) -> (CVTSS2SD x)
   155  (Cvt64Fto32F x) -> (CVTSD2SS x)
   156  
   157  (Round32F x) -> x
   158  (Round64F x) -> x
   159  
   160  // Lowering shifts
   161  // Unsigned shifts need to return 0 if the shift amount is >= the width of the shifted value.
   162  //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
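        // For example, the Lsh64x64 rule below computes, roughly (a Go sketch with
        // illustrative names; SHLQ itself only uses the low 6 bits of the shift amount):
        //	var mask uint64
        //	if y < 64 {
        //		mask = ^uint64(0)
        //	}
        //	result := (x << (y & 63)) & mask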
   163  (Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   164  (Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   165  (Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   166  (Lsh64x8  <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   167  
   168  (Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   169  (Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   170  (Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   171  (Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   172  
   173  (Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   174  (Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   175  (Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   176  (Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   177  
   178  (Lsh8x64 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   179  (Lsh8x32 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   180  (Lsh8x16 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   181  (Lsh8x8  <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   182  
   183  (Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   184  (Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   185  (Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   186  (Rsh64Ux8  <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   187  
   188  (Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   189  (Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   190  (Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   191  (Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   192  
   193  (Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
   194  (Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
   195  (Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
   196  (Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
   197  
   198  (Rsh8Ux64 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
   199  (Rsh8Ux32 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
   200  (Rsh8Ux16 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
   201  (Rsh8Ux8  <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
   202  
   203  // Signed right shift needs to return 0/-1 if the shift amount is >= the width of the shifted value.
   204  // We implement this by setting the shift amount to -1 (all ones) if it is >= the width.
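        // For example, the Rsh64x64 rule below computes, roughly (a Go sketch with
        // illustrative names; x is signed, so >> is an arithmetic shift):
        //	amt := y
        //	if y >= 64 {
        //		amt = 63 // the lowering ORs in all ones; SARQ only uses the low 6 bits
        //	}
        //	result := x >> amt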
   205  (Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
   206  (Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
   207  (Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
   208  (Rsh64x8  <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
   209  
   210  (Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
   211  (Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
   212  (Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
   213  (Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
   214  
   215  (Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
   216  (Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
   217  (Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
   218  (Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
   219  
   220  (Rsh8x64 <t> x y)  -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
   221  (Rsh8x32 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
   222  (Rsh8x16 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
   223  (Rsh8x8  <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
   224  
   225  // Lowering comparisons
   226  (Less64  x y) -> (SETL (CMPQ x y))
   227  (Less32  x y) -> (SETL (CMPL x y))
   228  (Less16  x y) -> (SETL (CMPW x y))
   229  (Less8   x y) -> (SETL (CMPB x y))
   230  (Less64U x y) -> (SETB (CMPQ x y))
   231  (Less32U x y) -> (SETB (CMPL x y))
   232  (Less16U x y) -> (SETB (CMPW x y))
   233  (Less8U  x y) -> (SETB (CMPB x y))
   234  // Use SETGF with reversed operands to dodge NaN case
   235  (Less64F x y) -> (SETGF (UCOMISD y x))
   236  (Less32F x y) -> (SETGF (UCOMISS y x))
   237  
   238  (Leq64  x y) -> (SETLE (CMPQ x y))
   239  (Leq32  x y) -> (SETLE (CMPL x y))
   240  (Leq16  x y) -> (SETLE (CMPW x y))
   241  (Leq8   x y) -> (SETLE (CMPB x y))
   242  (Leq64U x y) -> (SETBE (CMPQ x y))
   243  (Leq32U x y) -> (SETBE (CMPL x y))
   244  (Leq16U x y) -> (SETBE (CMPW x y))
   245  (Leq8U  x y) -> (SETBE (CMPB x y))
   246  // Use SETGEF with reversed operands to dodge NaN case
   247  (Leq64F x y) -> (SETGEF (UCOMISD y x))
   248  (Leq32F x y) -> (SETGEF (UCOMISS y x))
   249  
   250  (Greater64  x y) -> (SETG (CMPQ x y))
   251  (Greater32  x y) -> (SETG (CMPL x y))
   252  (Greater16  x y) -> (SETG (CMPW x y))
   253  (Greater8   x y) -> (SETG (CMPB x y))
   254  (Greater64U x y) -> (SETA (CMPQ x y))
   255  (Greater32U x y) -> (SETA (CMPL x y))
   256  (Greater16U x y) -> (SETA (CMPW x y))
   257  (Greater8U  x y) -> (SETA (CMPB x y))
   258  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here.
   259  // The bug is accommodated when the assembly is generated.
   260  (Greater64F x y) -> (SETGF (UCOMISD x y))
   261  (Greater32F x y) -> (SETGF (UCOMISS x y))
   262  
   263  (Geq64  x y) -> (SETGE (CMPQ x y))
   264  (Geq32  x y) -> (SETGE (CMPL x y))
   265  (Geq16  x y) -> (SETGE (CMPW x y))
   266  (Geq8   x y) -> (SETGE (CMPB x y))
   267  (Geq64U x y) -> (SETAE (CMPQ x y))
   268  (Geq32U x y) -> (SETAE (CMPL x y))
   269  (Geq16U x y) -> (SETAE (CMPW x y))
   270  (Geq8U  x y) -> (SETAE (CMPB x y))
   271  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here.
   272  // The bug is accommodated when the assembly is generated.
   273  (Geq64F x y) -> (SETGEF (UCOMISD x y))
   274  (Geq32F x y) -> (SETGEF (UCOMISS x y))
   275  
   276  (Eq64  x y) -> (SETEQ (CMPQ x y))
   277  (Eq32  x y) -> (SETEQ (CMPL x y))
   278  (Eq16  x y) -> (SETEQ (CMPW x y))
   279  (Eq8   x y) -> (SETEQ (CMPB x y))
   280  (EqB   x y) -> (SETEQ (CMPB x y))
   281  (EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
   282  (EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
   283  (Eq64F x y) -> (SETEQF (UCOMISD x y))
   284  (Eq32F x y) -> (SETEQF (UCOMISS x y))
   285  
   286  (Neq64  x y) -> (SETNE (CMPQ x y))
   287  (Neq32  x y) -> (SETNE (CMPL x y))
   288  (Neq16  x y) -> (SETNE (CMPW x y))
   289  (Neq8   x y) -> (SETNE (CMPB x y))
   290  (NeqB   x y) -> (SETNE (CMPB x y))
   291  (NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
   292  (NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
   293  (Neq64F x y) -> (SETNEF (UCOMISD x y))
   294  (Neq32F x y) -> (SETNEF (UCOMISS x y))
   295  
   296  (Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32
   297  
   298  // Lowering loads
   299  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
   300  (Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
   301  (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
   302  (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
   303  (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
   304  (Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
   305  
   306  // Lowering stores
   307  // These more-specific FP versions of the Store pattern should come first.
   308  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
   309  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
   310  
   311  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVQstore ptr val mem)
   312  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
   313  (Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
   314  (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
   315  
   316  // Lowering moves
   317  (Move [0] _ _ mem) -> mem
   318  (Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
   319  (Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
   320  (Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
   321  (Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
   322  (Move [16] dst src mem) && config.useSSE -> (MOVOstore dst (MOVOload src mem) mem)
   323  (Move [16] dst src mem) && !config.useSSE ->
   324  	(MOVQstore [8] dst (MOVQload [8] src mem)
   325  		(MOVQstore dst (MOVQload src mem) mem))
   326  (Move [3] dst src mem) ->
   327  	(MOVBstore [2] dst (MOVBload [2] src mem)
   328  		(MOVWstore dst (MOVWload src mem) mem))
   329  (Move [5] dst src mem) ->
   330  	(MOVBstore [4] dst (MOVBload [4] src mem)
   331  		(MOVLstore dst (MOVLload src mem) mem))
   332  (Move [6] dst src mem) ->
   333  	(MOVWstore [4] dst (MOVWload [4] src mem)
   334  		(MOVLstore dst (MOVLload src mem) mem))
   335  (Move [7] dst src mem) ->
   336  	(MOVLstore [3] dst (MOVLload [3] src mem)
   337  		(MOVLstore dst (MOVLload src mem) mem))
   338  (Move [s] dst src mem) && s > 8 && s < 16 ->
   339  	(MOVQstore [s-8] dst (MOVQload [s-8] src mem)
   340  		(MOVQstore dst (MOVQload src mem) mem))
   341  
   342  // Adjust moves to be a multiple of 16 bytes.
   343  (Move [s] dst src mem)
   344  	&& s > 16 && s%16 != 0 && s%16 <= 8 ->
   345  	(Move [s-s%16]
   346  		(OffPtr <dst.Type> dst [s%16])
   347  		(OffPtr <src.Type> src [s%16])
   348  		(MOVQstore dst (MOVQload src mem) mem))
   349  (Move [s] dst src mem)
   350  	&& s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE ->
   351  	(Move [s-s%16]
   352  		(OffPtr <dst.Type> dst [s%16])
   353  		(OffPtr <src.Type> src [s%16])
   354  		(MOVOstore dst (MOVOload src mem) mem))
   355  (Move [s] dst src mem)
   356  	&& s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE ->
   357  	(Move [s-s%16]
   358  		(OffPtr <dst.Type> dst [s%16])
   359  		(OffPtr <src.Type> src [s%16])
   360  		(MOVQstore [8] dst (MOVQload [8] src mem)
   361  			(MOVQstore dst (MOVQload src mem) mem)))
   362  
   363  // Medium copying uses a Duff's device.
   364  (Move [s] dst src mem)
   365  	&& s >= 32 && s <= 16*64 && s%16 == 0
   366  	&& !config.noDuffDevice ->
   367  	(DUFFCOPY [14*(64-s/16)] dst src mem)
   368  // 14 and 64 are magic constants.  14 is the number of bytes to encode:
   369  //	MOVUPS	(SI), X0
   370  //	ADDQ	$16, SI
   371  //	MOVUPS	X0, (DI)
   372  //	ADDQ	$16, DI
   373  // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
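        // For example, a 32-byte move (two 16-byte blocks) yields
        // DUFFCOPY [14*(64-32/16)] = DUFFCOPY [868], entering duffcopy two blocks before its end.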
   374  
   375  // Large copying uses REP MOVSQ.
   376  (Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 ->
   377  	(REPMOVSQ dst src (MOVQconst [s/8]) mem)
   378  
   379  // Lowering Zero instructions
   380  (Zero [0] _ mem) -> mem
   381  (Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
   382  (Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
   383  (Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
   384  (Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
   385  
   386  (Zero [3] destptr mem) ->
   387  	(MOVBstoreconst [makeValAndOff(0,2)] destptr
   388  		(MOVWstoreconst [0] destptr mem))
   389  (Zero [5] destptr mem) ->
   390  	(MOVBstoreconst [makeValAndOff(0,4)] destptr
   391  		(MOVLstoreconst [0] destptr mem))
   392  (Zero [6] destptr mem) ->
   393  	(MOVWstoreconst [makeValAndOff(0,4)] destptr
   394  		(MOVLstoreconst [0] destptr mem))
   395  (Zero [7] destptr mem) ->
   396  	(MOVLstoreconst [makeValAndOff(0,3)] destptr
   397  		(MOVLstoreconst [0] destptr mem))
   398  
   399  // Strip off any fractional word zeroing.
   400  (Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE ->
   401  	(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
   402  		(MOVQstoreconst [0] destptr mem))
   403  
   404  // Zero small numbers of words directly.
   405  (Zero [16] destptr mem) && !config.useSSE ->
   406  	(MOVQstoreconst [makeValAndOff(0,8)] destptr
   407  		(MOVQstoreconst [0] destptr mem))
   408  (Zero [24] destptr mem) && !config.useSSE ->
   409  	(MOVQstoreconst [makeValAndOff(0,16)] destptr
   410  		(MOVQstoreconst [makeValAndOff(0,8)] destptr
   411  			(MOVQstoreconst [0] destptr mem)))
   412  (Zero [32] destptr mem) && !config.useSSE ->
   413  	(MOVQstoreconst [makeValAndOff(0,24)] destptr
   414  		(MOVQstoreconst [makeValAndOff(0,16)] destptr
   415  			(MOVQstoreconst [makeValAndOff(0,8)] destptr
   416  				(MOVQstoreconst [0] destptr mem))))
   417  
   418  (Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE ->
   419  	(MOVQstoreconst [makeValAndOff(0,s-8)] destptr
   420  		(MOVQstoreconst [0] destptr mem))
   421  
   422  // Adjust zeros to be a multiple of 16 bytes.
   423  (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE ->
   424  	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
   425  		(MOVOstore destptr (MOVOconst [0]) mem))
   426  
   427  (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE ->
   428  	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
   429  		(MOVQstoreconst [0] destptr mem))
   430  
   431  (Zero [16] destptr mem) && config.useSSE ->
   432  	(MOVOstore destptr (MOVOconst [0]) mem)
   433  (Zero [32] destptr mem) && config.useSSE ->
   434  	(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
   435  		(MOVOstore destptr (MOVOconst [0]) mem))
   436  (Zero [48] destptr mem) && config.useSSE ->
   437  	(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
   438  		(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
   439  			(MOVOstore destptr (MOVOconst [0]) mem)))
   440  (Zero [64] destptr mem) && config.useSSE ->
   441  	(MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0])
   442  		(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
   443  			(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
   444  				(MOVOstore destptr (MOVOconst [0]) mem))))
   445  
   446  // Medium zeroing uses a Duff's device.
   447  (Zero [s] destptr mem)
   448  	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice ->
   449  	(DUFFZERO [s] destptr (MOVOconst [0]) mem)
   450  
   451  // Large zeroing uses REP STOSQ.
   452  (Zero [s] destptr mem)
   453  	&& (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
   454  	&& s%8 == 0 ->
   455  	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
   456  
   457  // Lowering constants
   458  (Const8   [val]) -> (MOVLconst [val])
   459  (Const16  [val]) -> (MOVLconst [val])
   460  (Const32  [val]) -> (MOVLconst [val])
   461  (Const64  [val]) -> (MOVQconst [val])
   462  (Const32F [val]) -> (MOVSSconst [val])
   463  (Const64F [val]) -> (MOVSDconst [val])
   464  (ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
   465  (ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
   466  (ConstBool [b]) -> (MOVLconst [b])
   467  
   468  // Lowering calls
   469  (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
   470  (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
   471  (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
   472  
   473  // Miscellaneous
   474  (Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
   475  (Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
   476  (IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
   477  (IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
   478  (IsInBounds idx len) && config.PtrSize == 8 -> (SETB (CMPQ idx len))
   479  (IsInBounds idx len) && config.PtrSize == 4 -> (SETB (CMPL idx len))
   480  (IsSliceInBounds idx len) && config.PtrSize == 8 -> (SETBE (CMPQ idx len))
   481  (IsSliceInBounds idx len) && config.PtrSize == 4 -> (SETBE (CMPL idx len))
   482  (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
   483  (GetG mem) -> (LoweredGetG mem)
   484  (GetClosurePtr) -> (LoweredGetClosurePtr)
   485  (GetCallerPC) -> (LoweredGetCallerPC)
   486  (GetCallerSP) -> (LoweredGetCallerSP)
   487  (Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
   488  (Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
   489  
   490  (MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLmem [off] {sym} ptr x mem)
   491  (MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEmem [off] {sym} ptr x mem)
   492  (MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGmem [off] {sym} ptr x mem)
   493  (MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEmem [off] {sym} ptr x mem)
   494  (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQmem [off] {sym} ptr x mem)
   495  (MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEmem [off] {sym} ptr x mem)
   496  (MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBmem [off] {sym} ptr x mem)
   497  (MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEmem [off] {sym} ptr x mem)
   498  (MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAmem [off] {sym} ptr x mem)
   499  (MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEmem [off] {sym} ptr x mem)
   500  
   501  // block rewrites
   502  (If (SETL  cmp) yes no) -> (LT  cmp yes no)
   503  (If (SETLE cmp) yes no) -> (LE  cmp yes no)
   504  (If (SETG  cmp) yes no) -> (GT  cmp yes no)
   505  (If (SETGE cmp) yes no) -> (GE  cmp yes no)
   506  (If (SETEQ cmp) yes no) -> (EQ  cmp yes no)
   507  (If (SETNE cmp) yes no) -> (NE  cmp yes no)
   508  (If (SETB  cmp) yes no) -> (ULT cmp yes no)
   509  (If (SETBE cmp) yes no) -> (ULE cmp yes no)
   510  (If (SETA  cmp) yes no) -> (UGT cmp yes no)
   511  (If (SETAE cmp) yes no) -> (UGE cmp yes no)
   512  
   513  // Special case for floating point - LF/LEF not generated
   514  (If (SETGF  cmp) yes no) -> (UGT  cmp yes no)
   515  (If (SETGEF cmp) yes no) -> (UGE  cmp yes no)
   516  (If (SETEQF cmp) yes no) -> (EQF  cmp yes no)
   517  (If (SETNEF cmp) yes no) -> (NEF  cmp yes no)
   518  
   519  (If cond yes no) -> (NE (TESTB cond cond) yes no)
   520  
   521  // Atomic loads.  Other than preserving their ordering with respect to other loads, nothing special here.
   522  (AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
   523  (AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
   524  (AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
   525  (AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem)
   526  
   527  // Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
   528  // TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
   529  (AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
   530  (AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
   531  (AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
   532  (AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
   533  
   534  // Atomic exchanges.
   535  (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
   536  (AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem)
   537  
   538  // Atomic adds.
   539  (AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem))
   540  (AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (XADDQlock val ptr mem))
   541  (Select0 <t> (AddTupleFirst32 val tuple)) -> (ADDL val (Select0 <t> tuple))
   542  (Select1     (AddTupleFirst32   _ tuple)) -> (Select1 tuple)
   543  (Select0 <t> (AddTupleFirst64 val tuple)) -> (ADDQ val (Select0 <t> tuple))
   544  (Select1     (AddTupleFirst64   _ tuple)) -> (Select1 tuple)
   545  
   546  // Atomic compare and swap.
   547  (AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem)
   548  (AtomicCompareAndSwap64 ptr old new_ mem) -> (CMPXCHGQlock ptr old new_ mem)
   549  
   550  // Atomic memory updates.
   551  (AtomicAnd8 ptr val mem) -> (ANDBlock ptr val mem)
   552  (AtomicOr8 ptr val mem) -> (ORBlock ptr val mem)
   553  
   554  // ***************************
   555  // Above: lowering rules
   556  // Below: optimizations
   557  // ***************************
   558  // TODO: Should the optimizations be a separate pass?
   559  
   560  // Fold boolean tests into blocks
   561  (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT  cmp yes no)
   562  (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE  cmp yes no)
   563  (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT  cmp yes no)
   564  (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE  cmp yes no)
   565  (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ  cmp yes no)
   566  (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE  cmp yes no)
   567  (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) -> (ULT cmp yes no)
   568  (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
   569  (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) -> (UGT cmp yes no)
   570  (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
   571  
   572  // Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
   573  // Note that ULT and SETB check the carry flag; they are identical to CS and SETCS.
   574  // Same, mutatis mutandis, for UGE and SETAE, and CC and SETCC.
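        // For example (a sketch; the exact generic lowering may vary), a Go condition like
        //	x&(1<<b) != 0
        // reaches this point as a SHLQ/TESTQ pattern, which the rules below collapse
        // into a single BTQ, branching or SETting on the carry flag.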
   575  (NE (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (ULT (BTL x y))
   576  (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (UGE (BTL x y))
   577  (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (ULT (BTQ x y))
   578  (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (UGE (BTQ x y))
   579  (NE (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (ULT (BTLconst [log2(c)] x))
   580  (EQ (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (UGE (BTLconst [log2(c)] x))
   581  (NE (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (ULT (BTQconst [log2(c)] x))
   582  (EQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (UGE (BTQconst [log2(c)] x))
   583  (NE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (ULT (BTQconst [log2(c)] x))
   584  (EQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (UGE (BTQconst [log2(c)] x))
   585  (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SETB  (BTL x y))
   586  (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SETAE (BTL x y))
   587  (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SETB  (BTQ x y))
   588  (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SETAE (BTQ x y))
   589  (SETNE (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETB  (BTLconst [log2(c)] x))
   590  (SETEQ (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAE (BTLconst [log2(c)] x))
   591  (SETNE (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB  (BTQconst [log2(c)] x))
   592  (SETEQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x))
   593  (SETNE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB  (BTQconst [log2(c)] x))
   594  (SETEQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x))
   595  // SET..mem variant
   596  (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETBmem  [off] {sym} ptr (BTL x y) mem)
   597  (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTL x y) mem)
   598  (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETBmem  [off] {sym} ptr (BTQ x y) mem)
   599  (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQ x y) mem)
   600  (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETBmem  [off] {sym} ptr (BTLconst [log2(c)] x) mem)
   601  (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
   602  (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem  [off] {sym} ptr (BTQconst [log2(c)] x) mem)
   603  (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
   604  (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem  [off] {sym} ptr (BTQconst [log2(c)] x) mem)
   605  (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
   606  
   607  // Fold boolean negation into SETcc.
   608  (XORLconst [1] (SETNE x)) -> (SETEQ x)
   609  (XORLconst [1] (SETEQ x)) -> (SETNE x)
   610  (XORLconst [1] (SETL  x)) -> (SETGE x)
   611  (XORLconst [1] (SETGE x)) -> (SETL  x)
   612  (XORLconst [1] (SETLE x)) -> (SETG  x)
   613  (XORLconst [1] (SETG  x)) -> (SETLE x)
   614  (XORLconst [1] (SETB  x)) -> (SETAE x)
   615  (XORLconst [1] (SETAE x)) -> (SETB  x)
   616  (XORLconst [1] (SETBE x)) -> (SETA  x)
   617  (XORLconst [1] (SETA  x)) -> (SETBE x)
   618  
   619  // Convert BTQconst to BTLconst if possible. It has a shorter encoding.
   620  (BTQconst [c] x) && c < 32 -> (BTLconst [c] x)
   621  
   622  // Special case for floating point - LF/LEF not generated
   623  (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) -> (UGT  cmp yes no)
   624  (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE  cmp yes no)
   625  (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF  cmp yes no)
   626  (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF  cmp yes no)
   627  
   628  // Disabled because it interferes with the pattern match above and makes worse code.
   629  // (SETNEF x) -> (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
   630  // (SETEQF x) -> (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
   631  
   632  // fold constants into instructions
   633  (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
   634  (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
   635  
   636  (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
   637  (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
   638  (SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
   639  (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
   640  
   641  (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
   642  (MULL x (MOVLconst [c])) -> (MULLconst [c] x)
   643  
   644  (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
   645  (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
   646  
   647  (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
   648  (ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)
   649  
   650  (XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
   651  (XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x)
   652  
   653  (MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
   654  (MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
   655  
   656  (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
   657  (ORL x (MOVLconst [c])) -> (ORLconst [c] x)
   658  
   659  (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
   660  (XORL x (MOVLconst [c])) -> (XORLconst [c] x)
   661  
   662  (SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
   663  (SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)
   664  
   665  (SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
   666  (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
   667  
   668  (SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
   669  (SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)
   670  
   671  (SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
   672  (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
   673  
   674  (SHRW x (MOVQconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
   675  (SHRW x (MOVLconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
   676  (SHRW _ (MOVQconst [c])) && c&31 >= 16 -> (MOVLconst [0])
   677  (SHRW _ (MOVLconst [c])) && c&31 >= 16 -> (MOVLconst [0])
   678  
   679  (SHRB x (MOVQconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
   680  (SHRB x (MOVLconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
   681  (SHRB _ (MOVQconst [c])) && c&31 >= 8 -> (MOVLconst [0])
   682  (SHRB _ (MOVLconst [c])) && c&31 >= 8 -> (MOVLconst [0])
   683  
   684  (SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
   685  (SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)
   686  
   687  (SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
   688  (SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
   689  
   690  (SARW x (MOVQconst [c])) -> (SARWconst [min(c&31,15)] x)
   691  (SARW x (MOVLconst [c])) -> (SARWconst [min(c&31,15)] x)
   692  
   693  (SARB x (MOVQconst [c])) -> (SARBconst [min(c&31,7)] x)
   694  (SARB x (MOVLconst [c])) -> (SARBconst [min(c&31,7)] x)
   695  
   696  // Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
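        // For example, (SHLQ x (ADDQconst [64] y)) becomes (SHLQ x y): the hardware uses only
        // the low 6 bits of the shift amount, and (y+64)&63 == y&63. Likewise
        // (SHLQ x (ANDQconst [63] y)) becomes (SHLQ x y), since that AND leaves those bits alone.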
   697  (SHLQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SHLQ x y)
   698  (SHRQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SHRQ x y)
   699  (SARQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SARQ x y)
   700  (SHLQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SHLQ x (NEGQ <t> y))
   701  (SHRQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SHRQ x (NEGQ <t> y))
   702  (SARQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SARQ x (NEGQ <t> y))
   703  (SHLQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SHLQ x y)
   704  (SHRQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SHRQ x y)
   705  (SARQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SARQ x y)
   706  (SHLQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SHLQ x (NEGQ <t> y))
   707  (SHRQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SHRQ x (NEGQ <t> y))
   708  (SARQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SARQ x (NEGQ <t> y))
   709  
   710  (SHLL x (ADDQconst [c] y)) && c & 31 == 0 -> (SHLL x y)
   711  (SHRL x (ADDQconst [c] y)) && c & 31 == 0 -> (SHRL x y)
   712  (SARL x (ADDQconst [c] y)) && c & 31 == 0 -> (SARL x y)
   713  (SHLL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SHLL x (NEGQ <t> y))
   714  (SHRL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SHRL x (NEGQ <t> y))
   715  (SARL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SARL x (NEGQ <t> y))
   716  (SHLL x (ANDQconst [c] y)) && c & 31 == 31 -> (SHLL x y)
   717  (SHRL x (ANDQconst [c] y)) && c & 31 == 31 -> (SHRL x y)
   718  (SARL x (ANDQconst [c] y)) && c & 31 == 31 -> (SARL x y)
   719  (SHLL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SHLL x (NEGQ <t> y))
   720  (SHRL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SHRL x (NEGQ <t> y))
   721  (SARL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SARL x (NEGQ <t> y))
   722  
   723  (SHLQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SHLQ x y)
   724  (SHRQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SHRQ x y)
   725  (SARQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SARQ x y)
   726  (SHLQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SHLQ x (NEGL <t> y))
   727  (SHRQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SHRQ x (NEGL <t> y))
   728  (SARQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SARQ x (NEGL <t> y))
   729  (SHLQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SHLQ x y)
   730  (SHRQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SHRQ x y)
   731  (SARQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SARQ x y)
   732  (SHLQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SHLQ x (NEGL <t> y))
   733  (SHRQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SHRQ x (NEGL <t> y))
   734  (SARQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SARQ x (NEGL <t> y))
   735  
   736  (SHLL x (ADDLconst [c] y)) && c & 31 == 0 -> (SHLL x y)
   737  (SHRL x (ADDLconst [c] y)) && c & 31 == 0 -> (SHRL x y)
   738  (SARL x (ADDLconst [c] y)) && c & 31 == 0 -> (SARL x y)
   739  (SHLL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SHLL x (NEGL <t> y))
   740  (SHRL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SHRL x (NEGL <t> y))
   741  (SARL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SARL x (NEGL <t> y))
   742  (SHLL x (ANDLconst [c] y)) && c & 31 == 31 -> (SHLL x y)
   743  (SHRL x (ANDLconst [c] y)) && c & 31 == 31 -> (SHRL x y)
   744  (SARL x (ANDLconst [c] y)) && c & 31 == 31 -> (SARL x y)
   745  (SHLL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SHLL x (NEGL <t> y))
   746  (SHRL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SHRL x (NEGL <t> y))
   747  (SARL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SARL x (NEGL <t> y))
   748  
   749  // Constant rotate instructions
   750  (ADDQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])
   751  ( ORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])
   752  (XORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])
   753  
   754  (ADDL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
   755  ( ORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
   756  (XORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
   757  
   758  (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
   759  ( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
   760  (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
   761  
   762  (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
   763  ( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
   764  (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
   765  
   766  (ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
   767  (ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
   768  (ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
   769  (ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
   770  
   771  // Non-constant rotates.
   772  // We want to issue a rotate when the Go source contains code like
   773  //     y &= 63
   774  //     x << y | x >> (64-y)
   775  // The shift rules above convert << to SHLx and >> to SHRx.
   776  // SHRx converts its shift argument from 64-y to -y.
   777  // A tricky situation occurs when y==0. Then the original code would be:
   778  //     x << 0 | x >> 64
   779  // But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
   780  // to force the second term to 0. We don't need that mask, but we must match
   781  // it in order to strip it out.
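        // Dropping the mask is safe: for y != 0 it is all ones, so the AND is a no-op, and
        // for y == 0 it forces the x>>64 term (in the 64-bit case) to zero, leaving
        // x<<0 | 0 == x, which is exactly what a rotate by 0 produces.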
   782  (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) -> (ROLQ x y)
   783  (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) -> (ROLQ x y)
   784  (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) -> (RORQ x y)
   785  (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) -> (RORQ x y)
   786  
   787  (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) -> (ROLL x y)
   788  (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) -> (ROLL x y)
   789  (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) -> (RORL x y)
   790  (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) -> (RORL x y)
   791  
   792  // Help with rotate detection
   793  (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) -> (FlagLT_ULT)
   794  (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst  [7] _))) [32]) -> (FlagLT_ULT)
   795  
   796  (ORL (SHLL x (ANDQconst y [15]))
   797       (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))
   798             (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
   799    && v.Type.Size() == 2
   800    -> (ROLW x y)
   801  (ORL (SHLL x (ANDLconst y [15]))
   802       (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))
   803             (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
   804    && v.Type.Size() == 2
   805    -> (ROLW x y)
   806  (ORL (SHRW x (ANDQconst y [15]))
   807       (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
   808    && v.Type.Size() == 2
   809    -> (RORW x y)
   810  (ORL (SHRW x (ANDLconst y [15]))
   811       (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
   812    && v.Type.Size() == 2
   813    -> (RORW x y)
   814  
   815  (ORL (SHLL x (ANDQconst y [ 7]))
   816       (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))
   817             (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
   818    && v.Type.Size() == 1
   819    -> (ROLB x y)
   820  (ORL (SHLL x (ANDLconst y [ 7]))
   821       (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))
   822             (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
   823    && v.Type.Size() == 1
   824    -> (ROLB x y)
   825  (ORL (SHRB x (ANDQconst y [ 7]))
   826       (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
   827    && v.Type.Size() == 1
   828    -> (RORB x y)
   829  (ORL (SHRB x (ANDLconst y [ 7]))
   830       (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
   831    && v.Type.Size() == 1
   832    -> (RORB x y)
   833  
   834  // rotate left negative = rotate right
   835  (ROLQ x (NEGQ y)) -> (RORQ x y)
   836  (ROLQ x (NEGL y)) -> (RORQ x y)
   837  (ROLL x (NEGQ y)) -> (RORL x y)
   838  (ROLL x (NEGL y)) -> (RORL x y)
   839  (ROLW x (NEGQ y)) -> (RORW x y)
   840  (ROLW x (NEGL y)) -> (RORW x y)
   841  (ROLB x (NEGQ y)) -> (RORB x y)
   842  (ROLB x (NEGL y)) -> (RORB x y)
   843  
   844  // rotate right negative = rotate left
   845  (RORQ x (NEGQ y)) -> (ROLQ x y)
   846  (RORQ x (NEGL y)) -> (ROLQ x y)
   847  (RORL x (NEGQ y)) -> (ROLL x y)
   848  (RORL x (NEGL y)) -> (ROLL x y)
   849  (RORW x (NEGQ y)) -> (ROLW x y)
   850  (RORW x (NEGL y)) -> (ROLW x y)
   851  (RORB x (NEGQ y)) -> (ROLB x y)
   852  (RORB x (NEGL y)) -> (ROLB x y)
   853  
   854  // rotate by constants
   855  (ROLQ x (MOVQconst [c])) -> (ROLQconst [c&63] x)
   856  (ROLQ x (MOVLconst [c])) -> (ROLQconst [c&63] x)
   857  (ROLL x (MOVQconst [c])) -> (ROLLconst [c&31] x)
   858  (ROLL x (MOVLconst [c])) -> (ROLLconst [c&31] x)
   859  (ROLW x (MOVQconst [c])) -> (ROLWconst [c&15] x)
   860  (ROLW x (MOVLconst [c])) -> (ROLWconst [c&15] x)
   861  (ROLB x (MOVQconst [c])) -> (ROLBconst [c&7 ] x)
   862  (ROLB x (MOVLconst [c])) -> (ROLBconst [c&7 ] x)
   863  
   864  (RORQ x (MOVQconst [c])) -> (ROLQconst [(-c)&63] x)
   865  (RORQ x (MOVLconst [c])) -> (ROLQconst [(-c)&63] x)
   866  (RORL x (MOVQconst [c])) -> (ROLLconst [(-c)&31] x)
   867  (RORL x (MOVLconst [c])) -> (ROLLconst [(-c)&31] x)
   868  (RORW x (MOVQconst [c])) -> (ROLWconst [(-c)&15] x)
   869  (RORW x (MOVLconst [c])) -> (ROLWconst [(-c)&15] x)
   870  (RORB x (MOVQconst [c])) -> (ROLBconst [(-c)&7 ] x)
   871  (RORB x (MOVLconst [c])) -> (ROLBconst [(-c)&7 ] x)
   872  
   873  // Constant shift simplifications
   874  (SHLQconst x [0]) -> x
   875  (SHRQconst x [0]) -> x
   876  (SARQconst x [0]) -> x
   877  
   878  (SHLLconst x [0]) -> x
   879  (SHRLconst x [0]) -> x
   880  (SARLconst x [0]) -> x
   881  
   882  (SHRWconst x [0]) -> x
   883  (SARWconst x [0]) -> x
   884  
   885  (SHRBconst x [0]) -> x
   886  (SARBconst x [0]) -> x
   887  
   888  (ROLQconst x [0]) -> x
   889  (ROLLconst x [0]) -> x
   890  (ROLWconst x [0]) -> x
   891  (ROLBconst x [0]) -> x
   892  
   893  // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
   894  // because the x86 instructions are defined to use all 5 bits of the shift even
   895  // for the small shifts. I don't think we'll ever generate a weird shift (e.g.
   896  // (SHRW x (MOVLconst [24]))), but just in case.
   897  
   898  (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
   899  (CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
   900  (CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
   901  (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
   902  (CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
   903  (CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
   904  (CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
   905  (CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
   906  
   907  // Using MOVZX instead of AND is cheaper.
   908  (ANDLconst [0xFF] x) -> (MOVBQZX x)
   909  (ANDLconst [0xFFFF] x) -> (MOVWQZX x)
   910  (ANDQconst [0xFF] x) -> (MOVBQZX x)
   911  (ANDQconst [0xFFFF] x) -> (MOVWQZX x)
   912  (ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x)
   913  
   914  // strength reduction
   915  // Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
   916  //    1 - addq, shlq, leaq, negq
   917  //    3 - imulq
   918  // This limits the rewrites to two instructions.
   919  // TODO: 27, 81
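        // For example, the rule for 11 below computes
        //	LEAQ2 x (LEAQ4 x x)  =  x + 2*(x + 4*x)  =  11*x
        // with two 1-cycle LEAQs rather than one 3-cycle IMULQ.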
   920  (MULQconst [-1] x) -> (NEGQ x)
   921  (MULQconst [0] _) -> (MOVQconst [0])
   922  (MULQconst [1] x) -> x
   923  (MULQconst [3] x) -> (LEAQ2 x x)
   924  (MULQconst [5] x) -> (LEAQ4 x x)
   925  (MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x)
   926  (MULQconst [9] x) -> (LEAQ8 x x)
   927  (MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x))
   928  (MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x))
   929  (MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
   930  (MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x))
   931  (MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x))
   932  (MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x))
   933  (MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x))
   934  
   935  (MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
   936  (MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
   937  (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
   938  (MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
   939  (MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
   940  (MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
   941  (MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
   942  (MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
   943  
   944  // combine add/shift into LEAQ
   945  (ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
   946  (ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
   947  (ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
   948  (ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
   949  (ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
   950  
   951  // combine ADDQ/ADDQconst into LEAQ1
   952  (ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y)
   953  (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
   954  
   955  // fold ADDQ into LEAQ
   956  (ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   957  (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   958  (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   959  (ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   960  
   961  // fold ADDQconst into LEAQx
   962  (ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
   963  (ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
   964  (ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
   965  (ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
   966  (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
   967  (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
   968  (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
   969  (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
   970  (LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
   971  (LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
   972  (LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
   973  
   974  // fold shifts into LEAQx
   975  (LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
   976  (LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
   977  (LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
   978  (LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
   979  (LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
   980  (LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)
   981  
   982  // reverse ordering of compare instruction
   983  (SETL (InvertFlags x)) -> (SETG x)
   984  (SETG (InvertFlags x)) -> (SETL x)
   985  (SETB (InvertFlags x)) -> (SETA x)
   986  (SETA (InvertFlags x)) -> (SETB x)
   987  (SETLE (InvertFlags x)) -> (SETGE x)
   988  (SETGE (InvertFlags x)) -> (SETLE x)
   989  (SETBE (InvertFlags x)) -> (SETAE x)
   990  (SETAE (InvertFlags x)) -> (SETBE x)
   991  (SETEQ (InvertFlags x)) -> (SETEQ x)
   992  (SETNE (InvertFlags x)) -> (SETNE x)
   993  
   994  (SETLmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGmem [off] {sym} ptr x mem)
   995  (SETGmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLmem [off] {sym} ptr x mem)
   996  (SETBmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAmem [off] {sym} ptr x mem)
   997  (SETAmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBmem [off] {sym} ptr x mem)
   998  (SETLEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGEmem [off] {sym} ptr x mem)
   999  (SETGEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLEmem [off] {sym} ptr x mem)
  1000  (SETBEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAEmem [off] {sym} ptr x mem)
  1001  (SETAEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBEmem [off] {sym} ptr x mem)
  1002  (SETEQmem [off] {sym} ptr (InvertFlags x) mem) -> (SETEQmem [off] {sym} ptr x mem)
  1003  (SETNEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETNEmem [off] {sym} ptr x mem)
  1004  
  1005  // sign-extended loads
  1006  // Note: The combined instruction must end up in the same block
  1007  // as the original load. If not, we end up making a value with
  1008  // memory type live in two different blocks, which can lead to
  1009  // multiple memory values alive simultaneously.
  1010  // Make sure we don't combine these ops if the load has another use.
  1011  // This prevents a single load from being split into multiple loads
  1012  // which then might return different values.  See test/atomicload.go.
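        // For example (the first rule below), the pair
        //	x = MOVBload [off] {sym} ptr mem
        //	v = MOVBQSX x
        // becomes a single MOVBQSXload in x's block, but only when x has no other uses.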
  1013  (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1014  (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1015  (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1016  (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1017  (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1018  (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1019  (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1020  (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1021  (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  1022  (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  1023  (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  1024  (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1025  (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1026  (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1027  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  1028  (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  1029  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
  1030  (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
  1031  
  1032  (MOVLQZX x) && zeroUpper32Bits(x,3) -> x
  1033  
  1034  (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
  1035  (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
  1036  (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
  1037  (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
  1038  (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
  1039  
  1040  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
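        // For example, after *p = b a reload y := *p (p of type *byte) becomes
        // (MOVBQZX b) rather than b itself: the store wrote only the low 8 bits,
        // so the value must be re-zero-extended to match what MOVBload would return.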
  1041  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQZX x)
  1042  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQZX x)
  1043  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQZX x)
  1044  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
  1045  (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQSX x)
  1046  (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQSX x)
  1047  (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQSX x)
  1048  
  1049  // Fold extensions and ANDs together.
  1050  (MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
  1051  (MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
  1052  (MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
  1053  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
  1054  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
  1055  (MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)
  1056  
  1057  // Don't extend before storing
  1058  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
  1059  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
  1060  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
  1061  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
  1062  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
  1063  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
  1064  
  1065  // fold constants into memory operations
  1066  // Note that this is not always a good idea because if not all the uses of
  1067  // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
  1068  // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
  1069  // Nevertheless, let's do it!
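        // For instance, (MOVQload [8] {s} (ADDQconst [16] ptr) mem) becomes
        // (MOVQload [24] {s} ptr mem): the constant is absorbed into the load's
        // displacement, and the ADDQconst can die if it has no other uses.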
  1070  (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload  [off1+off2] {sym} ptr mem)
  1071  (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
  1072  (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
  1073  (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
  1074  (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
  1075  (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
  1076  (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload  [off1+off2] {sym} ptr mem)
  1077  
  1078  (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore  [off1+off2] {sym} ptr val mem)
  1079  (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
  1080  (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
  1081  (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
  1082  (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
  1083  (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
  1084  (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore  [off1+off2] {sym} ptr val mem)
  1085  
  1086  // Fold constants into stores.
  1087  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
  1088  	(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
  1089  (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
  1090  	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
  1091  (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
  1092  	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
  1093  (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
  1094  	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
  1095  
  1096  // Fold address offsets into constant stores.
  1097  (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1098  	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1099  (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1100  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1101  (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1102  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1103  (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1104  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1105  
  1106  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
  1107  // what variables are being read/written by the ops.
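        // For instance, a (MOVQload [0] ...) whose address is (LEAQ [8] {x} base)
        // becomes (MOVQload [8] {x} base mem); canMergeSym permits the merge when
        // at least one of the two symbols is nil.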
  1108  (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1109  	(MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1110  (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1111  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1112  (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1113  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1114  (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1115  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1116  (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1117  	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1118  (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1119  	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1120  (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1121  	(MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1122  
  1123  (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1124  	(MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1125  (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1126  	(MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1127  (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1128  	(MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1129  
  1130  (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1131  	(MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1132  (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1133  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1134  (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1135  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1136  (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1137  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1138  (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1139  	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1140  (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1141  	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1142  (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1143  	(MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1144  
  1145  (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1146  	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1147  (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1148  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1149  (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1150  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1151  (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1152  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1153  
  1154  // generating indexed loads and stores
  1155  (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1156  	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1157  (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1158  	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1159  (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1160  	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1161  (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1162  	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1163  (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1164  	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1165  (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1166  	(MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1167  (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1168  	(MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1169  (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1170  	(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1171  (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1172  	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1173  (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1174  	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1175  (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1176  	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1177  (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1178  	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1179  
  1180  (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1181  	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1182  (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1183  	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1184  (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1185  	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1186  (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1187  	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1188  (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1189  	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1190  (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1191  	(MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1192  (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1193  	(MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1194  (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1195  	(MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1196  (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1197  	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1198  (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1199  	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1200  (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1201  	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1202  (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1203  	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1204  
  1205  (MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
  1206  (MOVWload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
  1207  (MOVLload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
  1208  (MOVQload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVQloadidx1 [off] {sym} ptr idx mem)
  1209  (MOVSSload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
  1210  (MOVSDload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
  1211  (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
  1212  (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
  1213  (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
  1214  (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVQstoreidx1 [off] {sym} ptr idx val mem)
  1215  (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
  1216  (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
  1217  
  1218  (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1219  	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1220  (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1221  	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1222  (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1223  	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1224  (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1225  	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1226  (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1227  	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1228  (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1229  	(MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1230  (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1231  	(MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1232  
  1233  (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
  1234  (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
  1235  (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
  1236  (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
  1237  
  1238  // combine SHLQ into indexed loads and stores
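        // These typically come from slice indexing: for s of type []int32, s[i]
        // produces (MOVLloadidx1 ptr (SHLQconst [2] i) mem), which becomes
        // MOVLloadidx4 so the scaling is done by the addressing mode.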
  1239  (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
  1240  (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
  1241  (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVLloadidx8 [c] {sym} ptr idx mem)
  1242  (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem)
  1243  (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVSSloadidx4 [c] {sym} ptr idx mem)
  1244  (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVSDloadidx8 [c] {sym} ptr idx mem)
  1245  (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
  1246  (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
  1247  (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVLstoreidx8 [c] {sym} ptr idx val mem)
  1248  (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem)
  1249  (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
  1250  (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
  1251  (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
  1252  (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
  1253  (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
  1254  
  1255  // combine ADDQ into indexed loads and stores
  1256  (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
  1257  (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
  1258  (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
  1259  (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  1260  (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
  1261  (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx8 [c+d] {sym} ptr idx mem)
  1262  (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
  1263  (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
  1264  (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
  1265  (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
  1266  (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
  1267  (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
  1268  
  1269  (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
  1270  (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
  1271  (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
  1272  (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
  1273  (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
  1274  (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
  1275  (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
  1276  (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
  1277  (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
  1278  (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
  1279  (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
  1280  (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
  1281  
  1282  (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
  1283  (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
  1284  (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
  1285  (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  1286  (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
  1287  (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+8*d) -> (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
  1288  (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
  1289  (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
  1290  (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d)   -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
  1291  (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
  1292  (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d)   -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
  1293  (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
  1294  
  1295  (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
  1296  (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
  1297  (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
  1298  (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
  1299  (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
  1300  (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+8*d) -> (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
  1301  (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
  1302  (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
  1303  (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d)   -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
  1304  (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
  1305  (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d)   -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
  1306  (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
  1307  
  1308  (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1309  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1310  (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1311  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1312  (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1313  	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1314  (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1315  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1316  (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1317  	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1318  (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1319  	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1320  (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1321  	(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1322  
  1323  (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1324  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1325  (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1326  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1327  (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) ->
  1328  	(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
  1329  (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1330  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1331  (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) ->
  1332  	(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
  1333  (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1334  	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1335  (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) ->
  1336  	(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
  1337  
  1338  // fold LEAQs together
  1339  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1340        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  1341  
  1342  // LEAQ into LEAQ1
  1343  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1344         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1345  
  1346  // LEAQ1 into LEAQ
  1347  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1348         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1349  
  1350  // LEAQ into LEAQ[248]
  1351  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1352         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1353  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1354         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1355  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1356         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1357  
  1358  // LEAQ[248] into LEAQ
  1359  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1360        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1361  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1362        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1363  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1364        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1365  
  1366  // Absorb InvertFlags into branches.
  1367  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
  1368  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
  1369  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
  1370  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
  1371  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
  1372  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
  1373  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
  1374  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
  1375  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
  1376  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
  1377  
  1378  // Constant comparisons.
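        // The flag constants record both the signed and the unsigned ordering.
        // For example, (CMPQconst (MOVQconst [-1]) [0]) has x<y as signed values
        // but uint64(x)>uint64(y), so it folds to (FlagLT_UGT).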
  1379  (CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
  1380  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
  1381  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
  1382  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
  1383  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
  1384  (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
  1385  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
  1386  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
  1387  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
  1388  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
  1389  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
  1390  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
  1391  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
  1392  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
  1393  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
  1394  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
  1395  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
  1396  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
  1397  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
  1398  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
  1399  
  1400  // Other known comparisons.
  1401  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
  1402  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
  1403  (CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
  1404  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
  1405  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
  1406  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1407  (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1408  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
  1409  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
  1410  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
  1411  
  1412  // TODO: DIVxU also.
  1413  
  1414  // Absorb flag constants into SBB ops.
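        // SBBQcarrymask/SBBLcarrymask compute 0 - carry, i.e. -1 if the carry flag
        // is set and 0 otherwise. Unsigned less-than (the *_ULT flag values) is the
        // carry-set case, which is why those cases below produce -1.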
  1415  (SBBQcarrymask (FlagEQ)) -> (MOVQconst [0])
  1416  (SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1])
  1417  (SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0])
  1418  (SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1])
  1419  (SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0])
  1420  (SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
  1421  (SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
  1422  (SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
  1423  (SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
  1424  (SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
  1425  
  1426  // Absorb flag constants into branches.
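        // A (First nil yes no) block unconditionally takes its first successor, so a
        // statically known flag value picks the branch target directly; the successors
        // are swapped when the condition is known false.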
  1427  (EQ (FlagEQ) yes no) -> (First nil yes no)
  1428  (EQ (FlagLT_ULT) yes no) -> (First nil no yes)
  1429  (EQ (FlagLT_UGT) yes no) -> (First nil no yes)
  1430  (EQ (FlagGT_ULT) yes no) -> (First nil no yes)
  1431  (EQ (FlagGT_UGT) yes no) -> (First nil no yes)
  1432  
  1433  (NE (FlagEQ) yes no) -> (First nil no yes)
  1434  (NE (FlagLT_ULT) yes no) -> (First nil yes no)
  1435  (NE (FlagLT_UGT) yes no) -> (First nil yes no)
  1436  (NE (FlagGT_ULT) yes no) -> (First nil yes no)
  1437  (NE (FlagGT_UGT) yes no) -> (First nil yes no)
  1438  
  1439  (LT (FlagEQ) yes no) -> (First nil no yes)
  1440  (LT (FlagLT_ULT) yes no) -> (First nil yes no)
  1441  (LT (FlagLT_UGT) yes no) -> (First nil yes no)
  1442  (LT (FlagGT_ULT) yes no) -> (First nil no yes)
  1443  (LT (FlagGT_UGT) yes no) -> (First nil no yes)
  1444  
  1445  (LE (FlagEQ) yes no) -> (First nil yes no)
  1446  (LE (FlagLT_ULT) yes no) -> (First nil yes no)
  1447  (LE (FlagLT_UGT) yes no) -> (First nil yes no)
  1448  (LE (FlagGT_ULT) yes no) -> (First nil no yes)
  1449  (LE (FlagGT_UGT) yes no) -> (First nil no yes)
  1450  
  1451  (GT (FlagEQ) yes no) -> (First nil no yes)
  1452  (GT (FlagLT_ULT) yes no) -> (First nil no yes)
  1453  (GT (FlagLT_UGT) yes no) -> (First nil no yes)
  1454  (GT (FlagGT_ULT) yes no) -> (First nil yes no)
  1455  (GT (FlagGT_UGT) yes no) -> (First nil yes no)
  1456  
  1457  (GE (FlagEQ) yes no) -> (First nil yes no)
  1458  (GE (FlagLT_ULT) yes no) -> (First nil no yes)
  1459  (GE (FlagLT_UGT) yes no) -> (First nil no yes)
  1460  (GE (FlagGT_ULT) yes no) -> (First nil yes no)
  1461  (GE (FlagGT_UGT) yes no) -> (First nil yes no)
  1462  
  1463  (ULT (FlagEQ) yes no) -> (First nil no yes)
  1464  (ULT (FlagLT_ULT) yes no) -> (First nil yes no)
  1465  (ULT (FlagLT_UGT) yes no) -> (First nil no yes)
  1466  (ULT (FlagGT_ULT) yes no) -> (First nil yes no)
  1467  (ULT (FlagGT_UGT) yes no) -> (First nil no yes)
  1468  
  1469  (ULE (FlagEQ) yes no) -> (First nil yes no)
  1470  (ULE (FlagLT_ULT) yes no) -> (First nil yes no)
  1471  (ULE (FlagLT_UGT) yes no) -> (First nil no yes)
  1472  (ULE (FlagGT_ULT) yes no) -> (First nil yes no)
  1473  (ULE (FlagGT_UGT) yes no) -> (First nil no yes)
  1474  
  1475  (UGT (FlagEQ) yes no) -> (First nil no yes)
  1476  (UGT (FlagLT_ULT) yes no) -> (First nil no yes)
  1477  (UGT (FlagLT_UGT) yes no) -> (First nil yes no)
  1478  (UGT (FlagGT_ULT) yes no) -> (First nil no yes)
  1479  (UGT (FlagGT_UGT) yes no) -> (First nil yes no)
  1480  
  1481  (UGE (FlagEQ) yes no) -> (First nil yes no)
  1482  (UGE (FlagLT_ULT) yes no) -> (First nil no yes)
  1483  (UGE (FlagLT_UGT) yes no) -> (First nil yes no)
  1484  (UGE (FlagGT_ULT) yes no) -> (First nil no yes)
  1485  (UGE (FlagGT_UGT) yes no) -> (First nil yes no)
  1486  
  1487  // Absorb flag constants into SETxx ops.
  1488  (SETEQ (FlagEQ)) -> (MOVLconst [1])
  1489  (SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
  1490  (SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
  1491  (SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
  1492  (SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
  1493  
  1494  (SETNE (FlagEQ)) -> (MOVLconst [0])
  1495  (SETNE (FlagLT_ULT)) -> (MOVLconst [1])
  1496  (SETNE (FlagLT_UGT)) -> (MOVLconst [1])
  1497  (SETNE (FlagGT_ULT)) -> (MOVLconst [1])
  1498  (SETNE (FlagGT_UGT)) -> (MOVLconst [1])
  1499  
  1500  (SETL (FlagEQ)) -> (MOVLconst [0])
  1501  (SETL (FlagLT_ULT)) -> (MOVLconst [1])
  1502  (SETL (FlagLT_UGT)) -> (MOVLconst [1])
  1503  (SETL (FlagGT_ULT)) -> (MOVLconst [0])
  1504  (SETL (FlagGT_UGT)) -> (MOVLconst [0])
  1505  
  1506  (SETLE (FlagEQ)) -> (MOVLconst [1])
  1507  (SETLE (FlagLT_ULT)) -> (MOVLconst [1])
  1508  (SETLE (FlagLT_UGT)) -> (MOVLconst [1])
  1509  (SETLE (FlagGT_ULT)) -> (MOVLconst [0])
  1510  (SETLE (FlagGT_UGT)) -> (MOVLconst [0])
  1511  
  1512  (SETG (FlagEQ)) -> (MOVLconst [0])
  1513  (SETG (FlagLT_ULT)) -> (MOVLconst [0])
  1514  (SETG (FlagLT_UGT)) -> (MOVLconst [0])
  1515  (SETG (FlagGT_ULT)) -> (MOVLconst [1])
  1516  (SETG (FlagGT_UGT)) -> (MOVLconst [1])
  1517  
  1518  (SETGE (FlagEQ)) -> (MOVLconst [1])
  1519  (SETGE (FlagLT_ULT)) -> (MOVLconst [0])
  1520  (SETGE (FlagLT_UGT)) -> (MOVLconst [0])
  1521  (SETGE (FlagGT_ULT)) -> (MOVLconst [1])
  1522  (SETGE (FlagGT_UGT)) -> (MOVLconst [1])
  1523  
  1524  (SETB (FlagEQ)) -> (MOVLconst [0])
  1525  (SETB (FlagLT_ULT)) -> (MOVLconst [1])
  1526  (SETB (FlagLT_UGT)) -> (MOVLconst [0])
  1527  (SETB (FlagGT_ULT)) -> (MOVLconst [1])
  1528  (SETB (FlagGT_UGT)) -> (MOVLconst [0])
  1529  
  1530  (SETBE (FlagEQ)) -> (MOVLconst [1])
  1531  (SETBE (FlagLT_ULT)) -> (MOVLconst [1])
  1532  (SETBE (FlagLT_UGT)) -> (MOVLconst [0])
  1533  (SETBE (FlagGT_ULT)) -> (MOVLconst [1])
  1534  (SETBE (FlagGT_UGT)) -> (MOVLconst [0])
  1535  
  1536  (SETA (FlagEQ)) -> (MOVLconst [0])
  1537  (SETA (FlagLT_ULT)) -> (MOVLconst [0])
  1538  (SETA (FlagLT_UGT)) -> (MOVLconst [1])
  1539  (SETA (FlagGT_ULT)) -> (MOVLconst [0])
  1540  (SETA (FlagGT_UGT)) -> (MOVLconst [1])
  1541  
  1542  (SETAE (FlagEQ)) -> (MOVLconst [1])
  1543  (SETAE (FlagLT_ULT)) -> (MOVLconst [0])
  1544  (SETAE (FlagLT_UGT)) -> (MOVLconst [1])
  1545  (SETAE (FlagGT_ULT)) -> (MOVLconst [0])
  1546  (SETAE (FlagGT_UGT)) -> (MOVLconst [1])
  1547  
  1548  // Remove redundant *const ops
  1549  (ADDQconst [0] x) -> x
  1550  (ADDLconst [c] x) && int32(c)==0 -> x
  1551  (SUBQconst [0] x) -> x
  1552  (SUBLconst [c] x) && int32(c) == 0 -> x
  1553  (ANDQconst [0] _)                 -> (MOVQconst [0])
  1554  (ANDLconst [c] _) && int32(c)==0  -> (MOVLconst [0])
  1555  (ANDQconst [-1] x)                -> x
  1556  (ANDLconst [c] x) && int32(c)==-1 -> x
  1557  (ORQconst [0] x)                  -> x
  1558  (ORLconst [c] x) && int32(c)==0   -> x
  1559  (ORQconst [-1] _)                 -> (MOVQconst [-1])
  1560  (ORLconst [c] _) && int32(c)==-1  -> (MOVLconst [-1])
  1561  (XORQconst [0] x)                  -> x
  1562  (XORLconst [c] x) && int32(c)==0   -> x
  1563  // TODO: since we got rid of the W/B versions, we might miss
  1564  // things like (ANDLconst [0x100] x) which were formerly
  1565  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1566  // If we cared, we might do:
  1567  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1568  
  1569  // Convert constant subtracts to constant adds
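        // The guard c != -(1<<31) matters because -c would then be 1<<31, which does
        // not fit in ADDQconst's signed 32-bit immediate. SUBLconst needs no guard:
        // the negation is truncated back to 32 bits anyway.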
  1570  (SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
  1571  (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
  1572  
  1573  // generic constant folding
  1574  // TODO: more of this
  1575  (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
  1576  (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
  1577  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
  1578  (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
  1579  (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
  1580  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
  1581  (SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1582  (SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1583  (SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1584  (SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1585  (NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
  1586  (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
  1587  (MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
  1588  (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
  1589  (ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
  1590  (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
  1591  (ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
  1592  (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
  1593  (XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
  1594  (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
  1595  (NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
  1596  (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
  1597  
  1598  // generic simplifications
  1599  // TODO: more of this
  1600  (ADDQ x (NEGQ y)) -> (SUBQ x y)
  1601  (ADDL x (NEGL y)) -> (SUBL x y)
  1602  (SUBQ x x) -> (MOVQconst [0])
  1603  (SUBL x x) -> (MOVLconst [0])
  1604  (ANDQ x x) -> x
  1605  (ANDL x x) -> x
  1606  (ORQ x x) -> x
  1607  (ORL x x) -> x
  1608  (XORQ x x) -> (MOVQconst [0])
  1609  (XORL x x) -> (MOVLconst [0])
  1610  (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)
  1611  
  1612  // checking AND against 0.
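        // TEST performs the AND purely for its flags, so the AND result does not
        // have to be materialized in a register.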
  1613  (CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
  1614  (CMPLconst (ANDL x y) [0]) -> (TESTL x y)
  1615  (CMPWconst (ANDL x y) [0]) -> (TESTW x y)
  1616  (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
  1617  (CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
  1618  (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
  1619  (CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
  1620  (CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
  1621  
  1622  // Convert TESTx to TESTxconst if possible.
  1623  (TESTQ (MOVQconst [c]) x) && is32Bit(c) -> (TESTQconst [c] x)
  1624  (TESTL (MOVLconst [c]) x) -> (TESTLconst [c] x)
  1625  (TESTW (MOVLconst [c]) x) -> (TESTWconst [c] x)
  1626  (TESTB (MOVLconst [c]) x) -> (TESTBconst [c] x)
  1627  
  1628  // TEST %reg,%reg is shorter than CMP
  1629  (CMPQconst x [0]) -> (TESTQ x x)
  1630  (CMPLconst x [0]) -> (TESTL x x)
  1631  (CMPWconst x [0]) -> (TESTW x x)
  1632  (CMPBconst x [0]) -> (TESTB x x)
  1633  
  1634  // Combining byte loads into larger (unaligned) loads.
  1635  // There are many ways these combinations could occur.  This is
  1636  // designed to match the way encoding/binary.LittleEndian does it.
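        // For reference, binary.LittleEndian.Uint32 is essentially
        //	func Uint32(b []byte) uint32 {
        //		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
        //	}
        // i.e. an OR tree of shifted byte loads from consecutive offsets, which the
        // rules below collapse into a single wider load.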
  1637  
  1638  // Little-endian loads
  1639  
  1640  (ORL                  x0:(MOVBload [i0] {s} p mem)
  1641      sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
  1642    && i1 == i0+1
  1643    && x0.Uses == 1
  1644    && x1.Uses == 1
  1645    && sh.Uses == 1
  1646    && mergePoint(b,x0,x1) != nil
  1647    && clobber(x0)
  1648    && clobber(x1)
  1649    && clobber(sh)
  1650    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1651  
  1652  (ORQ                  x0:(MOVBload [i0] {s} p mem)
  1653      sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
  1654    && i1 == i0+1
  1655    && x0.Uses == 1
  1656    && x1.Uses == 1
  1657    && sh.Uses == 1
  1658    && mergePoint(b,x0,x1) != nil
  1659    && clobber(x0)
  1660    && clobber(x1)
  1661    && clobber(sh)
  1662    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1663  
  1664  (ORL                   x0:(MOVWload [i0] {s} p mem)
  1665      sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
  1666    && i1 == i0+2
  1667    && x0.Uses == 1
  1668    && x1.Uses == 1
  1669    && sh.Uses == 1
  1670    && mergePoint(b,x0,x1) != nil
  1671    && clobber(x0)
  1672    && clobber(x1)
  1673    && clobber(sh)
  1674    -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1675  
  1676  (ORQ                   x0:(MOVWload [i0] {s} p mem)
  1677      sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
  1678    && i1 == i0+2
  1679    && x0.Uses == 1
  1680    && x1.Uses == 1
  1681    && sh.Uses == 1
  1682    && mergePoint(b,x0,x1) != nil
  1683    && clobber(x0)
  1684    && clobber(x1)
  1685    && clobber(sh)
  1686    -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1687  
  1688  (ORQ                   x0:(MOVLload [i0] {s} p mem)
  1689      sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
  1690    && i1 == i0+4
  1691    && x0.Uses == 1
  1692    && x1.Uses == 1
  1693    && sh.Uses == 1
  1694    && mergePoint(b,x0,x1) != nil
  1695    && clobber(x0)
  1696    && clobber(x1)
  1697    && clobber(sh)
  1698    -> @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
  1699  
  1700  (ORL
  1701      s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))
  1702      or:(ORL
  1703          s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))
  1704  	y))
  1705    && i1 == i0+1
  1706    && j1 == j0+8
  1707    && j0 % 16 == 0
  1708    && x0.Uses == 1
  1709    && x1.Uses == 1
  1710    && s0.Uses == 1
  1711    && s1.Uses == 1
  1712    && or.Uses == 1
  1713    && mergePoint(b,x0,x1) != nil
  1714    && clobber(x0)
  1715    && clobber(x1)
  1716    && clobber(s0)
  1717    && clobber(s1)
  1718    && clobber(or)
  1719    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1720  
  1721  (ORQ
  1722      s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))
  1723      or:(ORQ
  1724          s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
  1725  	y))
  1726    && i1 == i0+1
  1727    && j1 == j0+8
  1728    && j0 % 16 == 0
  1729    && x0.Uses == 1
  1730    && x1.Uses == 1
  1731    && s0.Uses == 1
  1732    && s1.Uses == 1
  1733    && or.Uses == 1
  1734    && mergePoint(b,x0,x1) != nil
  1735    && clobber(x0)
  1736    && clobber(x1)
  1737    && clobber(s0)
  1738    && clobber(s1)
  1739    && clobber(or)
  1740    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1741  
  1742  (ORQ
  1743      s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
  1744      or:(ORQ
  1745          s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))
  1746  	y))
  1747    && i1 == i0+2
  1748    && j1 == j0+16
  1749    && j0 % 32 == 0
  1750    && x0.Uses == 1
  1751    && x1.Uses == 1
  1752    && s0.Uses == 1
  1753    && s1.Uses == 1
  1754    && or.Uses == 1
  1755    && mergePoint(b,x0,x1) != nil
  1756    && clobber(x0)
  1757    && clobber(x1)
  1758    && clobber(s0)
  1759    && clobber(s1)
  1760    && clobber(or)
  1761    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
  1762  
  1763  // Little-endian indexed loads
  1764  
  1765  (ORL                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1766      sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1767    && i1 == i0+1
  1768    && x0.Uses == 1
  1769    && x1.Uses == 1
  1770    && sh.Uses == 1
  1771    && mergePoint(b,x0,x1) != nil
  1772    && clobber(x0)
  1773    && clobber(x1)
  1774    && clobber(sh)
  1775    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1776  
  1777  (ORQ                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1778      sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1779    && i1 == i0+1
  1780    && x0.Uses == 1
  1781    && x1.Uses == 1
  1782    && sh.Uses == 1
  1783    && mergePoint(b,x0,x1) != nil
  1784    && clobber(x0)
  1785    && clobber(x1)
  1786    && clobber(sh)
  1787    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1788  
  1789  (ORL                   x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1790      sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1791    && i1 == i0+2
  1792    && x0.Uses == 1
  1793    && x1.Uses == 1
  1794    && sh.Uses == 1
  1795    && mergePoint(b,x0,x1) != nil
  1796    && clobber(x0)
  1797    && clobber(x1)
  1798    && clobber(sh)
  1799    -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
  1800  
  1801  (ORQ                   x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1802      sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1803    && i1 == i0+2
  1804    && x0.Uses == 1
  1805    && x1.Uses == 1
  1806    && sh.Uses == 1
  1807    && mergePoint(b,x0,x1) != nil
  1808    && clobber(x0)
  1809    && clobber(x1)
  1810    && clobber(sh)
  1811    -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
  1812  
  1813  (ORQ                   x0:(MOVLloadidx1 [i0] {s} p idx mem)
  1814      sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
  1815    && i1 == i0+4
  1816    && x0.Uses == 1
  1817    && x1.Uses == 1
  1818    && sh.Uses == 1
  1819    && mergePoint(b,x0,x1) != nil
  1820    && clobber(x0)
  1821    && clobber(x1)
  1822    && clobber(sh)
  1823    -> @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
  1824  
  1825  (ORL
  1826      s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1827      or:(ORL
  1828          s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  1829  	y))
  1830    && i1 == i0+1
  1831    && j1 == j0+8
  1832    && j0 % 16 == 0
  1833    && x0.Uses == 1
  1834    && x1.Uses == 1
  1835    && s0.Uses == 1
  1836    && s1.Uses == 1
  1837    && or.Uses == 1
  1838    && mergePoint(b,x0,x1) != nil
  1839    && clobber(x0)
  1840    && clobber(x1)
  1841    && clobber(s0)
  1842    && clobber(s1)
  1843    && clobber(or)
  1844    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
  1845  
  1846  (ORQ
  1847      s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1848      or:(ORQ
  1849          s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  1850  	y))
  1851    && i1 == i0+1
  1852    && j1 == j0+8
  1853    && j0 % 16 == 0
  1854    && x0.Uses == 1
  1855    && x1.Uses == 1
  1856    && s0.Uses == 1
  1857    && s1.Uses == 1
  1858    && or.Uses == 1
  1859    && mergePoint(b,x0,x1) != nil
  1860    && clobber(x0)
  1861    && clobber(x1)
  1862    && clobber(s0)
  1863    && clobber(s1)
  1864    && clobber(or)
  1865    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
  1866  
  1867  (ORQ
  1868      s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  1869      or:(ORQ
  1870          s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))
  1871  	y))
  1872    && i1 == i0+2
  1873    && j1 == j0+16
  1874    && j0 % 32 == 0
  1875    && x0.Uses == 1
  1876    && x1.Uses == 1
  1877    && s0.Uses == 1
  1878    && s1.Uses == 1
  1879    && or.Uses == 1
  1880    && mergePoint(b,x0,x1) != nil
  1881    && clobber(x0)
  1882    && clobber(x1)
  1883    && clobber(s0)
  1884    && clobber(s1)
  1885    && clobber(or)
  1886    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
  1887  
  1888  // Big-endian loads
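        // Byte-reversed OR trees, as produced by code in the style of
        // binary.BigEndian.Uint32, become a single wide load followed by a byte swap:
        // ROLWconst [8] for 16 bits, BSWAPL for 32 bits, BSWAPQ for 64 bits.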
  1889  
  1890  (ORL
  1891                         x1:(MOVBload [i1] {s} p mem)
  1892      sh:(SHLLconst [8]  x0:(MOVBload [i0] {s} p mem)))
  1893    && i1 == i0+1
  1894    && x0.Uses == 1
  1895    && x1.Uses == 1
  1896    && sh.Uses == 1
  1897    && mergePoint(b,x0,x1) != nil
  1898    && clobber(x0)
  1899    && clobber(x1)
  1900    && clobber(sh)
  1901    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1902  
  1903  (ORQ
  1904                         x1:(MOVBload [i1] {s} p mem)
  1905      sh:(SHLQconst [8]  x0:(MOVBload [i0] {s} p mem)))
  1906    && i1 == i0+1
  1907    && x0.Uses == 1
  1908    && x1.Uses == 1
  1909    && sh.Uses == 1
  1910    && mergePoint(b,x0,x1) != nil
  1911    && clobber(x0)
  1912    && clobber(x1)
  1913    && clobber(sh)
  1914    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1915  
  1916  (ORL
  1917                          r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1918      sh:(SHLLconst [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1919    && i1 == i0+2
  1920    && x0.Uses == 1
  1921    && x1.Uses == 1
  1922    && r0.Uses == 1
  1923    && r1.Uses == 1
  1924    && sh.Uses == 1
  1925    && mergePoint(b,x0,x1) != nil
  1926    && clobber(x0)
  1927    && clobber(x1)
  1928    && clobber(r0)
  1929    && clobber(r1)
  1930    && clobber(sh)
  1931    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1932  
  1933  (ORQ
  1934                          r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1935      sh:(SHLQconst [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1936    && i1 == i0+2
  1937    && x0.Uses == 1
  1938    && x1.Uses == 1
  1939    && r0.Uses == 1
  1940    && r1.Uses == 1
  1941    && sh.Uses == 1
  1942    && mergePoint(b,x0,x1) != nil
  1943    && clobber(x0)
  1944    && clobber(x1)
  1945    && clobber(r0)
  1946    && clobber(r1)
  1947    && clobber(sh)
  1948    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1949  
  1950  (ORQ
  1951                          r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
  1952      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
  1953    && i1 == i0+4
  1954    && x0.Uses == 1
  1955    && x1.Uses == 1
  1956    && r0.Uses == 1
  1957    && r1.Uses == 1
  1958    && sh.Uses == 1
  1959    && mergePoint(b,x0,x1) != nil
  1960    && clobber(x0)
  1961    && clobber(x1)
  1962    && clobber(r0)
  1963    && clobber(r1)
  1964    && clobber(sh)
  1965    -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
  1966  
  1967  (ORL
  1968      s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))
  1969      or:(ORL
  1970          s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))
  1971  	y))
  1972    && i1 == i0+1
  1973    && j1 == j0-8
  1974    && j1 % 16 == 0
  1975    && x0.Uses == 1
  1976    && x1.Uses == 1
  1977    && s0.Uses == 1
  1978    && s1.Uses == 1
  1979    && or.Uses == 1
  1980    && mergePoint(b,x0,x1) != nil
  1981    && clobber(x0)
  1982    && clobber(x1)
  1983    && clobber(s0)
  1984    && clobber(s1)
  1985    && clobber(or)
  1986    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  1987  
  1988  (ORQ
  1989      s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
  1990      or:(ORQ
  1991          s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))
  1992  	y))
  1993    && i1 == i0+1
  1994    && j1 == j0-8
  1995    && j1 % 16 == 0
  1996    && x0.Uses == 1
  1997    && x1.Uses == 1
  1998    && s0.Uses == 1
  1999    && s1.Uses == 1
  2000    && or.Uses == 1
  2001    && mergePoint(b,x0,x1) != nil
  2002    && clobber(x0)
  2003    && clobber(x1)
  2004    && clobber(s0)
  2005    && clobber(s1)
  2006    && clobber(or)
  2007    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  2008  
  2009  (ORQ
  2010      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
  2011      or:(ORQ
  2012          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
  2013  	y))
  2014    && i1 == i0+2
  2015    && j1 == j0-16
  2016    && j1 % 32 == 0
  2017    && x0.Uses == 1
  2018    && x1.Uses == 1
  2019    && r0.Uses == 1
  2020    && r1.Uses == 1
  2021    && s0.Uses == 1
  2022    && s1.Uses == 1
  2023    && or.Uses == 1
  2024    && mergePoint(b,x0,x1) != nil
  2025    && clobber(x0)
  2026    && clobber(x1)
  2027    && clobber(r0)
  2028    && clobber(r1)
  2029    && clobber(s0)
  2030    && clobber(s1)
  2031    && clobber(or)
  2032    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
  2033  
  2034  // Big-endian indexed loads
  2035  
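        // Same as the big-endian load rules above, but with a base+index address.
        // An illustrative Go sketch (hypothetical; assumes bounds checks are gone):
        //
        //	v := uint32(b[i+3]) | uint32(b[i+2])<<8 | uint32(b[i+1])<<16 | uint32(b[i])<<24
        //
        // should become a BSWAPL of a single MOVLloadidx1.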
  2036  (ORL
  2037                         x1:(MOVBloadidx1 [i1] {s} p idx mem)
  2038      sh:(SHLLconst [8]  x0:(MOVBloadidx1 [i0] {s} p idx mem)))
  2039    && i1 == i0+1
  2040    && x0.Uses == 1
  2041    && x1.Uses == 1
  2042    && sh.Uses == 1
  2043    && mergePoint(b,x0,x1) != nil
  2044    && clobber(x0)
  2045    && clobber(x1)
  2046    && clobber(sh)
  2047    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
  2048  
  2049  (ORQ
  2050                         x1:(MOVBloadidx1 [i1] {s} p idx mem)
  2051      sh:(SHLQconst [8]  x0:(MOVBloadidx1 [i0] {s} p idx mem)))
  2052    && i1 == i0+1
  2053    && x0.Uses == 1
  2054    && x1.Uses == 1
  2055    && sh.Uses == 1
  2056    && mergePoint(b,x0,x1) != nil
  2057    && clobber(x0)
  2058    && clobber(x1)
  2059    && clobber(sh)
  2060    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
  2061  
  2062  (ORL
  2063                          r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  2064      sh:(SHLLconst [16]  r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
  2065    && i1 == i0+2
  2066    && x0.Uses == 1
  2067    && x1.Uses == 1
  2068    && r0.Uses == 1
  2069    && r1.Uses == 1
  2070    && sh.Uses == 1
  2071    && mergePoint(b,x0,x1) != nil
  2072    && clobber(x0)
  2073    && clobber(x1)
  2074    && clobber(r0)
  2075    && clobber(r1)
  2076    && clobber(sh)
  2077    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
  2078  
  2079  (ORQ
  2080                          r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  2081      sh:(SHLQconst [16]  r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
  2082    && i1 == i0+2
  2083    && x0.Uses == 1
  2084    && x1.Uses == 1
  2085    && r0.Uses == 1
  2086    && r1.Uses == 1
  2087    && sh.Uses == 1
  2088    && mergePoint(b,x0,x1) != nil
  2089    && clobber(x0)
  2090    && clobber(x1)
  2091    && clobber(r0)
  2092    && clobber(r1)
  2093    && clobber(sh)
  2094    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
  2095  
  2096  (ORQ
  2097                          r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))
  2098      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
  2099    && i1 == i0+4
  2100    && x0.Uses == 1
  2101    && x1.Uses == 1
  2102    && r0.Uses == 1
  2103    && r1.Uses == 1
  2104    && sh.Uses == 1
  2105    && mergePoint(b,x0,x1) != nil
  2106    && clobber(x0)
  2107    && clobber(x1)
  2108    && clobber(r0)
  2109    && clobber(r1)
  2110    && clobber(sh)
  2111    -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
  2112  
  2113  (ORL
  2114      s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  2115      or:(ORL
  2116          s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  2117  	y))
  2118    && i1 == i0+1
  2119    && j1 == j0-8
  2120    && j1 % 16 == 0
  2121    && x0.Uses == 1
  2122    && x1.Uses == 1
  2123    && s0.Uses == 1
  2124    && s1.Uses == 1
  2125    && or.Uses == 1
  2126    && mergePoint(b,x0,x1) != nil
  2127    && clobber(x0)
  2128    && clobber(x1)
  2129    && clobber(s0)
  2130    && clobber(s1)
  2131    && clobber(or)
  2132    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
  2133  
  2134  (ORQ
  2135      s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  2136      or:(ORQ
  2137          s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  2138  	y))
  2139    && i1 == i0+1
  2140    && j1 == j0-8
  2141    && j1 % 16 == 0
  2142    && x0.Uses == 1
  2143    && x1.Uses == 1
  2144    && s0.Uses == 1
  2145    && s1.Uses == 1
  2146    && or.Uses == 1
  2147    && mergePoint(b,x0,x1) != nil
  2148    && clobber(x0)
  2149    && clobber(x1)
  2150    && clobber(s0)
  2151    && clobber(s1)
  2152    && clobber(or)
  2153    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
  2154  
  2155  (ORQ
  2156      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))
  2157      or:(ORQ
  2158          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  2159  	y))
  2160    && i1 == i0+2
  2161    && j1 == j0-16
  2162    && j1 % 32 == 0
  2163    && x0.Uses == 1
  2164    && x1.Uses == 1
  2165    && r0.Uses == 1
  2166    && r1.Uses == 1
  2167    && s0.Uses == 1
  2168    && s1.Uses == 1
  2169    && or.Uses == 1
  2170    && mergePoint(b,x0,x1) != nil
  2171    && clobber(x0)
  2172    && clobber(x1)
  2173    && clobber(r0)
  2174    && clobber(r1)
  2175    && clobber(s0)
  2176    && clobber(s1)
  2177    && clobber(or)
  2178    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
  2179  
  2180  // Combine 2 byte stores + shift into rolw 8 + word store
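        // An illustrative Go sketch (not part of the rules), for a uint16 v:
        //
        //	b[0] = byte(v >> 8)
        //	b[1] = byte(v)
        //
        // should be rewritten into a ROLW $8 of v followed by one MOVW store,
        // provided the earlier byte store has no other uses.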
  2181  (MOVBstore [i] {s} p w
  2182    x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
  2183    && x0.Uses == 1
  2184    && clobber(x0)
  2185    -> (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
  2186  
  2187  (MOVBstoreidx1 [i] {s} p idx w
  2188    x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
  2189    && x0.Uses == 1
  2190    && clobber(x0)
  2191    -> (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
  2192  
  2193  // Combine stores + shifts into bswap and larger (unaligned) stores
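        // An illustrative Go sketch (not part of the rules), for a uint32 v:
        //
        //	b[0] = byte(v >> 24)
        //	b[1] = byte(v >> 16)
        //	b[2] = byte(v >> 8)
        //	b[3] = byte(v)
        //
        // should become a BSWAPL of v plus a single MOVL store; the analogous
        // 8-byte pattern below becomes a BSWAPQ plus one MOVQ store.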
  2194  (MOVBstore [i] {s} p w
  2195    x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)
  2196    x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)
  2197    x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
  2198    && x0.Uses == 1
  2199    && x1.Uses == 1
  2200    && x2.Uses == 1
  2201    && clobber(x0)
  2202    && clobber(x1)
  2203    && clobber(x2)
  2204    -> (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
  2205  
  2206  (MOVBstoreidx1 [i] {s} p idx w
  2207    x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w)
  2208    x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w)
  2209    x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
  2210    && x0.Uses == 1
  2211    && x1.Uses == 1
  2212    && x2.Uses == 1
  2213    && clobber(x0)
  2214    && clobber(x1)
  2215    && clobber(x2)
  2216    -> (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
  2217  
  2218  (MOVBstore [i] {s} p w
  2219    x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
  2220    x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)
  2221    x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)
  2222    x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)
  2223    x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)
  2224    x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)
  2225    x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
  2226    && x0.Uses == 1
  2227    && x1.Uses == 1
  2228    && x2.Uses == 1
  2229    && x3.Uses == 1
  2230    && x4.Uses == 1
  2231    && x5.Uses == 1
  2232    && x6.Uses == 1
  2233    && clobber(x0)
  2234    && clobber(x1)
  2235    && clobber(x2)
  2236    && clobber(x3)
  2237    && clobber(x4)
  2238    && clobber(x5)
  2239    && clobber(x6)
  2240    -> (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
  2241  
  2242  (MOVBstoreidx1 [i] {s} p idx w
  2243    x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w)
  2244    x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w)
  2245    x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w)
  2246    x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w)
  2247    x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w)
  2248    x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w)
  2249    x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
  2250    && x0.Uses == 1
  2251    && x1.Uses == 1
  2252    && x2.Uses == 1
  2253    && x3.Uses == 1
  2254    && x4.Uses == 1
  2255    && x5.Uses == 1
  2256    && x6.Uses == 1
  2257    && clobber(x0)
  2258    && clobber(x1)
  2259    && clobber(x2)
  2260    && clobber(x3)
  2261    && clobber(x4)
  2262    && clobber(x5)
  2263    && clobber(x6)
  2264    -> (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
  2265  
  2266  // Combine constant stores into larger (unaligned) stores.
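        // For example (illustrative Go, not part of the rules):
        //
        //	b[0] = 1
        //	b[1] = 2
        //
        // can be combined into one MOVW store of the constant 0x0201 at offset 0,
        // provided the earlier constant store has no other uses.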
  2267  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  2268    && x.Uses == 1
  2269    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  2270    && clobber(x)
  2271    -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  2272  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  2273    && x.Uses == 1
  2274    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2275    && clobber(x)
  2276    -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  2277  (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  2278    && x.Uses == 1
  2279    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2280    && clobber(x)
  2281    -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2282  (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
  2283    && config.useSSE
  2284    && x.Uses == 1
  2285    && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off()
  2286    && ValAndOff(c).Val() == 0
  2287    && ValAndOff(c2).Val() == 0
  2288    && clobber(x)
  2289    -> (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
  2290  
  2291  (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  2292    && x.Uses == 1
  2293    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  2294    && clobber(x)
  2295    -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  2296  (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  2297    && x.Uses == 1
  2298    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2299    && clobber(x)
  2300    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
  2301  (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
  2302    && x.Uses == 1
  2303    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2304    && clobber(x)
  2305    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2306  
  2307  (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  2308    && x.Uses == 1
  2309    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2310    && clobber(x)
  2311    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
  2312  (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
  2313    && x.Uses == 1
  2314    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2315    && clobber(x)
  2316    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2317  
  2318  // Combine stores into larger (unaligned) stores.
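        // An illustrative Go sketch (not part of the rules), with v a uint64 so
        // the shifts below are SHRQconst:
        //
        //	b[0] = byte(v)
        //	b[1] = byte(v >> 8)
        //
        // should collapse into a single MOVW store of v; longer little-endian
        // sequences collapse further into MOVL and MOVQ stores via the rules below.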
  2319  (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  2320    && x.Uses == 1
  2321    && clobber(x)
  2322    -> (MOVWstore [i-1] {s} p w mem)
  2323  (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
  2324    && x.Uses == 1
  2325    && clobber(x)
  2326    -> (MOVWstore [i-1] {s} p w0 mem)
  2327  (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  2328    && x.Uses == 1
  2329    && clobber(x)
  2330    -> (MOVLstore [i-2] {s} p w mem)
  2331  (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
  2332    && x.Uses == 1
  2333    && clobber(x)
  2334    -> (MOVLstore [i-2] {s} p w0 mem)
  2335  (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  2336    && x.Uses == 1
  2337    && clobber(x)
  2338    -> (MOVQstore [i-4] {s} p w mem)
  2339  (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  2340    && x.Uses == 1
  2341    && clobber(x)
  2342    -> (MOVQstore [i-4] {s} p w0 mem)
  2343  
  2344  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  2345    && x.Uses == 1
  2346    && clobber(x)
  2347    -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
  2348  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
  2349    && x.Uses == 1
  2350    && clobber(x)
  2351    -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  2352  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  2353    && x.Uses == 1
  2354    && clobber(x)
  2355    -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
  2356  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  2357    && x.Uses == 1
  2358    && clobber(x)
  2359    -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
  2360  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
  2361    && x.Uses == 1
  2362    && clobber(x)
  2363    -> (MOVQstoreidx1 [i-4] {s} p idx w mem)
  2364  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  2365    && x.Uses == 1
  2366    && clobber(x)
  2367    -> (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
  2368  
  2369  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  2370    && x.Uses == 1
  2371    && clobber(x)
  2372    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
  2373  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  2374    && x.Uses == 1
  2375    && clobber(x)
  2376    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
  2377  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
  2378    && x.Uses == 1
  2379    && clobber(x)
  2380    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
  2381  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  2382    && x.Uses == 1
  2383    && clobber(x)
  2384    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
  2385  
  2386  (MOVBstore [i] {s} p
  2387    x1:(MOVBload [j] {s2} p2 mem)
  2388      mem2:(MOVBstore [i-1] {s} p
  2389        x2:(MOVBload [j-1] {s2} p2 mem) mem))
  2390    && x1.Uses == 1
  2391    && x2.Uses == 1
  2392    && mem2.Uses == 1
  2393    && clobber(x1)
  2394    && clobber(x2)
  2395    && clobber(mem2)
  2396    -> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
  2397  
  2398  (MOVWstore [i] {s} p
  2399    x1:(MOVWload [j] {s2} p2 mem)
  2400      mem2:(MOVWstore [i-2] {s} p
  2401        x2:(MOVWload [j-2] {s2} p2 mem) mem))
  2402    && x1.Uses == 1
  2403    && x2.Uses == 1
  2404    && mem2.Uses == 1
  2405    && clobber(x1)
  2406    && clobber(x2)
  2407    && clobber(mem2)
  2408    -> (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
  2409  
  2410  (MOVLstore [i] {s} p
  2411    x1:(MOVLload [j] {s2} p2 mem)
  2412      mem2:(MOVLstore [i-4] {s} p
  2413        x2:(MOVLload [j-4] {s2} p2 mem) mem))
  2414    && x1.Uses == 1
  2415    && x2.Uses == 1
  2416    && mem2.Uses == 1
  2417    && clobber(x1)
  2418    && clobber(x2)
  2419    && clobber(mem2)
  2420    -> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
  2421  
  2422  // This is somewhat tricky. There may be pointers in SSE registers due to the rule below.
  2423  // However, those registers shouldn't live across a GC safepoint.
  2424  (MOVQstore [i] {s} p
  2425    x1:(MOVQload [j] {s2} p2 mem)
  2426      mem2:(MOVQstore [i-8] {s} p
  2427        x2:(MOVQload [j-8] {s2} p2 mem) mem))
  2428    && x1.Uses == 1
  2429    && x2.Uses == 1
  2430    && mem2.Uses == 1
  2431    && config.useSSE
  2432    && clobber(x1)
  2433    && clobber(x2)
  2434    && clobber(mem2)
  2435    -> (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem)
  2436  
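        // An illustrative Go sketch of what the copy-merging group above targets
        // (hypothetical; assumes bounds checks are eliminated and the compiler
        // already knows d and s do not overlap):
        //
        //	d[0] = s[0]   // d, s []byte
        //	d[1] = s[1]
        //
        // The two byte load/store pairs should merge into a single MOVW copy, and
        // the same pattern scales up to the MOVO copy above when SSE is usable.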
  2437  
  2438  // amd64p32 rules
  2439  // same as the rules above, but with 32-bit instead of 64-bit pointer arithmetic.
  2440  // LEAQ,ADDQ -> LEAL,ADDL
  2441  (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
  2442  (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
  2443  
  2444  (MOVQload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2445  	(MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2446  (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2447  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2448  (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2449  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2450  (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2451  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2452  
  2453  (MOVQstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2454  	(MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2455  (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2456  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2457  (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2458  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2459  (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2460  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2461  
  2462  (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2463  	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2464  (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2465  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2466  (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2467  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2468  (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2469  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2470  
  2471  (MOVQload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload  [off1+off2] {sym} ptr mem)
  2472  (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
  2473  (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
  2474  (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
  2475  (MOVQstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore  [off1+off2] {sym} ptr val mem)
  2476  (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
  2477  (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
  2478  (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
  2479  (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2480  	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2481  (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2482  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2483  (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2484  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2485  (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2486  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2487  
  2488  // Merge load and op
  2489  // TODO: add indexed variants?
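        // For example (illustrative Go, not part of the rules):
        //
        //	func addFrom(x int64, p *int64) int64 { return x + *p }
        //
        // should use ADDQmem, i.e. an ADDQ with a memory operand, instead of a
        // separate MOVQ load, provided canMergeLoad holds and the load has no
        // other uses.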
  2490  (ADDQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDQmem x [off] {sym} ptr mem)
  2491  (ADDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDLmem x [off] {sym} ptr mem)
  2492  (SUBQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBQmem x [off] {sym} ptr mem)
  2493  (SUBL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBLmem x [off] {sym} ptr mem)
  2494  (ANDQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ANDQmem x [off] {sym} ptr mem)
  2495  (ANDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ANDLmem x [off] {sym} ptr mem)
  2496  (ORQ  x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORQmem x [off] {sym} ptr mem)
  2497  (ORL  x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORLmem x [off] {sym} ptr mem)
  2498  (XORQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (XORQmem x [off] {sym} ptr mem)
  2499  (XORL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (XORLmem x [off] {sym} ptr mem)
  2500  (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSDmem x [off] {sym} ptr mem)
  2501  (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSSmem x [off] {sym} ptr mem)
  2502  (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBSDmem x [off] {sym} ptr mem)
  2503  (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBSSmem x [off] {sym} ptr mem)
  2504  (MULSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (MULSDmem x [off] {sym} ptr mem)
  2505  (MULSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (MULSSmem x [off] {sym} ptr mem)
  2506  
  2507  // Merge ADDQconst and LEAQ into atomic loads.
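        // For example (illustrative Go; the struct and field name are hypothetical):
        //
        //	atomic.LoadUint64(&s.counter)   // counter at a nonzero offset in s
        //
        // can fold the field offset straight into the MOVQatomicload instead of
        // materializing the address with a separate LEAQ or ADDQconst.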
  2508  (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2509  	(MOVQatomicload [off1+off2] {sym} ptr mem)
  2510  (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2511  	(MOVLatomicload [off1+off2] {sym} ptr mem)
  2512  (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  2513  	(MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
  2514  (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  2515  	(MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
  2516  
  2517  // Merge ADDQconst and LEAQ into atomic stores.
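        // For example (illustrative Go; the field name is hypothetical):
        //
        //	atomic.StoreUint64(&s.counter, v)
        //
        // lowers to an XCHGQ on amd64, and the field offset can be folded into it
        // here in the same way as for the atomic loads above.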
  2518  (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2519  	(XCHGQ [off1+off2] {sym} val ptr mem)
  2520  (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
  2521  	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2522  (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2523  	(XCHGL [off1+off2] {sym} val ptr mem)
  2524  (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
  2525  	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2526  
  2527  // Merge ADDQconst into atomic adds.
  2528  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  2529  (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2530  	(XADDQlock [off1+off2] {sym} val ptr mem)
  2531  (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2532  	(XADDLlock [off1+off2] {sym} val ptr mem)
  2533  
  2534  // Merge ADDQconst into atomic compare and swaps.
  2535  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  2536  (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
  2537  	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  2538  (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
  2539  	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
  2540  
  2541  // We don't need the conditional move if we know the arg of BSF is not zero.
  2542  (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 -> x
  2543  // Extension is unnecessary for trailing zeros.
  2544  (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) -> (BSFQ (ORQconst <t> [1<<8] x))
  2545  (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst <t> [1<<16] x))
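        // These patterns show up when trailing zeros of a narrow value are computed
        // with a guard bit OR'ed in above its width, e.g. (illustrative Go, an
        // assumption about the calling code, not part of the rules):
        //
        //	n := bits.TrailingZeros64(uint64(x) | 1<<8)   // x uint8
        //
        // The guard bit keeps the BSFQ input nonzero, so the conditional move for
        // the zero-input case and the zero extension of x can both be dropped.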
  2546  
  2547  // Redundant sign/zero extensions
  2548  // Note: see issue 21963. We have to make sure we use the right type on
  2549  // the resulting extension (the outer type, not the inner type).
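        // For example (illustrative Go), for an int16 value x:
        //
        //	int64(int32(x))
        //
        // needs only a single sign extension (MOVWQSX); per the note above, the
        // surviving op keeps the outer 64-bit result type.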
  2550  (MOVLQSX (MOVLQSX x)) -> (MOVLQSX x)
  2551  (MOVLQSX (MOVWQSX x)) -> (MOVWQSX x)
  2552  (MOVLQSX (MOVBQSX x)) -> (MOVBQSX x)
  2553  (MOVWQSX (MOVWQSX x)) -> (MOVWQSX x)
  2554  (MOVWQSX (MOVBQSX x)) -> (MOVBQSX x)
  2555  (MOVBQSX (MOVBQSX x)) -> (MOVBQSX x)
  2556  (MOVLQZX (MOVLQZX x)) -> (MOVLQZX x)
  2557  (MOVLQZX (MOVWQZX x)) -> (MOVWQZX x)
  2558  (MOVLQZX (MOVBQZX x)) -> (MOVBQZX x)
  2559  (MOVWQZX (MOVWQZX x)) -> (MOVWQZX x)
  2560  (MOVWQZX (MOVBQZX x)) -> (MOVBQZX x)
  2561  (MOVBQZX (MOVBQZX x)) -> (MOVBQZX x)
  2562  
  2563  (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
  2564  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
  2565  	(ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  2566  (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  2567  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
  2568  	(ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  2569  
  2570  // float <-> int register moves, with no conversion.
  2571  // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
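        // For example (illustrative Go):
        //
        //	u := math.Float64bits(f)
        //
        // The generic lowering goes through a store and a reload; the rules below
        // turn that round trip into a direct MOVQf2i register-class move.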
  2572  (MOVQload  [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) -> (MOVQf2i val)
  2573  (MOVLload  [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) -> (MOVLf2i val)
  2574  (MOVSDload [off] {sym} ptr (MOVQstore  [off] {sym} ptr val _)) -> (MOVQi2f val)
  2575  (MOVSSload [off] {sym} ptr (MOVLstore  [off] {sym} ptr val _)) -> (MOVLi2f val)
  2576  
  2577  // Other load-like ops.
  2578  (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y))
  2579  (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y))
  2580  (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y))
  2581  (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y))
  2582  (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y))
  2583  (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y))
  2584  ( ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y))
  2585  ( ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y))
  2586  (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y))
  2587  (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y))
  2588  
  2589  (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
  2590    (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
  2591  (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
  2592    (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
  2593  
  2594  (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y))
  2595  (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y))
  2596  (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y))
  2597  (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y))
  2598  (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y))
  2599  (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y))
  2600  
  2601  // Redirect stores to use the other register set.
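        // For example (illustrative Go):
        //
        //	*p = math.Float64bits(f)   // p *uint64
        //
        // can store f directly from its floating-point register (MOVSDstore)
        // rather than moving it to an integer register first.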
  2602  (MOVQstore  [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem)
  2603  (MOVLstore  [off] {sym} ptr (MOVLf2i val) mem) -> (MOVSSstore [off] {sym} ptr val mem)
  2604  (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) -> (MOVQstore  [off] {sym} ptr val mem)
  2605  (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) -> (MOVLstore  [off] {sym} ptr val mem)
  2606  
  2607  // Load args directly into the register class where they will be used.
  2608  // We do this by just modifying the type of the Arg.
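        // For example (illustrative Go):
        //
        //	func f(x float64) uint64 { return math.Float64bits(x) }
        //
        // can load x straight into an integer register by retyping the Arg,
        // avoiding an SSE load followed by a cross-register-class move.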
  2609  (MOVQf2i <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2610  (MOVLf2i <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2611  (MOVQi2f <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2612  (MOVLi2f <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})